content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
##################################################
### Global.R
# S4 generic declarations for the packS4 demo package.
setGeneric("privateA", function(object) standardGeneric("privateA"))
setGeneric("publicA", function(object) standardGeneric("publicA"))
setGeneric("publicB", function(objectV, objectW) standardGeneric("publicB"))

# Classic (non-S4) function: doubles its numeric argument.
functionClassicA <- function(age) {
  age * 2
}

# Load/attach hooks kept commented for reference; packageStartupMessage()
# is the recommended way to emit startup text (users can suppress it).
#.onLoad <- function(libname,pkgname){packageStartupMessage("### [packS4] You load package <",pkgname,"> from <",libname,"> ###\n",sep="")}
#.onAttach <- function(libname,pkgname){packageStartupMessage("### [packS4] You attach package <",pkgname,"> from <",libname,"> ###\n",sep="")}

# Unload hook. BUG FIX: corrected the "pacakge" typo in the user-facing message.
.onUnload <- function(libpath) {
  cat("### [packS4] You unload the package ###\n", sep = "")
}
| /packS4/R/global.R | no_license | ingted/R-Examples | R | false | false | 725 | r | ##################################################
### Global.R
setGeneric("privateA",function(object){standardGeneric("privateA")})
setGeneric("publicA",function(object){standardGeneric("publicA")})
setGeneric("publicB",function(objectV,objectW){standardGeneric("publicB")})
functionClassicA <- function(age){return(age*2)}
#.onLoad <- function(libname,pkgname){packageStartupMessage("### [packS4] You load package <",pkgname,"> from <",libname,"> ###\n",sep="")}
#.onAttach <- function(libname,pkgname){packageStartupMessage("### [packS4] You attach pacakge <",pkgname,"> from <",libname,"> ###\n",sep="")}
.onUnload <- function(libpath){cat("### [packS4] You unload the pacakge ###\n",sep="")}
|
tabItem("dpt",
box(
title = "Titre",
width = 12,
"à venir"
)
) | /src/ui/35_ui_dpt.R | no_license | NicolasImberty/Prenoms | R | false | false | 119 | r | tabItem("dpt",
box(
title = "Titre",
width = 12,
"à venir"
)
) |
\name{CPart}
\alias{CPart}
\alias{CP}
\title{Creates a fuzzy partition}
\description{This is the implementation in R of a fuzzy partition, described in chapter 1, pages 005--009 of Ishibuchi et al. It has a sister function with different parameters but the same objective. It provides a wrapper to create the appropriate list with the appropriate codes. A partition is defined by a set of values which mark the limits and top point of every triangular division. The divisions form a chain: the left and right limits of a division are the top points of its neighbors, and likewise the top point of a given division is a limit of its neighbors. Graphically this is a succession of overlapping triangles, where the projection of the apex of a given triangle marks the point where two triangles join.
}
}
\usage{
CPart(elem, min, max)
}
\arguments{
Takes the number of elements and the minimum and maximum value of the
partition.
\item{elem}{The number of elements of the partition.}
\item{min}{The minimum value of the partition.}
\item{max}{The maximum value of the partition.}
}
\value{Returns a list whose first three elements (numElem, numMin and
numMax) are the arguments given to the function; the fourth element,
part, is the actual partition.
list(numMin=x1, numMax=x2, part=x3, numElem=x4)
}
\examples{
CPart(3,0.0,1.0) # 0.0 0.5 1.0
}
\keyword{univar}
| /man/CPart.Rd | no_license | cran/FKBL | R | false | false | 1,399 | rd | \name{CPart}
\alias{CPart}
\alias{CP}
\title{Creates a fuzzy partition}
\description{This is the implementation in R of a fuzzy partition, described in, chapter 1, pages 005-009 at Ishibuchi et al.\ . It has a sister function, with different parameter, but with the same objective. It provides a wrapper to create the appropriate list with the appropriate codes. A partition is defined by a set of values which mark the limits and top point of every triangular division. The divisions are in a chain, the left and right limit of a division, are the top points of its neighbors. And in the same way, the top point of a given partition is the limit of its neighbors. Graphically this is a succession of mixed triangles, where the projection of a height of a given triangle marks the point where two triangles join.
}
\usage{
CPart(elem, min, max)
}
\arguments{
Takes the number of elements and the minimum and maximum value of the
partition.
\item{elem}{The number of elements of the partition.}
\item{min}{The minimum value of the partition.}
\item{max}{The maximum value of the partition.}
}
\value{Returns a list, the first three elements (numElem, numMin and
numMax) are the arguments given to the function. The fourth one, is the
actual partition.
list(numMin=x1, numMax=x2, part=x3, numElem=x5)
}
\examples{
CPart(3,0.0,1.0) # 0.0 0.5 1.0
}
\keyword{univar}
|
library(shiny)
library(ggplot2)
# Plot the density of a t-distribution (df = 10) with the tail area beyond
# the critical value q1 shaded, annotating the critical value(s) and the
# corresponding tail probability.
#
# section: "upper", "lower" or "both" -- which tail(s) to shade.
# q1: the critical value t*.
# label.quantiles: unused; kept so existing callers keep working.
# dist: distribution name used in the probability label.
# xlab: x-axis label.
plotAreaT <- function(section = "upper", q1 = -1.5, label.quantiles = TRUE, dist = "T", xlab = " ") {
  # Validate early with a clear message instead of failing later on an
  # undefined `p_value` object. (require(ggplot2) removed: the package is
  # loaded at the top of the file; require() is for conditional use only.)
  section <- match.arg(section, c("upper", "lower", "both"))
  x <- seq(-4, 4, by = 0.01)
  df <- 10
  data <- data.frame(x = x, density = dt(x, df))
  p <- ggplot(data, aes(x, y = density)) + geom_line() + xlab(xlab)
  quantile1 <- annotate("text", label = paste("t* =", q1), x = q1, y = 0, size = 8, colour = "black")
  quantile2 <- NULL
  if (section == "upper") {
    shaded <- subset(data, x > q1)
    area <- pt(q1, df, lower.tail = FALSE)
    p_value <- annotate("text", label = paste("P(", toupper(dist), ">", q1, ") = ",
                                              round(area, 4)), x = 2.9, y = 0.3, size = 8, colour = "black")
  } else if (section == "lower") {
    shaded <- subset(data, x < q1)
    area <- pt(q1, df)
    p_value <- annotate("text", label = paste("P(", toupper(dist), "<", q1, ") = ",
                                              round(area, 4)), x = 2.9, y = 0.3, size = 8, colour = "black")
  } else {
    # Two-sided: shade both tails beyond |q1|.
    # BUG FIX: fill/alpha are constants, so they belong OUTSIDE aes();
    # mapping them inside aes() creates a spurious legend (papered over by
    # legend.position = "none" below) and does not actually make the
    # ribbon blue.
    upper_tail <- subset(data, x > abs(q1))
    p <- p + geom_ribbon(data = upper_tail, aes(ymin = 0, ymax = density), fill = "blue", alpha = 0.4)
    shaded <- subset(data, x < -abs(q1))
    area <- pt(abs(q1), df, lower.tail = FALSE) + pt(-abs(q1), df)
    p_value <- annotate("text", label = paste("2*P(", toupper(dist), ">", abs(q1), ") = ",
                                              round(area, 4)), x = 2.9, y = 0.3, size = 8, colour = "black")
    quantile2 <- annotate("text", label = paste("t* =", -q1), x = -q1, y = 0, size = 8, colour = "black")
  }
  p + p_value + quantile1 + quantile2 +
    geom_ribbon(data = shaded, aes(ymin = 0, ymax = density), fill = "blue", alpha = 0.4) +
    theme(legend.position = "none")
}
# Shiny server: renders the shaded t-distribution plot for the selected
# tail type (input$type) and critical value (input$q1), cropped to the
# bare panel (no axes, labels or margins).
shinyServer(function(input, output, session) {
  output$plot1 <- renderPlot({
    #Koshke is the man -- http://stackoverflow.com/questions/14313285/ggplot2-theme-with-no-axes-or-grid
    library(grid)  # for grid.draw(); NOTE(review): prefer loading at top of file
    p <- plotAreaT(section=input$type, q1=input$q1)
    # Blank out every line/text/title theme element so only the curve and
    # the shaded ribbon remain. (`line =` is passed twice; the second wins.)
    p <- p + theme(line = element_blank(),
                   text = element_blank(),
                   line = element_blank(),
                   title = element_blank())
    # Build the plot's gtable and draw only the "panel" cell of the layout,
    # cropping away the (now blank) axis/margin regions.
    gt <- ggplot_gtable(ggplot_build(p))
    ge <- subset(gt$layout, name == "panel")
    print(grid.draw(gt[ge$t:ge$b, ge$l:ge$r]))
  })
})
}) | /99-ReferenceApps/shiny_apps/testing/server.R | permissive | nwstephens/shiny-day-2016 | R | false | false | 2,402 | r | library(shiny)
library(ggplot2)
plotAreaT <- function(section = "upper", q1 = -1.5, label.quantiles=TRUE, dist = "T", xlab=" ") {
require(ggplot2)
x <- seq(-4, 4, by = 0.01)
df <- 10
data <- data.frame(x = x, density = dt(x, df))
p <- ggplot(data, aes(x, y=density)) + geom_line() + xlab(xlab)
quantile1 <- annotate("text", label = paste("t* =", q1), x = q1, y = 0, size = 8, colour = "black")
quantile2 <- NULL
if (section == "upper") {
section <- subset(data, x > q1)
area <- pt(q1, df, lower.tail = FALSE)
p_value <- annotate("text", label = paste("P(", toupper(dist), ">", q1, ") = ",
round(area, 4)), x = 2.9, y = 0.3, size = 8, colour = "black")
} else if (section == "lower") {
section <- subset(data, x < q1)
area <- pt(q1, df)
p_value <- annotate("text", label = paste("P(", toupper(dist), "<", q1, ") = ",
round(area, 4)), x = 2.9, y = 0.3, size = 8, colour = "black")
} else if (section == "both"){
section1 <- subset(data, x > abs(q1))
p <- p + geom_ribbon(data=section1, aes(ymin=0, ymax=density, fill="blue", alpha=.4))
section <- subset(data, x < -abs(q1))
area <- pt(abs(q1), df, lower.tail = FALSE) + pt(-abs(q1), df)
p_value <- annotate("text", label = paste("2*P(", toupper(dist), ">", abs(q1), ") = ",
round(area, 4)), x = 2.9, y = 0.3, size = 8, colour = "black")
quantile2 <- annotate("text", label = paste("t* =", -q1), x = -q1, y = 0, size = 8, colour = "black")
}
p + p_value + quantile1 + quantile2 + #geom_vline(xintercept = 0, color = "blue") + annotate("text", label = "0", x = 0, y = 0, size = 5) +
geom_ribbon(data=section, aes(ymin=0, ymax=density, fill="blue", alpha=.4))+theme(legend.position = "none")
}
shinyServer(function(input, output, session) {
output$plot1 <- renderPlot({
#Koshke is the man -- http://stackoverflow.com/questions/14313285/ggplot2-theme-with-no-axes-or-grid
library(grid)
p <- plotAreaT(section=input$type, q1=input$q1)
p <- p + theme(line = element_blank(),
text = element_blank(),
line = element_blank(),
title = element_blank())
gt <- ggplot_gtable(ggplot_build(p))
ge <- subset(gt$layout, name == "panel")
print(grid.draw(gt[ge$t:ge$b, ge$l:ge$r]))
})
}) |
# Setup ----
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged
# (they clobber the user's session and hard-code one machine's layout);
# kept because every relative path below depends on this working directory.
rm(list = ls())
# library() errors immediately on a missing package, unlike require(),
# which silently returns FALSE.
library(openxlsx)
library(tidyr)
library(dplyr)
library(ggplot2)
library(psych)
setwd("D:/abroad/Harvard/18spring courses/SOCIOL 198/final")
# 2007-2016: build two-mode (director x company) incidence matrices ----
# For each year: read data/alltmlong<yy>.csv, dump the unique company
# tickers and director ids, then fill the hand-made empty two-mode
# template (data/empty<yy>.csv) with 1s for every board membership, and
# write the result as data/tm<yy>.csv.
#
# The original script repeated this ~20-line section verbatim for every
# year from 2016 down to 2007; the helper + loop below is equivalent.
build_two_mode <- function(yy) {
  dat <- read.csv(file.path("data", paste0("alltmlong", yy, ".csv")))
  write.csv(as.data.frame(unique(dat$ticker)),
            file.path("data", paste0("companylist", yy, ".csv")),
            row.names = FALSE)
  write.csv(as.data.frame(unique(dat$id)),
            file.path("data", paste0("idlist", yy, ".csv")),
            row.names = FALSE)
  # create an empty 2-mode csv file based on companylist## and idlist##
  # manually for each year, name it as empty##
  tm <- read.csv(file.path("data", paste0("empty", yy, ".csv")))
  rownames(tm) <- tm$id
  tm <- tm[, -1]
  tm[, ] <- 0
  # Mark every (director id, company ticker) pair present in the long data.
  for (i in seq_len(nrow(dat))) {                  # seq_len() is safe when nrow == 0
    col_idx <- which(colnames(tm) == dat[i, 1])    # column 1: ticker
    row_idx <- which(rownames(tm) == dat[i, 2])    # column 2: director id
    tm[row_idx, col_idx] <- 1
  }
  # NOTE: write.csv() ignores col.names (the original passed col.names = T,
  # which only produces a warning), so that argument is dropped here.
  write.csv(tm, file.path("data", paste0("tm", yy, ".csv")), row.names = TRUE)
  invisible(tm)
}
for (yy in c("16", "15", "14", "13", "12", "11", "10", "09", "08", "07")) {
  build_two_mode(yy)
}
####2016m-2007m####
# Multiple-director ("lim") data: for each year, read data/limtmlong<yy>.csv
# and write out the unique company tickers and director ids so the empty
# two-mode templates can be built by hand.
#
# Deduplicated from ten verbatim copies of the same five lines.
write_lim_lists <- function(yy) {
  limdat <- read.csv(file.path("data", paste0("limtmlong", yy, ".csv")))
  write.csv(as.data.frame(unique(limdat$ticker)),
            file.path("data", paste0("limcompanylist", yy, ".csv")),
            row.names = FALSE)
  write.csv(as.data.frame(unique(limdat$id)),
            file.path("data", paste0("limidlist", yy, ".csv")),
            row.names = FALSE)
  invisible(limdat)
}
for (yy in c("16", "15", "14", "13", "12", "11", "10", "09", "08", "07")) {
  write_lim_lists(yy)
}
#create an empty 2-mode csv file based on companylist## and idlist## manually for each year, name it as limempty##
# Fill the hand-made empty two-mode templates for the multiple-director
# ("lim") data and write them out as data/limtm<yy>.csv.
#
# Deduplicated from ten verbatim copies of the same per-year section.
build_lim_two_mode <- function(yy) {
  limdat <- read.csv(file.path("data", paste0("limtmlong", yy, ".csv")))
  tm <- read.csv(file.path("data", paste0("limempty", yy, ".csv")))
  rownames(tm) <- tm$id
  tm <- tm[, -1]
  tm[, ] <- 0
  # Mark every (director id, company ticker) pair present in the long data.
  for (i in seq_len(nrow(limdat))) {
    col_idx <- which(colnames(tm) == limdat[i, 1])   # column 1: ticker
    row_idx <- which(rownames(tm) == limdat[i, 2])   # column 2: director id
    tm[row_idx, col_idx] <- 1
  }
  # NOTE: write.csv() ignores col.names (the original passed col.names = T,
  # which only produces a warning), so that argument is dropped here.
  write.csv(tm, file.path("data", paste0("limtm", yy, ".csv")), row.names = TRUE)
  invisible(tm)
}
for (yy in c("16", "15", "14", "13", "12", "11", "10", "09", "08", "07")) {
  build_lim_two_mode(yy)
}
####centrality_all####
# Stack the per-year centrality sheets (one Excel sheet per year) into a
# single long table with a `year` column, then save it.
cen_years <- 2016:2007
cenall <- bind_rows(lapply(cen_years, function(yr) {
  cen <- read.xlsx("data/centrality_year.xlsx", sheet = as.character(yr))
  cen$year <- yr
  cen
}))
cenall$year <- as.factor(cenall$year)
cenall$female <- as.factor(cenall$female)
write.csv(cenall, file = "data/cenfinal.csv", row.names = FALSE)
####centrality_limit####
# Same stacking for the multiple-director ("lim") centrality sheets.
limcen_years <- 2016:2007
limcenall <- bind_rows(lapply(limcen_years, function(yr) {
  cen <- read.xlsx("data/limcentrality_year.xlsx", sheet = as.character(yr))
  cen$year <- yr
  cen
}))
limcenall$year <- as.factor(limcenall$year)
limcenall$female <- as.factor(limcenall$female)
# Fix a column-name typo carried over from the spreadsheet.
names(limcenall)[names(limcenall) == "nDegre"] <- "nDegree"
write.csv(limcenall, file = "data/limcenfinal.csv", row.names = FALSE)
####summary####
#cenall <- read.csv("data/cenfinal.csv")
#limcenall <- read.csv("data/limcenfinal.csv")
# Per-year, per-gender descriptive statistics of the centrality measures.
censum <- cenall %>% group_by(year, female) %>%
  summarise(ndegree_m = mean(nDegree),
            ndegree_sd = sd(nDegree),
            ndegree_min = min(nDegree),
            ndegree_max = max(nDegree),
            nbet_m = mean(nBetweenness),
            nbet_sd = sd(nBetweenness),
            nbet_min = min(nBetweenness),
            nbet_max = max(nBetweenness))
limcensum <- limcenall %>% group_by(year, female) %>%
  summarise(ndegree_m = mean(nDegree),
            ndegree_sd = sd(nDegree),
            ndegree_min = min(nDegree),
            ndegree_max = max(nDegree),
            nbet_m = mean(nBetweenness),
            nbet_sd = sd(nBetweenness),
            nbet_min = min(nBetweenness),
            nbet_max = max(nBetweenness),
            eigen_m = mean(Eigenv),
            eigen_sd = sd(Eigenv),
            eigen_min = min(Eigenv),
            eigen_max = max(Eigenv))
write.csv(censum, file = "data/censum.csv", row.names = FALSE)
write.csv(limcensum, file = "data/limcensum.csv", row.names = FALSE)
# Share of women on boards per year.
# BUG FIX: `female` was converted to a factor above, so mean(female)
# returned NA with a warning; convert back to numeric 0/1 before averaging.
gallsum <- cenall %>% group_by(year) %>%
  summarise(female = mean(as.numeric(as.character(female))))
glimsum <- limcenall %>% group_by(year) %>%
  summarise(female = mean(as.numeric(as.character(female))))
# Relabel the 0/1 `female` factor as Male/Female for the plot legends.
names(censum)[names(censum) == "female"] <- "gender"
levels(censum$gender) <- c("Male", "Female")
names(limcensum)[names(limcensum) == "female"] <- "gender"
levels(limcensum$gender) <- c("Male", "Female")
# Mean normalized degree centrality by year and gender (full network).
ggplot(censum, aes(x = year, y = ndegree_m, color = gender, shape = gender)) +
  geom_point() +
  labs(title = "Figure Normalized degree centrality") +
  ylab("normalized degree centrality")
# Mean normalized betweenness centrality by year and gender (full network).
ggplot(censum, aes(x = year, y = nbet_m, color = gender, shape = gender)) +
  geom_point() +
  labs(title = "Figure Normalized betweenness centrality") +
  ylab("normalized betweenness centrality")
# Same two measures, restricted to directors sitting on multiple boards.
ggplot(limcensum, aes(x = year, y = ndegree_m, color = gender, shape = gender)) +
  geom_point() +
  labs(title = "Figure normalized degree centrality (multiple directors)") +
  ylab("normalized degree centrality")
ggplot(limcensum, aes(x = year, y = nbet_m, color = gender, shape = gender)) +
  geom_point() +
  labs(title = "Figure Normalized betweenness centrality (multiple directors)") +
  ylab("normalized betweenness centrality")
# Eigenvector centrality is only computed for the restricted network.
ggplot(limcensum, aes(x = year, y = eigen_m, color = gender, shape = gender)) +
  geom_point() +
  labs(title = "Figure Eigenvector centrality") +
  ylab("eigenvector centrality")
| /reshape.R | no_license | SUN-Wenjun/SOCIOL-198 | R | false | false | 22,906 | r | rm(list = ls())
require(openxlsx)
require(tidyr)
require(dplyr)
require(ggplot2)
require(psych)
setwd("D:/abroad/Harvard/18spring courses/SOCIOL 198/final")
####2016####
dat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/alltmlong16.csv")
companylist16 <- as.data.frame(unique(dat$ticker))
idlist16 <- as.data.frame(unique(dat$id))
write.csv(companylist16, "data/companylist16.csv", row.names = FALSE)
write.csv(idlist16, "data/idlist16.csv", row.names = FALSE)
#create an empty 2-mode csv file based on companylist## and idlist## manually for each year, name it as empty##
empty16 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/empty16.csv")
rownames(empty16) <- empty16$id
empty16 <- empty16[,-1]
empty16[,] <- 0
nid <- nrow(dat)
for (x in 1:nid){
xticker <- dat[x,1]
xid <- dat[x,2]
xrow <- which(colnames(empty16)==xticker)
xcol <- which(rownames(empty16)==xid)
empty16[xcol,xrow] <- 1
}
write.csv(empty16, "data/tm16.csv", row.names = T, col.names = T)
####2015####
dat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/alltmlong15.csv")
companylist15 <- as.data.frame(unique(dat$ticker))
idlist15 <- as.data.frame(unique(dat$id))
write.csv(companylist15, "data/companylist15.csv", row.names = FALSE)
write.csv(idlist15, "data/idlist15.csv", row.names = FALSE)
#create an empty 2-mode csv file based on companylist## and idlist## manually for each year, name it as empty##
empty15 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/empty15.csv")
rownames(empty15) <- empty15$id
empty15 <- empty15[,-1]
empty15[,] <- 0
nid <- nrow(dat)
for (x in 1:nid){
xticker <- dat[x,1]
xid <- dat[x,2]
xrow <- which(colnames(empty15)==xticker)
xcol <- which(rownames(empty15)==xid)
empty15[xcol,xrow] <- 1
}
write.csv(empty15, "data/tm15.csv", row.names = T, col.names = T)
####2014####
dat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/alltmlong14.csv")
companylist14 <- as.data.frame(unique(dat$ticker))
idlist14 <- as.data.frame(unique(dat$id))
write.csv(companylist14, "data/companylist14.csv", row.names = FALSE)
write.csv(idlist14, "data/idlist14.csv", row.names = FALSE)
#create an empty 2-mode csv file based on companylist## and idlist## manually for each year, name it as empty##
empty14 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/empty14.csv")
rownames(empty14) <- empty14$id
empty14 <- empty14[,-1]
empty14[,] <- 0
nid <- nrow(dat)
for (x in 1:nid){
xticker <- dat[x,1]
xid <- dat[x,2]
xrow <- which(colnames(empty14)==xticker)
xcol <- which(rownames(empty14)==xid)
empty14[xcol,xrow] <- 1
}
write.csv(empty14, "data/tm14.csv", row.names = T, col.names = T)
####2013####
dat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/alltmlong13.csv")
companylist13 <- as.data.frame(unique(dat$ticker))
idlist13 <- as.data.frame(unique(dat$id))
write.csv(companylist13, "data/companylist13.csv", row.names = FALSE)
write.csv(idlist13, "data/idlist13.csv", row.names = FALSE)
#create an empty 2-mode csv file based on companylist## and idlist## manually for each year, name it as empty##
empty13 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/empty13.csv")
rownames(empty13) <- empty13$id
empty13 <- empty13[,-1]
empty13[,] <- 0
nid <- nrow(dat)
for (x in 1:nid){
xticker <- dat[x,1]
xid <- dat[x,2]
xrow <- which(colnames(empty13)==xticker)
xcol <- which(rownames(empty13)==xid)
empty13[xcol,xrow] <- 1
}
write.csv(empty13, "data/tm13.csv", row.names = T, col.names = T)
####2012####
dat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/alltmlong12.csv")
companylist12 <- as.data.frame(unique(dat$ticker))
idlist12 <- as.data.frame(unique(dat$id))
write.csv(companylist12, "data/companylist12.csv", row.names = FALSE)
write.csv(idlist12, "data/idlist12.csv", row.names = FALSE)
#create an empty 2-mode csv file based on companylist## and idlist## manually for each year, name it as empty##
empty12 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/empty12.csv")
rownames(empty12) <- empty12$id
empty12 <- empty12[,-1]
empty12[,] <- 0
nid <- nrow(dat)
for (x in 1:nid){
xticker <- dat[x,1]
xid <- dat[x,2]
xrow <- which(colnames(empty12)==xticker)
xcol <- which(rownames(empty12)==xid)
empty12[xcol,xrow] <- 1
}
write.csv(empty12, "data/tm12.csv", row.names = T, col.names = T)
####2011####
dat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/alltmlong11.csv")
companylist11 <- as.data.frame(unique(dat$ticker))
idlist11 <- as.data.frame(unique(dat$id))
write.csv(companylist11, "data/companylist11.csv", row.names = FALSE)
write.csv(idlist11, "data/idlist11.csv", row.names = FALSE)
#create an empty 2-mode csv file based on companylist## and idlist## manually for each year, name it as empty##
empty11 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/empty11.csv")
rownames(empty11) <- empty11$id
empty11 <- empty11[,-1]
empty11[,] <- 0
nid <- nrow(dat)
for (x in 1:nid){
xticker <- dat[x,1]
xid <- dat[x,2]
xrow <- which(colnames(empty11)==xticker)
xcol <- which(rownames(empty11)==xid)
empty11[xcol,xrow] <- 1
}
write.csv(empty11, "data/tm11.csv", row.names = T, col.names = T)
####2010####
dat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/alltmlong10.csv")
companylist10 <- as.data.frame(unique(dat$ticker))
idlist10 <- as.data.frame(unique(dat$id))
write.csv(companylist10, "data/companylist10.csv", row.names = FALSE)
write.csv(idlist10, "data/idlist10.csv", row.names = FALSE)
#create an empty 2-mode csv file based on companylist## and idlist## manually for each year, name it as empty##
empty10 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/empty10.csv")
rownames(empty10) <- empty10$id
empty10 <- empty10[,-1]
empty10[,] <- 0
nid <- nrow(dat)
for (x in 1:nid){
xticker <- dat[x,1]
xid <- dat[x,2]
xrow <- which(colnames(empty10)==xticker)
xcol <- which(rownames(empty10)==xid)
empty10[xcol,xrow] <- 1
}
write.csv(empty10, "data/tm10.csv", row.names = T, col.names = T)
####2009####
dat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/alltmlong09.csv")
companylist09 <- as.data.frame(unique(dat$ticker))
idlist09 <- as.data.frame(unique(dat$id))
write.csv(companylist09, "data/companylist09.csv", row.names = FALSE)
write.csv(idlist09, "data/idlist09.csv", row.names = FALSE)
#create an empty 2-mode csv file based on companylist## and idlist## manually for each year, name it as empty##
empty09 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/empty09.csv")
rownames(empty09) <- empty09$id
empty09 <- empty09[,-1]
empty09[,] <- 0
nid <- nrow(dat)
for (x in 1:nid){
xticker <- dat[x,1]
xid <- dat[x,2]
xrow <- which(colnames(empty09)==xticker)
xcol <- which(rownames(empty09)==xid)
empty09[xcol,xrow] <- 1
}
write.csv(empty09, "data/tm09.csv", row.names = T, col.names = T)
####2008####
dat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/alltmlong08.csv")
companylist08 <- as.data.frame(unique(dat$ticker))
idlist08 <- as.data.frame(unique(dat$id))
write.csv(companylist08, "data/companylist08.csv", row.names = FALSE)
write.csv(idlist08, "data/idlist08.csv", row.names = FALSE)
#create an empty 2-mode csv file based on companylist## and idlist## manually for each year, name it as empty##
empty08 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/empty08.csv")
rownames(empty08) <- empty08$id
empty08 <- empty08[,-1]
empty08[,] <- 0
nid <- nrow(dat)
for (x in 1:nid){
xticker <- dat[x,1]
xid <- dat[x,2]
xrow <- which(colnames(empty08)==xticker)
xcol <- which(rownames(empty08)==xid)
empty08[xcol,xrow] <- 1
}
write.csv(empty08, "data/tm08.csv", row.names = T, col.names = T)
####2007####
dat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/alltmlong07.csv")
companylist07 <- as.data.frame(unique(dat$ticker))
idlist07 <- as.data.frame(unique(dat$id))
write.csv(companylist07, "data/companylist07.csv", row.names = FALSE)
write.csv(idlist07, "data/idlist07.csv", row.names = FALSE)
#create an empty 2-mode csv file based on companylist## and idlist## manually for each year, name it as empty##
empty07 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/empty07.csv")
rownames(empty07) <- empty07$id
empty07 <- empty07[,-1]
empty07[,] <- 0
nid <- nrow(dat)
for (x in 1:nid){
xticker <- dat[x,1]
xid <- dat[x,2]
xrow <- which(colnames(empty07)==xticker)
xcol <- which(rownames(empty07)==xid)
empty07[xcol,xrow] <- 1
}
write.csv(empty07, "data/tm07.csv", row.names = T, col.names = T)
####2016m-2007m####
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong16.csv")
limcompanylist16 <- as.data.frame(unique(limdat$ticker))
limidlist16 <- as.data.frame(unique(limdat$id))
write.csv(limcompanylist16, "data/limcompanylist16.csv", row.names = FALSE)
write.csv(limidlist16, "data/limidlist16.csv", row.names = FALSE)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong15.csv")
limcompanylist15 <- as.data.frame(unique(limdat$ticker))
limidlist15 <- as.data.frame(unique(limdat$id))
write.csv(limcompanylist15, "data/limcompanylist15.csv", row.names = FALSE)
write.csv(limidlist15, "data/limidlist15.csv", row.names = FALSE)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong14.csv")
limcompanylist14 <- as.data.frame(unique(limdat$ticker))
limidlist14 <- as.data.frame(unique(limdat$id))
write.csv(limcompanylist14, "data/limcompanylist14.csv", row.names = FALSE)
write.csv(limidlist14, "data/limidlist14.csv", row.names = FALSE)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong13.csv")
limcompanylist13 <- as.data.frame(unique(limdat$ticker))
limidlist13 <- as.data.frame(unique(limdat$id))
write.csv(limcompanylist13, "data/limcompanylist13.csv", row.names = FALSE)
write.csv(limidlist13, "data/limidlist13.csv", row.names = FALSE)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong12.csv")
limcompanylist12 <- as.data.frame(unique(limdat$ticker))
limidlist12 <- as.data.frame(unique(limdat$id))
write.csv(limcompanylist12, "data/limcompanylist12.csv", row.names = FALSE)
write.csv(limidlist12, "data/limidlist12.csv", row.names = FALSE)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong11.csv")
limcompanylist11 <- as.data.frame(unique(limdat$ticker))
limidlist11 <- as.data.frame(unique(limdat$id))
write.csv(limcompanylist11, "data/limcompanylist11.csv", row.names = FALSE)
write.csv(limidlist11, "data/limidlist11.csv", row.names = FALSE)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong10.csv")
limcompanylist10 <- as.data.frame(unique(limdat$ticker))
limidlist10 <- as.data.frame(unique(limdat$id))
write.csv(limcompanylist10, "data/limcompanylist10.csv", row.names = FALSE)
write.csv(limidlist10, "data/limidlist10.csv", row.names = FALSE)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong09.csv")
limcompanylist09 <- as.data.frame(unique(limdat$ticker))
limidlist09 <- as.data.frame(unique(limdat$id))
write.csv(limcompanylist09, "data/limcompanylist09.csv", row.names = FALSE)
write.csv(limidlist09, "data/limidlist09.csv", row.names = FALSE)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong08.csv")
limcompanylist08 <- as.data.frame(unique(limdat$ticker))
limidlist08 <- as.data.frame(unique(limdat$id))
write.csv(limcompanylist08, "data/limcompanylist08.csv", row.names = FALSE)
write.csv(limidlist08, "data/limidlist08.csv", row.names = FALSE)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong07.csv")
limcompanylist07 <- as.data.frame(unique(limdat$ticker))
limidlist07 <- as.data.frame(unique(limdat$id))
write.csv(limcompanylist07, "data/limcompanylist07.csv", row.names = FALSE)
write.csv(limidlist07, "data/limidlist07.csv", row.names = FALSE)
#create an empty 2-mode csv file based on companylist## and idlist## manually for each year, name it as limempty##
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong16.csv")
limempty16 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limempty16.csv")
rownames(limempty16) <- limempty16$id
limempty16 <- limempty16[,-1]
limempty16[,] <- 0
nid <- nrow(limdat)
for (x in 1:nid){
xticker <- limdat[x,1]
xid <- limdat[x,2]
xrow <- which(colnames(limempty16)==xticker)
xcol <- which(rownames(limempty16)==xid)
limempty16[xcol,xrow] <- 1
}
write.csv(limempty16, "data/limtm16.csv", row.names = T, col.names = T)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong15.csv")
limempty15 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limempty15.csv")
rownames(limempty15) <- limempty15$id
limempty15 <- limempty15[,-1]
limempty15[,] <- 0
nid <- nrow(limdat)
for (x in 1:nid){
xticker <- limdat[x,1]
xid <- limdat[x,2]
xrow <- which(colnames(limempty15)==xticker)
xcol <- which(rownames(limempty15)==xid)
limempty15[xcol,xrow] <- 1
}
write.csv(limempty15, "data/limtm15.csv", row.names = T, col.names = T)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong14.csv")
limempty14 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limempty14.csv")
rownames(limempty14) <- limempty14$id
limempty14 <- limempty14[,-1]
limempty14[,] <- 0
nid <- nrow(limdat)
for (x in 1:nid){
xticker <- limdat[x,1]
xid <- limdat[x,2]
xrow <- which(colnames(limempty14)==xticker)
xcol <- which(rownames(limempty14)==xid)
limempty14[xcol,xrow] <- 1
}
write.csv(limempty14, "data/limtm14.csv", row.names = T, col.names = T)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong13.csv")
limempty13 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limempty13.csv")
rownames(limempty13) <- limempty13$id
limempty13 <- limempty13[,-1]
limempty13[,] <- 0
nid <- nrow(limdat)
for (x in 1:nid){
xticker <- limdat[x,1]
xid <- limdat[x,2]
xrow <- which(colnames(limempty13)==xticker)
xcol <- which(rownames(limempty13)==xid)
limempty13[xcol,xrow] <- 1
}
write.csv(limempty13, "data/limtm13.csv", row.names = T, col.names = T)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong12.csv")
limempty12 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limempty12.csv")
rownames(limempty12) <- limempty12$id
limempty12 <- limempty12[,-1]
limempty12[,] <- 0
nid <- nrow(limdat)
for (x in 1:nid){
xticker <- limdat[x,1]
xid <- limdat[x,2]
xrow <- which(colnames(limempty12)==xticker)
xcol <- which(rownames(limempty12)==xid)
limempty12[xcol,xrow] <- 1
}
write.csv(limempty12, "data/limtm12.csv", row.names = T, col.names = T)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong11.csv")
limempty11 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limempty11.csv")
rownames(limempty11) <- limempty11$id
limempty11 <- limempty11[,-1]
limempty11[,] <- 0
nid <- nrow(limdat)
for (x in 1:nid){
xticker <- limdat[x,1]
xid <- limdat[x,2]
xrow <- which(colnames(limempty11)==xticker)
xcol <- which(rownames(limempty11)==xid)
limempty11[xcol,xrow] <- 1
}
write.csv(limempty11, "data/limtm11.csv", row.names = T, col.names = T)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong10.csv")
limempty10 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limempty10.csv")
rownames(limempty10) <- limempty10$id
limempty10 <- limempty10[,-1]
limempty10[,] <- 0
nid <- nrow(limdat)
for (x in 1:nid){
xticker <- limdat[x,1]
xid <- limdat[x,2]
xrow <- which(colnames(limempty10)==xticker)
xcol <- which(rownames(limempty10)==xid)
limempty10[xcol,xrow] <- 1
}
write.csv(limempty10, "data/limtm10.csv", row.names = T, col.names = T)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong09.csv")
limempty09 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limempty09.csv")
rownames(limempty09) <- limempty09$id
limempty09 <- limempty09[,-1]
limempty09[,] <- 0
nid <- nrow(limdat)
for (x in 1:nid){
xticker <- limdat[x,1]
xid <- limdat[x,2]
xrow <- which(colnames(limempty09)==xticker)
xcol <- which(rownames(limempty09)==xid)
limempty09[xcol,xrow] <- 1
}
write.csv(limempty09, "data/limtm09.csv", row.names = T, col.names = T)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong08.csv")
limempty08 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limempty08.csv")
rownames(limempty08) <- limempty08$id
limempty08 <- limempty08[,-1]
limempty08[,] <- 0
nid <- nrow(limdat)
for (x in 1:nid){
xticker <- limdat[x,1]
xid <- limdat[x,2]
xrow <- which(colnames(limempty08)==xticker)
xcol <- which(rownames(limempty08)==xid)
limempty08[xcol,xrow] <- 1
}
write.csv(limempty08, "data/limtm08.csv", row.names = T, col.names = T)
limdat <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limtmlong07.csv")
limempty07 <- read.csv("D:/abroad/Harvard/18spring courses/SOCIOL 198/final/data/limempty07.csv")
rownames(limempty07) <- limempty07$id
limempty07 <- limempty07[,-1]
limempty07[,] <- 0
nid <- nrow(limdat)
for (x in 1:nid){
xticker <- limdat[x,1]
xid <- limdat[x,2]
xrow <- which(colnames(limempty07)==xticker)
xcol <- which(rownames(limempty07)==xid)
limempty07[xcol,xrow] <- 1
}
write.csv(limempty07, "data/limtm07.csv", row.names = T, col.names = T)
####centrality_all####
cen16 <- read.xlsx("data/centrality_year.xlsx", sheet = "2016")
cen16$year <- 2016
cen15 <- read.xlsx("data/centrality_year.xlsx", sheet = "2015")
cen15$year <- 2015
cen14 <- read.xlsx("data/centrality_year.xlsx", sheet = "2014")
cen14$year <- 2014
cen13 <- read.xlsx("data/centrality_year.xlsx", sheet = "2013")
cen13$year <- 2013
cen12 <- read.xlsx("data/centrality_year.xlsx", sheet = "2012")
cen12$year <- 2012
cen11 <- read.xlsx("data/centrality_year.xlsx", sheet = "2011")
cen11$year <- 2011
cen10 <- read.xlsx("data/centrality_year.xlsx", sheet = "2010")
cen10$year <- 2010
cen09 <- read.xlsx("data/centrality_year.xlsx", sheet = "2009")
cen09$year <- 2009
cen08 <- read.xlsx("data/centrality_year.xlsx", sheet = "2008")
cen08$year <- 2008
cen07 <- read.xlsx("data/centrality_year.xlsx", sheet = "2007")
cen07$year <- 2007
cenall <- bind_rows(cen16, cen15, cen14, cen13, cen12, cen11, cen10, cen09, cen08, cen07)
cenall$year <- as.factor(cenall$year)
cenall$female <- as.factor(cenall$female)
write.csv(cenall, file = "data/cenfinal.csv", row.names = F)
####centrality_limit####
limcen16 <- read.xlsx("data/limcentrality_year.xlsx", sheet = "2016")
limcen16$year <- 2016
limcen15 <- read.xlsx("data/limcentrality_year.xlsx", sheet = "2015")
limcen15$year <- 2015
limcen14 <- read.xlsx("data/limcentrality_year.xlsx", sheet = "2014")
limcen14$year <- 2014
limcen13 <- read.xlsx("data/limcentrality_year.xlsx", sheet = "2013")
limcen13$year <- 2013
limcen12 <- read.xlsx("data/limcentrality_year.xlsx", sheet = "2012")
limcen12$year <- 2012
limcen11 <- read.xlsx("data/limcentrality_year.xlsx", sheet = "2011")
limcen11$year <- 2011
limcen10 <- read.xlsx("data/limcentrality_year.xlsx", sheet = "2010")
limcen10$year <- 2010
limcen09 <- read.xlsx("data/limcentrality_year.xlsx", sheet = "2009")
limcen09$year <- 2009
limcen08 <- read.xlsx("data/limcentrality_year.xlsx", sheet = "2008")
limcen08$year <- 2008
limcen07 <- read.xlsx("data/limcentrality_year.xlsx", sheet = "2007")
limcen07$year <- 2007
limcenall <- bind_rows(limcen16, limcen15, limcen14, limcen13, limcen12, limcen11, limcen10, limcen09, limcen08, limcen07)
limcenall$year <- as.factor(limcenall$year)
limcenall$female <- as.factor(limcenall$female)
names(limcenall)[names(limcenall)=="nDegre"]="nDegree"
write.csv(limcenall, file = "data/limcenfinal.csv", row.names = F)
####summary####
#cenall <- read.csv("data/cenfinal.csv")
#limcenall <- read.csv("data/limcenfinal.csv")
censum <- cenall %>% group_by(year, female) %>%
summarise(., ndegree_m = mean(nDegree),
ndegree_sd = sd(nDegree),
ndegree_min = min(nDegree),
ndegree_max = max(nDegree),
nbet_m = mean(nBetweenness),
nbet_sd = sd(nBetweenness),
nbet_min = min(nBetweenness),
nbet_max = max(nBetweenness))
limcensum <- limcenall %>% group_by(year, female) %>%
summarise(., ndegree_m = mean(nDegree),
ndegree_sd = sd(nDegree),
ndegree_min = min(nDegree),
ndegree_max = max(nDegree),
nbet_m = mean(nBetweenness),
nbet_sd = sd(nBetweenness),
nbet_min = min(nBetweenness),
nbet_max = max(nBetweenness),
eigen_m = mean(Eigenv),
eigen_sd = sd(Eigenv),
eigen_min = min(Eigenv),
eigen_max = max(Eigenv)
)
write.csv(censum, file = "data/censum.csv", row.names = F)
write.csv(limcensum, file = "data/limcensum.csv", row.names = F)
gallsum <- cenall %>% group_by(year) %>%
summarise(., female = mean(female))
glimsum <- limcenall %>% group_by(year) %>%
summarise(., female = mean(female))
names(censum)[names(censum)=="female"]="gender"
levels(censum$gender) <- c('Male', 'Female')
names(limcensum)[names(limcensum)=="female"]="gender"
levels(limcensum$gender) <- c('Male', 'Female')
ggplot() +
geom_point(censum, mapping = aes(x = year, y = ndegree_m, color = gender, shape = gender)) +
labs(title = 'Figure Normalized degree centrality') +
ylab('normalized degree centrality')
ggplot() +
geom_point(censum, mapping = aes(x = year, y = nbet_m, color = gender, shape = gender)) +
labs(title = 'Figure Normalized betweenness centrality') +
ylab('normalized betweenness centrality')
ggplot() +
geom_point(limcensum, mapping = aes(x = year, y = ndegree_m, color = gender, shape = gender)) +
labs(title = 'Figure normalized degree centrality (multiple directors)') +
ylab('normalized degree centrality')
ggplot() +
geom_point(limcensum, mapping = aes(x = year, y = nbet_m, color = gender, shape = gender)) +
labs(title = 'Figure Normalized betweenness centrality (multiple directors)') +
ylab('normalized betweenness centrality')
ggplot() +
geom_point(limcensum, mapping = aes(x = year, y = eigen_m, color = gender, shape = gender)) +
labs(title = 'Figure Eigenvector centrality') +
ylab('eigenvector centrality')
|
/Plot3.r | no_license | Majeson/ExData_Plotting1 | R | false | false | 1,259 | r | ||
\name{make.lags}
\title{
Lags vectors and covariates correctly so that an autoregressive model
can be estimated by regression.
}
\usage{
make.lags(x, lags, cov,nobs=3500)
}
\arguments{
\item{x}{
Vector or matrix representing a univariate or multivariate time series.
(rows are assumed to idex time)
}
\item{lags}{
Vector of time delays used in reconstruction.
}
\item{nobs}{
Maximum length of time series.
}
\item{cov}{
A vector or matrix of covariates that will be matched with the times for
the independent varaible
}}
\value{
\item{x}{
Matrix of lagged values of the time series, independent variables.
The covaraites are the last columns of this matrix
}
\item{y}{
Vector of time series values, dependent variables.
}
\item{nvar}{
Number of variables or dimension of x matrix.
}
\item{lags}{
Time delays used in constructing the x matrix.
}
\item{start}{
Observation number of univariate time series used for the start of the
y vector.
}
\item{end}{
Observation number of univariate time series used for the end of the
y vector.
}
\item{skip}{
Information about which columns of the returned X matrix are covariates.
}}
\description{
This function is used to create the appropriate data structure for
a nonlinear autoregressive process of the form X_t = F(X_t-1) + e_t.
}
\seealso{
nnreg, rossler
}
\examples{
make.lags(rossler.state[,1],c(1,2,3)) -> data
# create
# 3-d time delay vector model of the x variable of rossler system.
nnreg(data$x,data$y,5,5) -> fit # fit time series model using nnreg.
# fitting a state space model to the rossler state vector
# only one lag is neede in this case.
make.lags(rossler.state, lags=c(1))-> data
nnreg( data$x, data$y[,1], 5,5)-> fit1
nnreg( data$x, data$y[,2], 5,5)-> fit2
nnreg( data$x, data$y[,3], 5,5)-> fit3
}
\keyword{FUNFITS}
% Converted by Sd2Rd version 0.2-a3.
| /man/make.lags.bak.Rd | no_license | cran/funfits | R | false | false | 1,847 | rd | \name{make.lags}
\title{
Lags vectors and covariates correctly so that an autoregressive model
can be estimated by regression.
}
\usage{
make.lags(x, lags, cov,nobs=3500)
}
\arguments{
\item{x}{
Vector or matrix representing a univariate or multivariate time series.
(rows are assumed to idex time)
}
\item{lags}{
Vector of time delays used in reconstruction.
}
\item{nobs}{
Maximum length of time series.
}
\item{cov}{
A vector or matrix of covariates that will be matched with the times for
the independent varaible
}}
\value{
\item{x}{
Matrix of lagged values of the time series, independent variables.
The covaraites are the last columns of this matrix
}
\item{y}{
Vector of time series values, dependent variables.
}
\item{nvar}{
Number of variables or dimension of x matrix.
}
\item{lags}{
Time delays used in constructing the x matrix.
}
\item{start}{
Observation number of univariate time series used for the start of the
y vector.
}
\item{end}{
Observation number of univariate time series used for the end of the
y vector.
}
\item{skip}{
Information about which columns of the returned X matrix are covariates.
}}
\description{
This function is used to create the appropriate data structure for
a nonlinear autoregressive process of the form X_t = F(X_t-1) + e_t.
}
\seealso{
nnreg, rossler
}
\examples{
make.lags(rossler.state[,1],c(1,2,3)) -> data
# create
# 3-d time delay vector model of the x variable of rossler system.
nnreg(data$x,data$y,5,5) -> fit # fit time series model using nnreg.
# fitting a state space model to the rossler state vector
# only one lag is neede in this case.
make.lags(rossler.state, lags=c(1))-> data
nnreg( data$x, data$y[,1], 5,5)-> fit1
nnreg( data$x, data$y[,2], 5,5)-> fit2
nnreg( data$x, data$y[,3], 5,5)-> fit3
}
\keyword{FUNFITS}
% Converted by Sd2Rd version 0.2-a3.
|
base <- read.csv("C:/Users/Disponible/Documents/Nueva carpeta/CORPUS/portales_1M.csv", fileEncoding = 'UTF-8')
base2 <- read.csv("C:/Users/Disponible/Documents/Nueva carpeta/CORPUS/portales_1.csv")
base$texto_simple <- NULL
base$salario_men_med <- NULL
base$termino_simp <- NULL
base$termino <- NULL
base$X <- NULL
base$Unnamed <- NULL
names(base)
# base$'' = c(0:1132251)
b <- data.frame(b,"X" = c(0:1064624))
# names(base)
table(a$X)
base <- base [ ,c(25,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24)]
names(base)
names(base2)
names(base) = c ("x", "empresa", "puesto", "categoria", "publicacion", "salario", "dedicacion", "contrato",
"area", "texto", "fconsul","url", "portal", "contrato_beca", "contrato_indefinido", "contrato_otro",
"contrato_comision", "contrato_temporal", "salariof", "periodo_salario", "tiene_dec", "salarioi",
"salario_men_med2","cve_mun", "alcaldia")
carpeta <- "C:/Users/Disponible/Documents/Nueva carpeta/CORPUS/"
write.csv(base, paste0(carpeta, "portales_intento", ".csv"), fileEncoding = "UTF-8")
###############################################################################################
######## Para unir la de Portales estrucurado con la de perl
a <- read.csv("C:/Users/Disponible/Documents/Nueva carpeta/CORPUS/portales_intento_2.csv", fileEncoding = 'UTF-8')
bb <- read.csv("C:/Users/Disponible/Documents/Nueva carpeta/ANALYSE/fichero_analisis.csv", fileEncoding = 'UTF-8')
a$X <- as.factor(a$X)
b <- b[b$X.ID !="#----------------------------------------------------------------------------------------------------", ]
b$X <- NULL
b$X.81 <- NULL
b$X.82 <- NULL
b$X.83 <- NULL
b$X.84 <- NULL
b$X.85 <- NULL
b$X.86 <- NULL
b$X.87 <- NULL
b$X.88 <- NULL
b$X.89 <- NULL
b$X.90 <- NULL
ab <-full_join(a,b, by= c("X"="X.ID"))
c <- anti_join(a,b, by =c("X"="X.ID"))
carpeta_2 <- "C:/Users/Disponible/Documents/Nueva carpeta/"
write.csv(ab, paste0(carpeta_2, "portales_union", ".csv"), fileEncoding = "UTF-8")
| /Procesamiento base perl.R | no_license | amaurydata100tific/Procesamiento-base-para-perl | R | false | false | 2,038 | r | base <- read.csv("C:/Users/Disponible/Documents/Nueva carpeta/CORPUS/portales_1M.csv", fileEncoding = 'UTF-8')
base2 <- read.csv("C:/Users/Disponible/Documents/Nueva carpeta/CORPUS/portales_1.csv")
base$texto_simple <- NULL
base$salario_men_med <- NULL
base$termino_simp <- NULL
base$termino <- NULL
base$X <- NULL
base$Unnamed <- NULL
names(base)
# base$'' = c(0:1132251)
b <- data.frame(b,"X" = c(0:1064624))
# names(base)
table(a$X)
base <- base [ ,c(25,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24)]
names(base)
names(base2)
names(base) = c ("x", "empresa", "puesto", "categoria", "publicacion", "salario", "dedicacion", "contrato",
"area", "texto", "fconsul","url", "portal", "contrato_beca", "contrato_indefinido", "contrato_otro",
"contrato_comision", "contrato_temporal", "salariof", "periodo_salario", "tiene_dec", "salarioi",
"salario_men_med2","cve_mun", "alcaldia")
carpeta <- "C:/Users/Disponible/Documents/Nueva carpeta/CORPUS/"
write.csv(base, paste0(carpeta, "portales_intento", ".csv"), fileEncoding = "UTF-8")
###############################################################################################
######## Para unir la de Portales estrucurado con la de perl
a <- read.csv("C:/Users/Disponible/Documents/Nueva carpeta/CORPUS/portales_intento_2.csv", fileEncoding = 'UTF-8')
bb <- read.csv("C:/Users/Disponible/Documents/Nueva carpeta/ANALYSE/fichero_analisis.csv", fileEncoding = 'UTF-8')
a$X <- as.factor(a$X)
b <- b[b$X.ID !="#----------------------------------------------------------------------------------------------------", ]
b$X <- NULL
b$X.81 <- NULL
b$X.82 <- NULL
b$X.83 <- NULL
b$X.84 <- NULL
b$X.85 <- NULL
b$X.86 <- NULL
b$X.87 <- NULL
b$X.88 <- NULL
b$X.89 <- NULL
b$X.90 <- NULL
ab <-full_join(a,b, by= c("X"="X.ID"))
c <- anti_join(a,b, by =c("X"="X.ID"))
carpeta_2 <- "C:/Users/Disponible/Documents/Nueva carpeta/"
write.csv(ab, paste0(carpeta_2, "portales_union", ".csv"), fileEncoding = "UTF-8")
|
#!/usr/bin/env Rscript
#----------------------------------------------------------------------------------#
# Constructs phenotypes from WLS data
# Author: Joel Becker
# Notes:
#
#----------------------------------------------------------------------------------#
########################################################
######################## Set-up ########################
########################################################
# load libraries
packages <- c("data.table", "foreign", "dplyr", "tidyr", "Rmpfr", "sjmisc", "stringr")
new.packages <- packages[!(packages %in% installed.packages()[, "Package"])]
if(length(new.packages)) install.packages(new.packages)
lapply(packages, library, character.only = TRUE)
########################################################
####################### Load data ######################
########################################################
data <- fread("tmp/WLS_renamed.csv")
########################################################
################ Residualising function ################
########################################################
residualise <- function(data, age_residualise=TRUE, nosex=FALSE) {
# residualise within-wave, record missings
if (age_residualise==TRUE) { # if residualising on age
reg <- summary(lm(pheno ~ age + age2 + male + male_age + male_age2, data))
} else if (nosex==FALSE) { # else if residualising on cohort
reg <- summary(lm(pheno ~ dob + dob2 + male + male_dob + male_dob2, data))
} else {
reg <- summary(lm(pheno ~ dob + dob2, data))
}
sel <- which(!is.na(data$pheno)) # which observations do we have proxy phenotype for?
data$resid <- NA
data$resid[sel] <- reg$resid
data$missing <- 1
data$missing[sel] <- 0
# standardise residuals
mean <- mean(data$resid); sd <- sd(data$resid)
data$std_resid <- (data$resid - mean) / sd
return(data)
}
residualise.average.save <- function(data, average=TRUE, age_residualise=TRUE, name, nosex=FALSE, neb=FALSE) {
  # Residualise a phenotype (optionally within each survey wave),
  # standardise, and write the result to input/WLS/<name>.pheno.
  #
  # Args:
  #   data:            long data.frame with id, respondent_type, pheno,
  #                    the regressors residualise() needs, and (when
  #                    average=TRUE) a `wave` column.
  #   average:         TRUE  -> residualise within each wave, then
  #                             average a person's standardised
  #                             residuals across waves;
  #                    FALSE -> one regression over all rows.
  #   age_residualise: passed through to residualise().
  #   name:            output file stem (input/WLS/<name>.pheno).
  #   nosex:           passed to residualise() only when average=FALSE.
  #   neb:             if TRUE, additionally write sex-specific files
  #                    NEBmen.pheno / NEBwomen.pheno residualised
  #                    without sex terms.
  #
  # Side effects: writes one or three CSV files; returns nothing useful.
  #
  # residualise within wave?
  if (average==TRUE) { # yes
    # get data column names for new df
    df <- data[FALSE,]
    # list waves, to run regressions separately
    waves <- sort(unique(data$wave))
    for (i in waves) {
      # residualise within-wave
      data_wave <- filter(data, wave==i)
      # NOTE(review): nosex is ignored on this branch -- confirm intended
      data_wave <- residualise(data=data_wave, age_residualise=age_residualise)
      # record this wave
      df <- rbind(df, data_wave)
    }
    # average residuals within-wave
    df <- df %>%
      group_by(id, respondent_type) %>%
      summarise(phenotype = mean(std_resid, na.rm=TRUE)) %>%
      ungroup() %>%
      filter(!is.na(phenotype))
  } else if (average==FALSE) { # no
    # residualise (ignoring waves)
    df <- data %>%
      residualise(., age_residualise = age_residualise, nosex=nosex) %>%
      select(id, respondent_type, phenotype = std_resid)
    if (neb==TRUE) { # if NEB, split by sex
      # sex-specific residualisation: no sex terms in the model
      nebmen <- data %>%
        filter(male == 1) %>%
        residualise(., age_residualise = age_residualise, nosex=neb) %>%
        select(id, respondent_type, phenotype = std_resid)
      nebwom <- data %>%
        filter(male == 0) %>%
        residualise(., age_residualise = age_residualise, nosex=neb) %>%
        select(id, respondent_type, phenotype=std_resid)
      fwrite(nebmen, "input/WLS/NEBmen.pheno")
      fwrite(nebwom, "input/WLS/NEBwomen.pheno")
    }
  }
  # save data
  fwrite(df, paste0("input/WLS/", name, ".pheno"))
}
########################################################
############# Construct phenotype: activity ############
########################################################
# Phenotype: physical activity, waves 1993/2004/2011.
# gather -> separate -> spread reshapes the wave-suffixed columns into
# one row per person-wave; age controls are built per row.
activity <- data %>%
  select(id_old,
         id,
         respondent_type,
         yob,
         contains("age"),
         male,
         african_american,
         contains("activity")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("activity_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = activity) %>% # NOTE(review): no reversal here -- presumably reverse-coded upstream; confirm
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within-wave, average across waves, save as ACTIVITY
residualise.average.save(data=activity, average=TRUE, age_residualise=TRUE, name="ACTIVITY")
########################################################
############### Construct phenotype: ADHD ##############
########################################################
# Phenotype: ADHD score, 2011 wave only (other waves dropped below).
ADHD <- data %>%
  select(id_old,
         id,
         respondent_type,
         yob,
         contains("age"),
         male,
         african_american,
         contains("ADHD")) %>%
  select(-contains("1957"),
         -contains("1975"),
         -contains("1993"),
         -contains("2004")) %>%
  gather(key = "wave", value = "value",
         paste0("ADHD_", 2011),
         paste0("age_", 2011)) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = ADHD) %>% # reverse coded in construction stage
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# single wave, so no within-wave averaging needed
residualise.average.save(data=ADHD, average=FALSE, age_residualise=TRUE, name="ADHD")
########################################################
######### Construct phenotype: age first birth #########
########################################################
# Phenotype: age at first birth. Prefer the 2011 report, fall back to
# 1975; residualised on birth cohort (dob), not interview age.
AFB <- data %>%
  select(id_old,
         id,
         respondent_type,
         yob,
         contains("age"),
         male,
         african_american,
         contains("AFB")) %>%
  select(-contains("1957"), -contains("1993"), -contains("2004")) %>%
  mutate(AFB = case_when(!is.na(AFB_2011) ~ AFB_2011, TRUE ~ AFB_1975),
         dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = AFB) %>% # NOTE(review): no reversal here -- presumably reverse-coded upstream; confirm
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort, save as AFB
residualise.average.save(data=AFB, average=FALSE, age_residualise=FALSE, name="AFB")
########################################################
######### Construct phenotype: age first menses ########
########################################################
# Phenotype: age at menarche (female-only, hence no sex terms and
# nosex=TRUE below). Averages the 1993 and 2004 reports per person,
# then keeps one row per person.
AFM <- data %>%
  select(id_old,
         id,
         respondent_type,
         yob,
         contains("age"),
         african_american,
         contains("AFM")) %>%
  select(-contains("1957"), -contains("1975"), -contains("2011")) %>%
  gather(key="wave", value="value",
         paste0("AFM_", c(1993, 2004)), paste0("age_", c(1993, 2004))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  group_by(id, respondent_type) %>%
  mutate(AFM_average = mean(AFM, na.rm=TRUE),
         row_n = row_number()) %>%
  filter(row_n == 1) %>%
  ungroup() %>%
  mutate(dob = yob,
         dob2 = yob^2,
         pheno = AFM_average) %>% # NOTE(review): no reversal here -- presumably reverse-coded upstream; confirm
  select(id, respondent_type, wave, pheno, dob, dob2) %>%
  drop_na()
# residualise on cohort without sex terms, save as MENARCHE
residualise.average.save(data=AFM, average=FALSE, age_residualise=FALSE, nosex=TRUE, name="MENARCHE")
########################################################
########## Construct phenotype: agreeableness ##########
########################################################
#agree <- data %>%
# select(id_old,
# id,
# respondent_type,
# yob,
# contains("age"),
# male,
# african_american,
# contains("agree")) %>%
# select(-contains("phone"), -contains("nanswered")) %>%
# select(-contains("1957"), -contains("1975")) %>%
# gather(key = "wave", value = "value",
# paste0("agree_", c(1993, 2004, 2011)),
# paste0("age_", c(1993, 2004, 2011))) %>%
# separate("wave", c("var", "wave")) %>%
# spread("var", "value") %>%
# mutate(age2 = age^2,
# male_age = male * age,
# male_age2 = male * age2,
# pheno = agree) %>%
# select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
# drop_na()
# residualise, average, save
#residualise.average.save(data=agree, average=TRUE, age_residualise=TRUE, name="AGREE")
########################################################
############## Construct phenotype: asthma #############
########################################################
# Phenotype: asthma (binary), 1993 and 2011 waves. Coded 1 if reported
# in ANY wave (max over waves); one row kept per person.
asthma <- data %>%
  select(id_old,
         id,
         respondent_type,
         yob,
         contains("age"),
         male,
         african_american,
         contains("asthma"),
         -contains("hayfever")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("asthma_", c(1993, 2011)), paste0("age_", c(1993, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  group_by(id, respondent_type) %>%
  mutate(asthma = max(asthma, na.rm=TRUE),
         row_n = row_number()) %>%
  filter(row_n == 1) %>%
  ungroup() %>%
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = asthma) %>%
  filter(pheno >= 0) %>% # drops -Inf from all-NA groups and negative codes (presumably missing-data codes -- confirm)
  select(id, respondent_type, wave, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort, save as ASTHMA
residualise.average.save(data=asthma, average=FALSE, age_residualise=FALSE, name="ASTHMA")
########################################################
########## Construct phenotype: asthmahayfever #########
########################################################
# Phenotype: asthma and/or hay-fever (ASTECZRHI), 2011 wave only.
# Case = either condition reported; control = neither. Rows where the
# combined count is NA or negative fall through case_when as NA and
# are removed by drop_na().
asthmahayfever <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male,
         african_american, contains("asthma_2011"), contains("hayfever_2011")) %>%
  mutate(n_conditions = asthma_2011 + hayfever_2011,
         dob          = yob,
         dob2         = yob^2,
         male_dob     = male * dob,
         male_dob2    = male * dob2,
         pheno        = case_when(n_conditions > 0 ~ 1,
                                  n_conditions == 0 ~ 0)) %>%
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on birth cohort, write input/WLS/ASTECZRHI.pheno
residualise.average.save(data = asthmahayfever, average = FALSE,
                         age_residualise = FALSE, name = "ASTECZRHI")
########################################################
############## Construct phenotype: audit ##############
########################################################
# Phenotype: AUDIT alcohol-use score, waves 1993/2004/2011.
# Reshape to long (one row per person-wave), add age controls,
# residualise within wave and average across waves.
audit <- data %>%
  select(id_old,
         id,
         respondent_type,
         yob,
         contains("age"),
         male,
         african_american,
         contains("audit")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("audit_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = audit) %>% # NOTE(review): no reversal here -- presumably reverse-coded upstream; confirm
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise, average, save (TRUE spelled out -- T/F are reassignable)
residualise.average.save(data=audit, average=TRUE, age_residualise=TRUE, name="AUDIT")
########################################################
############### Construct phenotype: bmi ###############
########################################################
# Phenotype: body-mass index, waves 1957/1993/2004/2011.
# Residualised within each wave (BMI varies with age) then averaged.
bmi <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("bmi")) %>%
  select(-contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("bmi_", c(1957, 1993, 2004, 2011)), paste0("age_", c(1957, 1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = bmi) %>%
  filter(pheno >= 0) %>% # negative values presumably missing-data codes -- confirm
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within-wave, average, save as BMI
residualise.average.save(data=bmi, average=TRUE, age_residualise=TRUE, name="BMI")
########################################################
########### Construct phenotype: cat allergy ###########
########################################################
# Phenotype: cat allergy, 2011 wave only.
cat <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("cat")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993"), -contains("2004")) %>%
  gather(key="wave", value="value",
         paste0("cat_", 2011), paste0("age_", 2011)) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = cat) %>%
  filter(pheno >= 0) %>% # negative values presumably missing-data codes -- confirm
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# single wave, so no averaging; save as ALLERGYCAT
residualise.average.save(data=cat, average=FALSE, age_residualise=TRUE, name="ALLERGYCAT")
########################################################
########## Construct phenotype: conscientious ##########
########################################################
# Phenotype: conscientiousness (Big Five), waves 1993/2004/2011.
# Phone-interview variants excluded via -contains("phone").
consc <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("consc"), -contains("phone")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("consc_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = consc) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within-wave, average, save
residualise.average.save(data=consc, average=TRUE, age_residualise=TRUE, name="CONSCIENTIOUSNESS")
########################################################
############### Construct phenotype: COPD ##############
########################################################
# Phenotype: COPD (binary), waves 1993/2004/2011. Coded 1 if reported
# in ANY wave (max over waves). max() over an all-NA group yields -Inf
# (with a warning); that and any other non-0/1 codes are removed by the
# %in% filter below.
copd <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("copd"), -contains("phone")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("copd_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = copd) %>%
  group_by(id, respondent_type) %>%
  mutate(pheno = max(pheno, na.rm = TRUE)) %>% # TRUE, not T (T is reassignable)
  ungroup() %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  filter(pheno %in% 0:1) %>%
  drop_na()
# residualise within-wave, average, save as COPD
residualise.average.save(data=copd, average=TRUE, age_residualise=TRUE, name="COPD")
########################################################
########### Construct phenotype: cigs per day ##########
########################################################
# Phenotype: cigarettes per day, waves 1993/2004/2011.
CPD <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("CPD"), -contains("phone")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("CPD_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = CPD) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within-wave, average, save
residualise.average.save(data=CPD, average=TRUE, age_residualise=TRUE, name="CPD")
########################################################
############ Construct phenotype: depression ###########
########################################################
# Phenotype: depression score, waves 1993/2004/2011.
depr <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("depr"), -contains("nanswered")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("depr_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = depr) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within-wave, average, save as DEP
residualise.average.save(data=depr, average=TRUE, age_residualise=TRUE, name="DEP")
########################################################
############### Construct phenotype: DPW ###############
########################################################
# Phenotype: drinks per week, waves 1993/2004/2011.
dpw <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("dpw"), -contains("nanswered")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("dpw_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = dpw) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within-wave, average, save
residualise.average.save(data=dpw, average=TRUE, age_residualise=TRUE, name="DPW")
########################################################
########### Construct phenotype: dust allergy ##########
########################################################
# Phenotype: dust allergy, 2011 wave only.
dust <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("dust")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993"), -contains("2004")) %>%
  gather(key="wave", value="value",
         paste0("dust_", 2011), paste0("age_", 2011)) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = dust) %>%
  filter(pheno >= 0) %>% # negative values presumably missing-data codes -- confirm
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# single wave, so no averaging; save as ALLERGYDUST
residualise.average.save(data=dust, average=FALSE, age_residualise=TRUE, name="ALLERGYDUST")
########################################################
################ Construct phenotype: EA ###############
########################################################
# Phenotype: educational attainment. Use the 2011 report when present,
# otherwise fall back to 2004 (values barely change between waves).
# Residualised on birth cohort.
EA <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male,
         african_american, contains("EA_")) %>%
  mutate(dob       = yob,
         dob2      = yob^2,
         male_dob  = male * dob,
         male_dob2 = male * dob2,
         pheno     = coalesce(EA_2011, EA_2004)) %>% # first non-missing report
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort, write input/WLS/EA.pheno
residualise.average.save(data = EA, average = FALSE,
                         age_residualise = FALSE, name = "EA")
########################################################
########### Construct phenotype: ever smoker ###########
########################################################
# Phenotype: ever smoked (binary). Coded 1 if reported in ANY of the
# 1993/2004/2011 waves (max over waves); one row kept per person.
eversmoke <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("eversmoke")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("eversmoke_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  group_by(id, respondent_type) %>%
  mutate(eversmoke_max = max(eversmoke, na.rm=TRUE),
         row_n = row_number()) %>%
  ungroup() %>%
  filter(row_n == 1 & eversmoke_max >= 0) %>% # >= 0 also drops -Inf from all-NA groups
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = eversmoke_max) %>%
  select(id, respondent_type, wave, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort, save as EVERSMOKE
residualise.average.save(data=eversmoke, average=FALSE, age_residualise=FALSE, name="EVERSMOKE")
########################################################
########### Construct phenotype: extraversion ##########
########################################################
# Phenotype: extraversion (Big Five), waves 1993/2004/2011.
extra <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("extra"), -contains("nanswered"), -contains("phone")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("extra_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = extra) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within-wave, average, save
residualise.average.save(data=extra, average=TRUE, age_residualise=TRUE, name="EXTRA")
########################################################
####### Construct phenotype: family satisfaction #######
########################################################
# Phenotype: family satisfaction, 2004 wave only.
# NOTE(review): labelled "reverse code" in earlier versions but no
# reversal happens here -- presumably reverse-coded upstream; confirm.
famsat <- data %>%
  transmute(id,
            respondent_type,
            pheno     = famsat_2004,
            age       = age_2004,
            age2      = age_2004^2,
            male,
            male_age  = male * age_2004,
            male_age2 = male * age_2004^2) %>%
  drop_na()
# single wave, residualise on age, write input/WLS/FAMSAT.pheno
residualise.average.save(data = famsat, average = FALSE,
                         age_residualise = TRUE, name = "FAMSAT")
########################################################
###### Construct phenotype: financial satisfaction #####
########################################################
# Phenotype: financial satisfaction, waves 2004/2011.
finsat <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("finsat")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993")) %>%
  gather(key="wave", value="value",
         paste0("finsat_", c(2004, 2011)), paste0("age_", c(2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = finsat) %>% # NOTE(review): no reversal here -- presumably reverse-coded upstream; confirm
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within-wave, average, save
residualise.average.save(data=finsat, average=TRUE, age_residualise=TRUE, name="FINSAT")
########################################################
####### Construct phenotype: friend satisfaction #######
########################################################
# Phenotypes: two friend-satisfaction items, 2004 wave only,
# saved as separate FRIENDSAT1 / FRIENDSAT2 files.
friendsat1 <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("friendsat1")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993"), -contains("2011")) %>%
  mutate(age = age_2004, friendsat1 = friendsat1_2004, wave = 2004) %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = friendsat1) %>% # NOTE(review): no reversal here -- presumably reverse-coded upstream; confirm
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# second friend-satisfaction item, same construction
friendsat2 <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("friendsat2")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993"), -contains("2011")) %>%
  mutate(age = age_2004, friendsat2 = friendsat2_2004, wave = 2004) %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = friendsat2) %>% # NOTE(review): no reversal here -- presumably reverse-coded upstream; confirm
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise, save each item separately
residualise.average.save(data=friendsat1, average=TRUE, age_residualise=TRUE, name="FRIENDSAT1")
residualise.average.save(data=friendsat2, average=TRUE, age_residualise=TRUE, name="FRIENDSAT2")
########################################################
############ Construct phenotype: hay-fever ############
########################################################
# Phenotype: hay-fever (binary), 2011 wave only; residualised on
# birth cohort.
hayfever <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("hayfever"), -contains("asthma")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993"), -contains("2004")) %>%
  gather(key="wave", value="value",
         paste0("hayfever_", 2011), paste0("age_", 2011)) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = hayfever) %>%
  filter(pheno >= 0) %>% # negative values presumably missing-data codes -- confirm
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort, save as HAYFEVER
residualise.average.save(data=hayfever, average=FALSE, age_residualise=FALSE, name="HAYFEVER")
########################################################
############## Construct phenotype: height #############
########################################################
# Phenotype: adult height, averaged across the 1993/2004/2011 reports
# (height is ~stable, so the per-person mean is used and one row kept).
height <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("height")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("height_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  group_by(id, respondent_type) %>%
  mutate(height = mean(height, na.rm=TRUE),
         rn = row_number()) %>%
  ungroup() %>%
  filter(rn == 1) %>%
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = height) %>%
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort, save as HEIGHT
residualise.average.save(data=height, average=FALSE, age_residualise=FALSE, name="HEIGHT")
########################################################
########### Construct phenotype: intelligence ##########
########################################################
# Phenotype: cognitive performance (CP) from the 1957 and 1975 IQ
# measures, residualised on birth cohort.
# NOTE(review): with average=FALSE each person can appear once per
# wave in the output file -- confirm that is intended.
intelligence <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("intelligence_")) %>%
  gather(key="wave", value="value", paste0("intelligence_", c(1957, 1975)), paste0("age_", c(1957, 1975))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = intelligence) %>%
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na() # a single drop_na suffices (duplicate call removed)
# residualise on cohort, save as CP
residualise.average.save(data=intelligence, average=FALSE, age_residualise=FALSE, name="CP")
########################################################
######### Construct phenotype: left out social #########
########################################################
#leftoutsocial <- data %>%
# select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("leftoutsocial"), -contains("phone")) %>%
# select(-contains("1957"), -contains("1975")) %>%
# gather(key="wave", value="value",
# paste0("leftoutsocial_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
# separate("wave", c("var", "wave")) %>%
# spread("var", "value") %>%
# mutate(age2 = age^2,
# male_age = male * age,
# male_age2 = male * age2,
# pheno = leftoutsocial) %>%
# select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
# drop_na()
# residualise, average, save
#residualise.average.save(data=leftoutsocial, average=TRUE, age_residualise=TRUE, name="LEFTOUT")
########################################################
############## Construct phenotype: lonely #############
########################################################
# Phenotype: loneliness, waves 1993/2004/2011.
lonely <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("lonely")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("lonely_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = lonely) %>% # NOTE(review): no reversal here -- presumably reverse-coded upstream; confirm
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within-wave, average, save
residualise.average.save(data=lonely, average=TRUE, age_residualise=TRUE, name="LONELY")
########################################################
############# Construct phenotype: migraine ############
########################################################
# (disabled: migraine construction kept for reference)
#migraine <- data %>%
#  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("migraine")) %>%
#  select(-contains("1957"), -contains("1975")) %>%
#  gather(key="wave", value="value",
#         paste0("migraine_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
#  separate("wave", c("var", "wave")) %>%
#  spread("var", "value") %>%
#  mutate(age2 = age^2,
#         male_age = male * age,
#         male_age2 = male * age2,
#         pheno = migraine) %>%
#  filter(pheno >= 0) %>%
#  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
#  drop_na()
# residualise, average, save
#residualise.average.save(data=migraine, average=TRUE, age_residualise=TRUE, name="MIGRAINE")
########################################################
######### Construct phenotype: number ever born ########
########################################################
# Number of children ever born: a single cross-sectional measure, so it is
# residualised on birth cohort (not age), and sex-specific files are also
# written (neb=TRUE).
NEB <- data %>%
  select(id_old, id, respondent_type, yob, male, african_american, contains("NEB")) %>%
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = NEB) %>% # NOTE(review): flagged "reverse code" originally but no reversal happens here -- confirm upstream coding
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort; writes NEB.pheno plus NEBmen/NEBwomen sex-specific files
residualise.average.save(data=NEB, average=FALSE, age_residualise=FALSE, name="NEB", neb=TRUE)
########################################################
########### Construct phenotype: neuroticism ###########
########################################################
# Neuroticism (big-five), waves 1993/2004/2011; phone-mode and item-count
# columns are excluded before reshaping to long format.
neur <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("neur")) %>%
  select(-contains("phone"), -contains("nanswered")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("neur_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = neur) %>% # NOTE(review): flagged "reverse code" originally but no reversal happens here -- confirm upstream coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/NEURO.pheno
residualise.average.save(data=neur, average=TRUE, age_residualise=TRUE, name="NEURO")
########################################################
############# Construct phenotype: openness ############
########################################################
# Openness (big-five), waves 1993/2004/2011.
open <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("open"), -contains("phone")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("open_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = open) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/OPEN.pheno
residualise.average.save(data=open, average=TRUE, age_residualise=TRUE, name="OPEN")
########################################################
########## Construct phenotype: pollen allergy #########
########################################################
# Pollen allergy, asked only in 2011; single wave so no cross-wave averaging.
pollen <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("pollen")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993"), -contains("2004")) %>%
  gather(key="wave", value="value",
         paste0("pollen_", 2011), paste0("age_", 2011)) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = pollen) %>%
  filter(pheno >= 0) %>% # drop negative survey missing-codes
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise on age, write input/WLS/ALLERGYPOLLEN.pheno
residualise.average.save(data=pollen, average=FALSE, age_residualise=TRUE, name="ALLERGYPOLLEN")
########################################################
########### Construct phenotype: religiosity ###########
########################################################
# Religious attendance, waves 1975/1993/2004/2011; phone/mail-mode columns
# and the 1957 wave are excluded before reshaping to long format.
relig <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("relig")) %>%
  select(-contains("1957"), -contains("phone"), -contains("mail")) %>%
  gather(key="wave", value="value",
         paste0("relig_", c(1975, 1993, 2004, 2011)), paste0("age_", c(1975, 1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = relig) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na() # a single drop_na() suffices (original chained it twice, redundantly)
# residualise within wave, average across waves, write input/WLS/RELIGATT.pheno
residualise.average.save(data=relig, average=TRUE, age_residualise=TRUE, name="RELIGATT")
########################################################
############### Construct phenotype: risk ##############
########################################################
# Risk tolerance: three survey items (risk5/risk9/risk11), all asked in the
# 2011 wave.  The item number is stored in the "wave" column so that
# residualise.average.save() averages residuals across items, not waves.
risk <- data %>%
  select(id_old, id, respondent_type, yob, contains("age_2011"), male, african_american, contains("risk")) %>%
  select(-contains("losing")) %>%
  gather(key="wave", value="value",
         paste0("risk", c(5, 9, 11), "_2011")) %>%
  separate("wave", c("wave", "var")) %>%
  mutate(age = age_2011,
         age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = value,
         wave = str_remove(wave, "risk")) %>% # keep only the item number (5/9/11)
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# Same construction for the "risk of losing" item set.
risk_loss <- data %>%
  select(id_old, id, respondent_type, yob, contains("age_2011"), male, african_american, contains("risklosing")) %>%
  gather(key="wave", value="value",
         paste0("risklosing", c(5, 9, 11), "_2011")) %>%
  separate("wave", c("wave", "var")) %>%
  mutate(age = age_2011,
         age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = value,
         wave = str_remove(wave, "risklosing")) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise, average across items, save
residualise.average.save(data=risk, average=TRUE, age_residualise=TRUE, name="RISK")
residualise.average.save(data=risk_loss, average=TRUE, age_residualise=TRUE, name="RISKLOSS")
########################################################
######## Construct phenotype: self-rated health ########
########################################################
# Self-rated health, waves 1993/2004/2011.
selfhealth <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("selfhealth")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("selfhealth_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = selfhealth) %>% # NOTE(review): flagged "reverse code" originally but no reversal happens here -- confirm upstream coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/SELFHEALTH.pheno
residualise.average.save(data=selfhealth, average=TRUE, age_residualise=TRUE, name="SELFHEALTH")
########################################################
###### Construct phenotype: subjective well-being ######
########################################################
# Subjective well-being, waves 1993/2004/2011.
SWB <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("SWB")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("SWB_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = SWB) %>% # NOTE(review): flagged "reverse code" originally but no reversal happens here -- confirm upstream coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/SWB.pheno
residualise.average.save(data=SWB, average=TRUE, age_residualise=TRUE, name="SWB")
########################################################
######## Construct phenotype: work satisfaciton ########
########################################################
# Work satisfaction, waves 1975/1993/2004/2011 (1957 excluded).
worksat <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("worksat")) %>%
  select(-contains("1957")) %>%
  gather(key="wave", value="value",
         paste0("worksat_", c(1975, 1993, 2004, 2011)), paste0("age_", c(1975, 1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = worksat) %>% # NOTE(review): flagged "reverse code" originally but no reversal happens here -- confirm upstream coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/WORKSAT.pheno
residualise.average.save(data=worksat, average=TRUE, age_residualise=TRUE, name="WORKSAT")
| /10_Prediction/10.1.1_construct_WLS_phenotypes.R | no_license | LabLemos/PGI_Repo | R | false | false | 38,820 | r | #!/usr/bin/env Rscript
#----------------------------------------------------------------------------------#
# Constructs phenotypes from WLS data
# Author: Joel Becker
# Notes:
#
#----------------------------------------------------------------------------------#
########################################################
######################## Set-up ########################
########################################################
# load libraries, installing any that are missing first
packages <- c("data.table", "foreign", "dplyr", "tidyr", "Rmpfr", "sjmisc", "stringr")
new.packages <- packages[!(packages %in% installed.packages()[, "Package"])]
# pin a CRAN mirror so installation also works in non-interactive sessions
if (length(new.packages) > 0) install.packages(new.packages, repos = "https://cloud.r-project.org")
# invisible() suppresses the list of package namespaces that lapply() returns
invisible(lapply(packages, library, character.only = TRUE))
########################################################
####################### Load data ######################
########################################################
# Wide-format WLS panel produced by the upstream renaming step.
data <- fread("tmp/WLS_renamed.csv")
########################################################
################ Residualising function ################
########################################################
residualise <- function(data, age_residualise=TRUE, nosex=FALSE) {
  # Regress the phenotype on age/cohort (and sex-interaction) controls and
  # return `data` with three new columns:
  #   resid     - raw regression residual (NA where pheno is missing)
  #   missing   - 1 if pheno was missing, 0 otherwise
  #   std_resid - residual standardised to mean 0 / sd 1
  #
  # Args:
  #   data: data.frame with columns pheno, male, and either
  #         age/age2/male_age/male_age2 (age_residualise=TRUE) or
  #         dob/dob2/male_dob/male_dob2 (age_residualise=FALSE)
  #   age_residualise: residualise on age (TRUE) or birth cohort (FALSE)
  #   nosex: with cohort residualisation, drop all sex terms
  #          (used for sex-specific phenotypes, e.g. menarche)
  if (age_residualise) {
    reg <- summary(lm(pheno ~ age + age2 + male + male_age + male_age2, data))
  } else if (!nosex) {
    reg <- summary(lm(pheno ~ dob + dob2 + male + male_dob + male_dob2, data))
  } else {
    reg <- summary(lm(pheno ~ dob + dob2, data))
  }
  sel <- which(!is.na(data$pheno)) # rows with an observed phenotype
  # Guard against silent recycling: lm() also drops rows with missing
  # covariates, in which case the residuals would not line up with sel.
  stopifnot(length(sel) == length(reg$residuals))
  data$resid <- NA
  data$resid[sel] <- reg$residuals
  data$missing <- 1
  data$missing[sel] <- 0
  # standardise residuals; na.rm so rows without a phenotype do not make the
  # mean/sd (and hence every std_resid) NA
  mu <- mean(data$resid, na.rm = TRUE)
  sigma <- sd(data$resid, na.rm = TRUE)
  data$std_resid <- (data$resid - mu) / sigma
  return(data)
}
# Residualise a long-format phenotype table, optionally average residuals
# across waves, and write the result to input/WLS/<name>.pheno.
#
# Args:
#   data: data.frame with id, respondent_type, pheno, the controls that
#         residualise() expects, and (if average=TRUE) a wave column
#   average: residualise within each wave and average the standardised
#            residuals per person (TRUE), or residualise once ignoring
#            waves (FALSE)
#   age_residualise: forwarded to residualise() (age vs cohort controls)
#   name: basename of the output file
#   nosex: forwarded to residualise() in the average=FALSE branch only
#   neb: additionally write sex-specific NEBmen/NEBwomen files
residualise.average.save <- function(data, average=TRUE, age_residualise=TRUE, name, nosex=FALSE, neb=FALSE) {
  # residualise within wave?
  if (average==TRUE) { # yes
    # get data column names for new df
    df <- data[FALSE,]
    # list waves, to run regressions separately
    waves <- sort(unique(data$wave))
    for (i in waves) {
      # residualise within-wave
      # NOTE(review): nosex is not forwarded here, so the wave-wise path
      # always includes sex terms -- confirm this is intended
      data_wave <- filter(data, wave==i)
      data_wave <- residualise(data=data_wave, age_residualise=age_residualise)
      # record this wave
      df <- rbind(df, data_wave)
    }
    # average residuals within-wave
    df <- df %>%
      group_by(id, respondent_type) %>%
      summarise(phenotype = mean(std_resid, na.rm=TRUE)) %>%
      ungroup() %>%
      filter(!is.na(phenotype))
  } else if (average==FALSE) { # no
    # residualise (ignoring waves)
    df <- data %>%
      residualise(., age_residualise = age_residualise, nosex=nosex) %>%
      select(id, respondent_type, phenotype = std_resid)
    if (neb==TRUE) { # if NEB, split by sex
      # nosex=neb (i.e. TRUE) drops sex terms, which are collinear within a
      # single-sex subsample
      nebmen <- data %>%
        filter(male == 1) %>%
        residualise(., age_residualise = age_residualise, nosex=neb) %>%
        select(id, respondent_type, phenotype = std_resid)
      nebwom <- data %>%
        filter(male == 0) %>%
        residualise(., age_residualise = age_residualise, nosex=neb) %>%
        select(id, respondent_type, phenotype=std_resid)
      fwrite(nebmen, "input/WLS/NEBmen.pheno")
      fwrite(nebwom, "input/WLS/NEBwomen.pheno")
    }
  }
  # save data
  fwrite(df, paste0("input/WLS/", name, ".pheno"))
}
########################################################
############# Construct phenotype: activity ############
########################################################
# Physical activity, waves 1993/2004/2011.
activity <- data %>%
  select(id_old,
         id,
         respondent_type,
         yob,
         contains("age"),
         male,
         african_american,
         contains("activity")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("activity_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = activity) %>% # NOTE(review): flagged "reverse code" originally but no reversal happens here -- confirm upstream coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/ACTIVITY.pheno
residualise.average.save(data=activity, average=TRUE, age_residualise=TRUE, name="ACTIVITY")
########################################################
############### Construct phenotype: ADHD ##############
########################################################
# ADHD score, asked only in 2011; single wave so no averaging.
ADHD <- data %>%
  select(id_old,
         id,
         respondent_type,
         yob,
         contains("age"),
         male,
         african_american,
         contains("ADHD")) %>%
  select(-contains("1957"),
         -contains("1975"),
         -contains("1993"),
         -contains("2004")) %>%
  gather(key = "wave", value = "value",
         paste0("ADHD_", 2011),
         paste0("age_", 2011)) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = ADHD) %>% # reverse coded in construction stage
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise on age, write input/WLS/ADHD.pheno
residualise.average.save(data=ADHD, average=FALSE, age_residualise=TRUE, name="ADHD")
########################################################
######### Construct phenotype: age first birth #########
########################################################
# Age at first birth: prefer the 2011 report, fall back to 1975.
# Residualised on birth cohort (a fixed life event, not an age-varying trait).
AFB <- data %>%
  select(id_old,
         id,
         respondent_type,
         yob,
         contains("age"),
         male,
         african_american,
         contains("AFB")) %>%
  select(-contains("1957"), -contains("1993"), -contains("2004")) %>%
  mutate(AFB = case_when(!is.na(AFB_2011) ~ AFB_2011, TRUE ~ AFB_1975),
         dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = AFB) %>% # NOTE(review): flagged "reverse code" originally but no reversal happens here -- confirm upstream coding
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort, write input/WLS/AFB.pheno
residualise.average.save(data=AFB, average=FALSE, age_residualise=FALSE, name="AFB")
########################################################
######### Construct phenotype: age first menses ########
########################################################
# Age at menarche (women only, so no sex controls): average the 1993/2004
# reports per person, keep one row per person, residualise on birth cohort.
AFM <- data %>%
  select(id_old,
         id,
         respondent_type,
         yob,
         contains("age"),
         african_american,
         contains("AFM")) %>%
  select(-contains("1957"), -contains("1975"), -contains("2011")) %>%
  gather(key="wave", value="value",
         paste0("AFM_", c(1993, 2004)), paste0("age_", c(1993, 2004))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  group_by(id, respondent_type) %>%
  mutate(AFM_average = mean(AFM, na.rm=TRUE), # per-person mean across waves
         row_n = row_number()) %>%
  filter(row_n == 1) %>% # keep one row per person
  ungroup() %>%
  mutate(dob = yob,
         dob2 = yob^2,
         pheno = AFM_average) %>% # NOTE(review): flagged "reverse code" originally but no reversal happens here -- confirm upstream coding
  select(id, respondent_type, wave, pheno, dob, dob2) %>%
  drop_na()
# residualise on cohort (nosex: female-only phenotype), write MENARCHE.pheno
residualise.average.save(data=AFM, average=FALSE, age_residualise=FALSE, nosex=TRUE, name="MENARCHE")
########################################################
########## Construct phenotype: agreeableness ##########
########################################################
# (disabled: agreeableness construction kept for reference)
#agree <- data %>%
#  select(id_old,
#         id,
#         respondent_type,
#         yob,
#         contains("age"),
#         male,
#         african_american,
#         contains("agree")) %>%
#  select(-contains("phone"), -contains("nanswered")) %>%
#  select(-contains("1957"), -contains("1975")) %>%
#  gather(key = "wave", value = "value",
#         paste0("agree_", c(1993, 2004, 2011)),
#         paste0("age_", c(1993, 2004, 2011))) %>%
#  separate("wave", c("var", "wave")) %>%
#  spread("var", "value") %>%
#  mutate(age2 = age^2,
#         male_age = male * age,
#         male_age2 = male * age2,
#         pheno = agree) %>%
#  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
#  drop_na()
# residualise, average, save
#residualise.average.save(data=agree, average=TRUE, age_residualise=TRUE, name="AGREE")
########################################################
############## Construct phenotype: asthma #############
########################################################
# Ever-had asthma: maximum over the 1993/2011 reports per person, one row
# per person, residualised on birth cohort.
asthma <- data %>%
  select(id_old,
         id,
         respondent_type,
         yob,
         contains("age"),
         male,
         african_american,
         contains("asthma"),
         -contains("hayfever")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("asthma_", c(1993, 2011)), paste0("age_", c(1993, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  group_by(id, respondent_type) %>%
  mutate(asthma = max(asthma, na.rm=TRUE), # all-NA groups yield -Inf (with a warning); removed by the pheno >= 0 filter below
         row_n = row_number()) %>%
  filter(row_n == 1) %>%
  ungroup() %>%
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = asthma) %>%
  filter(pheno >= 0) %>% # drops -Inf and negative missing-codes
  select(id, respondent_type, wave, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort, write input/WLS/ASTHMA.pheno
residualise.average.save(data=asthma, average=FALSE, age_residualise=FALSE, name="ASTHMA")
########################################################
########## Construct phenotype: asthmahayfever #########
########################################################
# Combined asthma/hay-fever indicator from the 2011 wave: 1 if either
# condition reported, 0 if neither (NA if either input is NA, via case_when).
asthmahayfever <- data %>%
  select(id_old,
         id,
         respondent_type,
         yob,
         contains("age"),
         male,
         african_american,
         contains("asthma_2011"),
         contains("hayfever_2011")) %>%
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = case_when(asthma_2011 + hayfever_2011 > 0 ~ 1,
                           asthma_2011 + hayfever_2011 == 0 ~ 0)) %>%
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort, write input/WLS/ASTECZRHI.pheno
residualise.average.save(data=asthmahayfever, average=FALSE, age_residualise=FALSE, name="ASTECZRHI")
########################################################
############## Construct phenotype: audit ##############
########################################################
# AUDIT alcohol-use score, waves 1993/2004/2011.
audit <- data %>%
  select(id_old,
         id,
         respondent_type,
         yob,
         contains("age"),
         male,
         african_american,
         contains("audit")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("audit_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = audit) %>% # NOTE(review): flagged "reverse code" originally but no reversal happens here -- confirm upstream coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/AUDIT.pheno
# (TRUE spelled out instead of the re-assignable shorthand T)
residualise.average.save(data=audit, average=TRUE, age_residualise=TRUE, name="AUDIT")
########################################################
############### Construct phenotype: bmi ###############
########################################################
# BMI, waves 1957/1993/2004/2011 (1975 has no measure).
bmi <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("bmi")) %>%
  select(-contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("bmi_", c(1957, 1993, 2004, 2011)), paste0("age_", c(1957, 1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = bmi) %>%
  filter(pheno >= 0) %>% # drop negative survey missing-codes
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/BMI.pheno
residualise.average.save(data=bmi, average=TRUE, age_residualise=TRUE, name="BMI")
########################################################
########### Construct phenotype: cat allergy ###########
########################################################
# Cat allergy, asked only in 2011.
cat <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("cat")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993"), -contains("2004")) %>%
  gather(key="wave", value="value",
         paste0("cat_", 2011), paste0("age_", 2011)) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = cat) %>%
  filter(pheno >= 0) %>% # drop negative survey missing-codes
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise on age, write input/WLS/ALLERGYCAT.pheno
residualise.average.save(data=cat, average=FALSE, age_residualise=TRUE, name="ALLERGYCAT")
########################################################
########## Construct phenotype: conscientious ##########
########################################################
# Conscientiousness (big-five), waves 1993/2004/2011; phone-mode columns excluded.
consc <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("consc"), -contains("phone")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("consc_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = consc) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write CONSCIENTIOUSNESS.pheno
residualise.average.save(data=consc, average=TRUE, age_residualise=TRUE, name="CONSCIENTIOUSNESS")
########################################################
############### Construct phenotype: COPD ##############
########################################################
# Ever-diagnosed COPD: take the per-person maximum (ever = 1) across the
# 1993/2004/2011 waves, keeping one row per wave for within-wave residualisation.
copd <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("copd"), -contains("phone")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("copd_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = copd) %>%
  group_by(id, respondent_type) %>%
  mutate(pheno = max(pheno, na.rm = TRUE)) %>% # TRUE spelled out (not T); all-NA groups give -Inf, removed by the 0:1 filter below
  ungroup() %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  filter(pheno %in% 0:1) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/COPD.pheno
residualise.average.save(data=copd, average=TRUE, age_residualise=TRUE, name="COPD")
########################################################
########### Construct phenotype: cigs per day ##########
########################################################
# Cigarettes per day, waves 1993/2004/2011.
CPD <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("CPD"), -contains("phone")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("CPD_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = CPD) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/CPD.pheno
residualise.average.save(data=CPD, average=TRUE, age_residualise=TRUE, name="CPD")
########################################################
############ Construct phenotype: depression ###########
########################################################
# Depression score, waves 1993/2004/2011; item-count columns excluded.
depr <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("depr"), -contains("nanswered")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("depr_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = depr) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/DEP.pheno
residualise.average.save(data=depr, average=TRUE, age_residualise=TRUE, name="DEP")
########################################################
############### Construct phenotype: DPW ###############
########################################################
# Drinks per week, waves 1993/2004/2011.
dpw <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("dpw"), -contains("nanswered")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("dpw_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = dpw) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/DPW.pheno
residualise.average.save(data=dpw, average=TRUE, age_residualise=TRUE, name="DPW")
########################################################
########### Construct phenotype: dust allergy ##########
########################################################
# Dust allergy, asked only in 2011.
dust <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("dust")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993"), -contains("2004")) %>%
  gather(key="wave", value="value",
         paste0("dust_", 2011), paste0("age_", 2011)) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = dust) %>%
  filter(pheno >= 0) %>% # drop negative survey missing-codes
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise on age, write input/WLS/ALLERGYDUST.pheno
residualise.average.save(data=dust, average=FALSE, age_residualise=TRUE, name="ALLERGYDUST")
########################################################
################ Construct phenotype: EA ###############
########################################################
# Educational attainment: prefer the 2011 value, fall back to 2004.
# Residualised on birth cohort.
EA <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("EA_")) %>%
  #mutate(EA_diff = EA_2011 - EA_2004) %>% # EA values barely change, as expected
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = case_when(!is.na(EA_2011) ~ EA_2011, TRUE ~ EA_2004)) %>%
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort, write input/WLS/EA.pheno
residualise.average.save(data=EA, average=FALSE, age_residualise=FALSE, name="EA")
########################################################
########### Construct phenotype: ever smoker ###########
########################################################
# Ever-smoker: per-person maximum over the 1993/2004/2011 reports, one row
# per person, residualised on birth cohort.
eversmoke <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("eversmoke")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("eversmoke_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  group_by(id, respondent_type) %>%
  mutate(eversmoke_max = max(eversmoke, na.rm=TRUE), # all-NA groups yield -Inf; removed by the >= 0 filter below
         row_n = row_number()) %>%
  ungroup() %>%
  filter(row_n == 1 & eversmoke_max >= 0) %>% # one row per person, valid codes only
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = eversmoke_max) %>%
  select(id, respondent_type, wave, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort, write input/WLS/EVERSMOKE.pheno
residualise.average.save(data=eversmoke, average=FALSE, age_residualise=FALSE, name="EVERSMOKE")
########################################################
########### Construct phenotype: extraversion ##########
########################################################
# Extraversion (big-five), waves 1993/2004/2011; phone-mode and item-count
# columns excluded.
extra <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("extra"), -contains("nanswered"), -contains("phone")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("extra_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = extra) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/EXTRA.pheno
residualise.average.save(data=extra, average=TRUE, age_residualise=TRUE, name="EXTRA")
########################################################
####### Construct phenotype: family satisfaction #######
########################################################
# Family satisfaction, asked only in 2004.
famsat <- data %>%
  select(id_old, id, respondent_type, yob, contains("age_2004"), male, african_american, contains("famsat")) %>%
  mutate(age = age_2004,
         age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = famsat_2004) %>% # NOTE(review): flagged "reverse code" originally but no reversal happens here -- confirm upstream coding
  select(id, respondent_type, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise on age, write input/WLS/FAMSAT.pheno
residualise.average.save(data=famsat, average=FALSE, age_residualise=TRUE, name="FAMSAT")
########################################################
###### Construct phenotype: financial satisfaction #####
########################################################
# Financial satisfaction, waves 2004/2011.
finsat <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("finsat")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993")) %>%
  gather(key="wave", value="value",
         paste0("finsat_", c(2004, 2011)), paste0("age_", c(2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = finsat) %>% # NOTE(review): flagged "reverse code" originally but no reversal happens here -- confirm upstream coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise within wave, average across waves, write input/WLS/FINSAT.pheno
residualise.average.save(data=finsat, average=TRUE, age_residualise=TRUE, name="FINSAT")
########################################################
####### Construct phenotype: friend satisfaction #######
########################################################
# Two friend-satisfaction items, both asked only in 2004; each produces its
# own phenotype file.
friendsat1 <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("friendsat1")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993"), -contains("2011")) %>%
  mutate(age = age_2004, friendsat1 = friendsat1_2004, wave = 2004) %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = friendsat1) %>% # NOTE(review): flagged "reverse code" originally but no reversal happens here -- confirm upstream coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
friendsat2 <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("friendsat2")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993"), -contains("2011")) %>%
  mutate(age = age_2004, friendsat2 = friendsat2_2004, wave = 2004) %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = friendsat2) %>% # NOTE(review): flagged "reverse code" originally but no reversal happens here -- confirm upstream coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise on age, write FRIENDSAT1/FRIENDSAT2 phenotype files
residualise.average.save(data=friendsat1, average=TRUE, age_residualise=TRUE, name="FRIENDSAT1")
residualise.average.save(data=friendsat2, average=TRUE, age_residualise=TRUE, name="FRIENDSAT2")
########################################################
############ Construct phenotype: hay-fever ############
########################################################
# Hay fever, asked only in 2011; residualised on birth cohort.
hayfever <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("hayfever"), -contains("asthma")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993"), -contains("2004")) %>%
  gather(key="wave", value="value",
         paste0("hayfever_", 2011), paste0("age_", 2011)) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = hayfever) %>%
  filter(pheno >= 0) %>% # drop negative survey missing-codes
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort, write input/WLS/HAYFEVER.pheno
residualise.average.save(data=hayfever, average=FALSE, age_residualise=FALSE, name="HAYFEVER")
########################################################
############## Construct phenotype: height #############
########################################################
# Adult height: per-person mean over the 1993/2004/2011 reports, one row per
# person, residualised on birth cohort.
height <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("height")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("height_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  group_by(id, respondent_type) %>%
  mutate(height = mean(height, na.rm=TRUE), # per-person mean across waves
         rn = row_number()) %>%
  ungroup() %>%
  filter(rn == 1) %>% # keep one row per person
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = height) %>%
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise on cohort, write input/WLS/HEIGHT.pheno
residualise.average.save(data=height, average=FALSE, age_residualise=FALSE, name="HEIGHT")
########################################################
########### Construct phenotype: intelligence ##########
########################################################
# Cognitive performance (IQ score), measured in 1957 and 1975; residualised
# on birth cohort.  NOTE(review): average=FALSE with two waves in long format
# means each respondent can contribute two rows to CP.pheno -- confirm this
# is intended downstream.
intelligence <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("intelligence_")) %>%
  gather(key="wave", value="value", paste0("intelligence_", c(1957, 1975)), paste0("age_", c(1957, 1975))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = intelligence) %>%
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na() # a single drop_na() suffices (original chained it twice, redundantly)
# residualise on cohort, write input/WLS/CP.pheno
residualise.average.save(data=intelligence, average=FALSE, age_residualise=FALSE, name="CP")
########################################################
######### Construct phenotype: left out social #########
########################################################
#leftoutsocial <- data %>%
# select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("leftoutsocial"), -contains("phone")) %>%
# select(-contains("1957"), -contains("1975")) %>%
# gather(key="wave", value="value",
# paste0("leftoutsocial_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
# separate("wave", c("var", "wave")) %>%
# spread("var", "value") %>%
# mutate(age2 = age^2,
# male_age = male * age,
# male_age2 = male * age2,
# pheno = leftoutsocial) %>%
# select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
# drop_na()
# residualise, average, save
#residualise.average.save(data=leftoutsocial, average=TRUE, age_residualise=TRUE, name="LEFTOUT")
########################################################
############## Construct phenotype: lonely #############
########################################################
# Loneliness item from the 1993/2004/2011 waves; one row per person-wave with
# age-at-interview covariates (residualised on age, averaged over waves).
lonely <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("lonely")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("lonely_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = lonely) %>% # NOTE(review): labelled "reverse code" but no reversal is applied -- confirm intended coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise, average, save
residualise.average.save(data=lonely, average=TRUE, age_residualise=TRUE, name="LONELY")
########################################################
############# Construct phenotype: migraine ############
########################################################
#migraine <- data %>%
# select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("migraine")) %>%
# select(-contains("1957"), -contains("1975")) %>%
# gather(key="wave", value="value",
# paste0("migraine_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
# separate("wave", c("var", "wave")) %>%
# spread("var", "value") %>%
# mutate(age2 = age^2,
# male_age = male * age,
# male_age2 = male * age2,
# pheno = migraine) %>%
# filter(pheno >= 0) %>%
# select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
# drop_na()
# residualise, average, save
#residualise.average.save(data=migraine, average=TRUE, age_residualise=TRUE, name="MIGRAINE")
########################################################
######### Construct phenotype: number ever born ########
########################################################
# Number of children ever born (NEB): a single per-person measure with
# date-of-birth covariates. residualise.average.save() is called with
# neb=TRUE, so it receives NEB-specific handling downstream.
NEB <- data %>%
  select(id_old, id, respondent_type, yob, male, african_american, contains("NEB")) %>%
  mutate(dob = yob,
         dob2 = yob^2,
         male_dob = male * dob,
         male_dob2 = male * dob2,
         pheno = NEB) %>% # NOTE(review): labelled "reverse code" but no reversal is applied -- confirm intended coding
  select(id, respondent_type, pheno, dob, dob2, male, male_dob, male_dob2) %>%
  drop_na()
# residualise, average, save
residualise.average.save(data=NEB, average=FALSE, age_residualise=FALSE, name="NEB", neb=TRUE)
########################################################
########### Construct phenotype: neuroticism ###########
########################################################
# Neuroticism score from the 1993/2004/2011 waves; phone-administered and
# items-answered ("nanswered") columns are excluded so only the main scale
# remains. One row per person-wave with age covariates.
neur <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("neur")) %>%
  select(-contains("phone"), -contains("nanswered")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("neur_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = neur) %>% # NOTE(review): labelled "reverse code" but no reversal is applied -- confirm intended coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise, average, save
residualise.average.save(data=neur, average=TRUE, age_residualise=TRUE, name="NEURO")
########################################################
############# Construct phenotype: openness ############
########################################################
# Openness score from the 1993/2004/2011 waves (phone items excluded);
# one row per person-wave with age covariates.
open <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("open"), -contains("phone")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("open_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = open) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise, average, save
residualise.average.save(data=open, average=TRUE, age_residualise=TRUE, name="OPEN")
########################################################
########## Construct phenotype: pollen allergy #########
########################################################
# Pollen allergy, asked in the 2011 wave only (all earlier waves dropped).
pollen <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("pollen")) %>%
  select(-contains("1957"), -contains("1975"), -contains("1993"), -contains("2004")) %>%
  gather(key="wave", value="value",
         paste0("pollen_", 2011), paste0("age_", 2011)) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = pollen) %>%
  filter(pheno >= 0) %>%  # NOTE(review): presumably negative values are missing/refused codes -- confirm
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise, average, save
residualise.average.save(data=pollen, average=FALSE, age_residualise=TRUE, name="ALLERGYPOLLEN")
########################################################
########### Construct phenotype: religiosity ###########
########################################################
# Religious attendance from the 1975/1993/2004/2011 waves (phone/mail
# variants excluded); one row per person-wave with age covariates
# (residualised on age, averaged over waves).
relig <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("relig")) %>%
  select(-contains("1957"), -contains("phone"), -contains("mail")) %>%
  gather(key="wave", value="value",
         paste0("relig_", c(1975, 1993, 2004, 2011)), paste0("age_", c(1975, 1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = relig) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()  # was `drop_na() %>% drop_na()`: the second call was a redundant no-op
# residualise, average, save
residualise.average.save(data=relig, average=TRUE, age_residualise=TRUE, name="RELIGATT")
########################################################
############### Construct phenotype: risk ##############
########################################################
# Risk tolerance items 5, 9 and 11 from the 2011 wave. Column names like
# "risk5_2011" are split into the item part ("risk5") and the year; the item
# number (after stripping the "risk" prefix) is stored in `wave` so the three
# items are treated like repeated measures downstream.
risk <- data %>%
  select(id_old, id, respondent_type, yob, contains("age_2011"), male, african_american, contains("risk")) %>%
  select(-contains("losing")) %>%  # the "risklosing" items are handled separately below
  gather(key="wave", value="value",
         paste0("risk", c(5, 9, 11), "_2011")) %>%
  separate("wave", c("wave", "var")) %>%
  mutate(age = age_2011,
         age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = value,
         wave = str_remove(wave, "risk")) %>%  # "risk5" -> "5"
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# Same construction for the "risk of losing" items.
risk_loss <- data %>%
  select(id_old, id, respondent_type, yob, contains("age_2011"), male, african_american, contains("risklosing")) %>%
  gather(key="wave", value="value",
         paste0("risklosing", c(5, 9, 11), "_2011")) %>%
  separate("wave", c("wave", "var")) %>%
  mutate(age = age_2011,
         age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = value,
         wave = str_remove(wave, "risklosing")) %>%
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise, average, save
residualise.average.save(data=risk, average=TRUE, age_residualise=TRUE, name="RISK")
residualise.average.save(data=risk_loss, average=TRUE, age_residualise=TRUE, name="RISKLOSS")
########################################################
######## Construct phenotype: self-rated health ########
########################################################
# Self-rated health from the 1993/2004/2011 waves; one row per person-wave
# with age covariates (residualised on age, averaged over waves).
selfhealth <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("selfhealth")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("selfhealth_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = selfhealth) %>% # NOTE(review): labelled "reverse code" but no reversal is applied -- confirm intended coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise, average, save
residualise.average.save(data=selfhealth, average=TRUE, age_residualise=TRUE, name="SELFHEALTH")
########################################################
###### Construct phenotype: subjective well-being ######
########################################################
# Subjective well-being from the 1993/2004/2011 waves; one row per
# person-wave with age covariates (residualised on age, averaged over waves).
SWB <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("SWB")) %>%
  select(-contains("1957"), -contains("1975")) %>%
  gather(key="wave", value="value",
         paste0("SWB_", c(1993, 2004, 2011)), paste0("age_", c(1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = SWB) %>% # NOTE(review): labelled "reverse code" but no reversal is applied -- confirm intended coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise, average, save
residualise.average.save(data=SWB, average=TRUE, age_residualise=TRUE, name="SWB")
########################################################
######## Construct phenotype: work satisfaction ########
########################################################
# Job satisfaction from the 1975/1993/2004/2011 waves; one row per
# person-wave with age covariates (residualised on age, averaged over waves).
worksat <- data %>%
  select(id_old, id, respondent_type, yob, contains("age"), male, african_american, contains("worksat")) %>%
  select(-contains("1957")) %>%
  gather(key="wave", value="value",
         paste0("worksat_", c(1975, 1993, 2004, 2011)), paste0("age_", c(1975, 1993, 2004, 2011))) %>%
  separate("wave", c("var", "wave")) %>%
  spread("var", "value") %>%
  mutate(age2 = age^2,
         male_age = male * age,
         male_age2 = male * age2,
         pheno = worksat) %>% # NOTE(review): labelled "reverse code" but no reversal is applied -- confirm intended coding
  select(id, respondent_type, wave, pheno, age, age2, male, male_age, male_age2) %>%
  drop_na()
# residualise, average, save
residualise.average.save(data=worksat, average=TRUE, age_residualise=TRUE, name="WORKSAT")
|
## Assignment: Caching the Inverse of a Matrix
##
## Matrix inversion is usually a costly computation and there may be some benefit to caching the inverse of a matrix rather than compute it repeatedly
## (there are also alternatives to matrix inversion that we will not discuss here).
## Your assignment is to write a pair of functions that cache the inverse of a matrix.
## Write the following functions:
## 1.makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
## 2.cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
##
## Computing the inverse of a square matrix can be done with the solve function in R. For example, if X is a square invertible matrix, then solve(X) returns its inverse.
## In this Programming Assignment will take advantage of the scoping rules of the R language and how they can be manipulated to preserve state inside of an R object.
## For this assignment, assume that the matrix supplied is always invertible.
## Function - makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse
##
## Create a cache-aware matrix wrapper: a list of four accessor closures that
## share the matrix `X` and its (lazily computed) inverse via their enclosing
## environment.
##  - set(y):          replace the stored matrix and invalidate the cache
##  - get():           return the stored matrix
##  - setinverse(inv): store a computed inverse in the cache
##  - getinverse():    return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(X = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      X <<- y
      cached_inverse <<- NULL  # stored matrix changed, so the old inverse is stale
    },
    get = function() X,
    setinverse = function(inv) cached_inverse <<- inv,
    getinverse = function() cached_inverse
  )
}
## Function - cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above
## If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
## assumes a package like corpcor has been installed and loaded from a CRAN mirror
## type ??inverse or for an example which contains pseudoinverse: http://127.0.0.1:15599/library/corpcor/html/pseudoinverse.html
## Return the inverse of the special "matrix" `X` created by makeCacheMatrix.
## On a cache hit the stored inverse is returned directly; otherwise the
## inverse is computed, stored back into `X`'s cache, and returned.
## `...` is forwarded to pseudoinverse().
## NOTE(review): pseudoinverse() is not base R -- it comes from the corpcor
## package, which must be loaded by the caller (see the comment above).
cacheSolve <- function(X, ...)
{
  inverse <- X$getinverse()
  if (!is.null(inverse))
  {
    message("Cached Data")
    return(inverse)
  }
  message("Not Cached Data")
  data <- X$get()
  inverse <- pseudoinverse(data, ...)  # Moore-Penrose pseudoinverse; equals solve(data) for invertible input
  X$setinverse(inverse)
  inverse
}
| /cachematrix.R | no_license | kpeeples/ProgrammingAssignment2 | R | false | false | 2,321 | r | ## Assignment: Caching the Inverse of a Matrix
##
## Matrix inversion is usually a costly computation and there may be some benefit to caching the inverse of a matrix rather than compute it repeatedly
## (there are also alternatives to matrix inversion that we will not discuss here).
## Your assignment is to write a pair of functions that cache the inverse of a matrix.
## Write the following functions:
## 1.makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
## 2.cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
##
## Computing the inverse of a square matrix can be done with the solve function in R. For example, if X is a square invertible matrix, then solve(X) returns its inverse.
## In this Programming Assignment will take advantage of the scoping rules of the R language and how they can be manipulated to preserve state inside of an R object.
## For this assignment, assume that the matrix supplied is always invertible.
## Function - makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse
##
## Create a cache-aware matrix wrapper: a list of four accessor closures that
## share the matrix `X` and its (lazily computed) inverse via their enclosing
## environment.
##  - set(y):          replace the stored matrix and invalidate the cache
##  - get():           return the stored matrix
##  - setinverse(inv): store a computed inverse in the cache
##  - getinverse():    return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(X = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      X <<- y
      cached_inverse <<- NULL  # stored matrix changed, so the old inverse is stale
    },
    get = function() X,
    setinverse = function(inv) cached_inverse <<- inv,
    getinverse = function() cached_inverse
  )
}
## Function - cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above
## If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
## assumes a package like corpcor has been installed and loaded from a CRAN mirror
## type ??inverse or for an example which contains pseudoinverse: http://127.0.0.1:15599/library/corpcor/html/pseudoinverse.html
## Return the inverse of the special "matrix" `X` created by makeCacheMatrix.
## On a cache hit the stored inverse is returned directly; otherwise the
## inverse is computed, stored back into `X`'s cache, and returned.
## `...` is forwarded to pseudoinverse().
## NOTE(review): pseudoinverse() is not base R -- it comes from the corpcor
## package, which must be loaded by the caller (see the comment above).
cacheSolve <- function(X, ...)
{
  inverse <- X$getinverse()
  if (!is.null(inverse))
  {
    message("Cached Data")
    return(inverse)
  }
  message("Not Cached Data")
  data <- X$get()
  inverse <- pseudoinverse(data, ...)  # Moore-Penrose pseudoinverse; equals solve(data) for invertible input
  X$setinverse(inverse)
  inverse
}
|
stability_feature_selection <- function(group = Group$group1){
  # Stability selection (complementary-pairs subsampling) on the training
  # design matrix; keeps only the selected columns in both train and test.
  # NOTE(review): the default argument relies on a global `Group` object --
  # confirm it exists wherever this is called.
  library(stabs)
  # Solve for q (variables drawn per subsample) from the target per-family
  # error rate and the selection threshold pi:
  #   PFER = q^2 / ((2*pi - 1) * p)  =>  q = sqrt(PFER * p * (2*pi - 1))
  pfer <- 1
  pi_thr <- .9
  p <- ncol(group$Train$X)
  q <- sqrt(pfer * p * (2 * pi_thr - 1))
  cols <- stabsel(group$Train$X, group$Train$Y,
                  fitfun = glmnet.lasso,
                  sampling.type = "SS",
                  q = q,
                  PFER = pfer)
  # Report how many features survived (plain length(); the original piped
  # through %>%, which needlessly required magrittr to be attached).
  cat(length(cols$selected), '\n')
  # drop = FALSE keeps the matrix structure even when one feature is selected.
  group$Train$X <- group$Train$X[, cols$selected, drop = FALSE]
  group$Test$X <- group$Test$X[, cols$selected, drop = FALSE]
  return(group)
}
| /rice/undergrad/stat-413/Functions/Feature Selection/stability_feature_selection.R | no_license | rodgers2000/Projects | R | false | false | 526 | r | stability_feature_selection <- function(group = Group$group1){
library(stabs)
# lets solve for Q
pfer = 1
pie = .9
p = ncol(group$Train$X)
q = sqrt(pfer*p*(2*pie-1))
cols = stabsel(group$Train$X, group$Train$Y,
fitfun = glmnet.lasso,
sampling.type = "SS",
q = q,
PFER = pfer)
cat(cols$selected %>% length(), '\n')
group$Train$X = group$Train$X[, cols$selected]
group$Test$X = group$Test$X[, cols$selected]
return(group)
}
|
# Getting & Cleaning Data, quiz 1.
# NOTE(review): depends on files in a hard-coded home directory and on
# network access for question 4; paths/URLs are left unchanged.
setwd("~/Documents/datasciencecoursera/Getting and Cleaning Data/Week 1")
#question1: housing units with VAL code 24 and TYPE 1
getdata.data.ss06hid <- read.csv("~/Documents/datasciencecoursera/Getting and Cleaning Data/Week 1/getdata-data-ss06hid.csv")
#VAL = '24' and TYPE = '1'
houses <- getdata.data.ss06hid
x <- houses[, "VAL"]
y <- houses[, "TYPE"]
z <- complete.cases(x, y)  # rows where both VAL and TYPE are observed
houses2 <- houses[z, ]
houses3 <- houses2[houses2[, "VAL"] == "24", ]
houses4 <- houses3[houses3[, "TYPE"] == "1", ]
#question3: sum of Zip * Ext from the quiz spreadsheet
quiz1q3 <- read.csv("~/Documents/datasciencecoursera/Getting and Cleaning Data/Week 1/quiz1q3.csv")
dat <- quiz1q3
sum(dat$Zip * dat$Ext, na.rm = TRUE)  # was na.rm=T: spell out TRUE (T can be reassigned)
#question4: extract all zipcode values from the restaurants XML feed
library(XML)
q4url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml"
q4doc <- xmlTreeParse(q4url, useInternalNodes = TRUE, isURL = TRUE, isHTML = TRUE)
rootNode <- xmlRoot(q4doc)
xmlName(rootNode)
zipcodes <- xpathSApply(q4doc, "//zipcode", xmlValue)
zipcodes
#question5
q5url = 'https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv' | /Getting and Cleaning Data/Week 1/quiz1.R | no_license | johnkalish/datasciencecoursera | R | false | false | 962 | r | setwd("~/Documents/datasciencecoursera/Getting and Cleaning Data/Week 1")
#question1
getdata.data.ss06hid <- read.csv("~/Documents/datasciencecoursera/Getting and Cleaning Data/Week 1/getdata-data-ss06hid.csv")
#VAL = '24' and TYPE = '1'
houses = getdata.data.ss06hid
x = houses[,'VAL']
y = houses[,'TYPE']
z = complete.cases(x,y)
houses2 = houses[z,]
houses3 = houses2[houses2[,'VAL']=='24',]
houses4 = houses3[houses3[,'TYPE']=='1',]
#question3
quiz1q3 <- read.csv("~/Documents/datasciencecoursera/Getting and Cleaning Data/Week 1/quiz1q3.csv")
dat = quiz1q3
sum(dat$Zip*dat$Ext,na.rm=T)
#question4
library(XML)
q4url = "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml"
q4doc = xmlTreeParse(q4url,useInternalNodes=TRUE,isURL=TRUE,isHTML=TRUE)
rootNode = xmlRoot(q4doc)
xmlName(rootNode)
zipcodes = xpathSApply(q4doc,"//zipcode",xmlValue)
zipcodes
#question5
q5url = 'https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv' |
# For a Belgian postal code, return the provinces ordered by great-circle
# distance from the code's locality, plus the locality name.
#
# @param input_code Postal code present in code-postaux-belge.csv.
# @return list(df, localite): `df` has columns province_vector (province id),
#   distances (metres, Haversine) and names, sorted by increasing distance;
#   `localite` is the locality name for the postal code.
# NOTE(review): relies on read_excel (readxl), read_csv2 (readr),
# %>%/filter (dplyr) and distm/distHaversine (geosphere) being attached
# by the caller, and on the two data files being in the working directory.
GetProvince <- function(input_code){
  provinces <- read_excel('Database projet v2.xlsx', sheet = 'provinces')
  postcodes <- read_csv2('code-postaux-belge.csv')
  # Look the postal code up once instead of filtering the table three times.
  row <- postcodes %>% filter(Code == input_code)
  lon1 <- row$Longitude
  lat1 <- row$Latitude
  localite <- row$Localite
  n <- nrow(provinces)
  # Preallocate instead of growing the vectors with append() inside the loop.
  province_vector <- vector(length = n)
  distances <- vector(length = n)
  names <- vector(length = n)
  for (i in seq_len(n)){
    province_vector[i] <- provinces[i, ][['id_province']]
    names[i] <- provinces[i, ][['name']]
    distances[i] <- distm(c(lon1, lat1),
                          c(provinces[i, ][['longitude']], provinces[i, ][['latitude']]),
                          fun = distHaversine)
  }
  # (the original built an empty data.frame() first and cbind-ed over it)
  df <- data.frame(province_vector, distances, names)
  df <- df[order(df$distances), ]
  list(df, localite)
}
file = 'Database projet v2.xlsx'
provinces = read_excel(file, sheet = 'provinces')
file = 'code-postaux-belge.csv'
df = read_csv2(file)
province_vector = vector()
distances = vector()
names = vector()
lon1 = (df%>%filter(Code == input_code))$Longitude
lat1 = (df%>%filter(Code == input_code))$Latitude
localite = (df%>%filter(Code == input_code))$Localite
for (i in seq(from = 1, to = nrow(provinces), by = 1)){
prov = provinces[i,][['id_province']]
lon2 = provinces[i,][['longitude']]
lat2 = provinces[i,][['latitude']]
name = provinces[i,][['name']]
dist = distm(c(lon1, lat1), c(lon2, lat2), fun = distHaversine)
province_vector = append(province_vector, prov)
distances = append(distances, dist)
names = append(names, name)
}
df = data.frame()
df = cbind(data.frame(province_vector), data.frame(distances), data.frame(names))
df = df[order(df$distances),]
output_list = list(df, localite)
return(output_list)
} |
rm(list=ls())
source(paste0(here::here(), "/0-config.R"))
#Set adjustment covariates
Wvars <- c("sex", "arm", "brthmon", "vagbrth", "hdlvry", "single", "trth2o",
"cleanck", "impfloor", "hfoodsec", "hhwealth_quart","W_mage", "W_mhtcm", "W_mwtkg",
"W_mbmi", "W_fage", "W_fhtcm", "W_meducyrs", "W_feducyrs", "W_nrooms", "W_nhh",
"W_nchldlt5", "W_parity", "impsan", "safeh20")
#load data
d <- readRDS(mortality_age_path)
d <- d %>% arrange(studyid, subjid, agedays)
d <- d %>% filter(!(studyid %in% c("VITALPAK-Pregnancy","iLiNS-DYAD-G","DIVIDS")))
summary(d$agedays)
table(d$agecat)
d <- d %>% filter(agedays <= 730)
d <- droplevels(d)
X_vector <- c("stunt", "wast","wast_muac","underwt",
"sstunt", "swast","swast_muac", "sunderwt", "stunt_uwt",
"wast_uwt", "co",
"ever_stunt", "ever_wast", "ever_wast_muac", "ever_uwt", "ever_sstunt",
"ever_swast", "ever_swast_muac","ever_suwt", "ever_stunt_uwt", "ever_wast_uwt", "ever_co")
#All ages < 730 days
res <- run_cox_meta(df=d, X_vector=X_vector, Y="dead", Wvars=Wvars, V=NULL)
res_sex_strat <- run_cox_meta(df=d, X_vector=X_vector, Y="dead", Wvars=Wvars, V="sex")
#Dropping prenatal deaths
res_noPN <- run_cox_meta(df=d%>% filter(agecat!="(0,30]"),
X_vector=X_vector, Y="dead", Wvars=Wvars, V=NULL, agecat="1-24 months")
res_noPN_sex_strat <- run_cox_meta(df=d%>% filter(agecat!="(0,30]"),
X_vector=X_vector, Y="dead", Wvars=Wvars, V="sex", agecat="1-24 months")
#Age-strat, starting from birth
res_age_strat <- run_cox_meta_agestrat(d=d, age_strat=levels(d$agecat), X_vector=X_vector, Y="dead", Wvars=Wvars, V=NULL)
res_age_sex_strat <- run_cox_meta_agestrat(d=d, age_strat=levels(d$agecat), X_vector=X_vector, Y="dead", Wvars=Wvars, V="sex")
res$df <- "res"
res_sex_strat$df <- "res_sex_strat"
res_noPN$df <- "res_noPN"
res_noPN_sex_strat$df <- "res_noPN_sex_strat"
res_age_strat$df <- "res_age_strat"
res_age_sex_strat$df <- "res_age_sex_strat"
fullres <- bind_rows(res, res_sex_strat,
res_noPN, res_noPN_sex_strat,
res_age_strat, res_age_sex_strat)
saveRDS(fullres, file=here("results/full_cox_results_allDeaths.RDS"))
#TO do:
#2)
# We will analyze anthropometry status using the last anthropometry measurement before death, excluding measurements
# occurring more than 6 months prior to death, or less than 1 week to avoid bias from potential reverse causation
#Need to recreate "ever" variables from "cumsum" variables after
#do this by checking the diff between agedays and death in the last obs for children who died
#drop the child from the analysis if not. Coded like this:
# d <- d %>% group_by(studyid, subjid) %>%
# mutate(diff_death = ifelse(dead==1, agedth - agedays, NA),
# drop_due_to_time_gap = max(ifelse(diff_death > 30.4167*6 | diff_death < 7, 1, 0))) %>%
# filter(drop_due_to_time_gap!=1)
#Note... this may be dropping too many children... investigate further
#3)
# We also will repeat the above analyses using different age ranges, starting at birth and ending at the following ages:
# birth, one month, 3 months, 6 months, 12 months, and 24 months.
#4) run with and without imputed age of death
#5) add covariate adjustment
#6) double check the coding of cumulative incidence analysis
#-also, should this impose a certain number of measurements?
#I.e ever stunting might be biased comparing kids with >8 measurements to neonatal deaths with 1 measurements
#7) MUAC analysis
# ## Solution 2: Time-varying effect
# ## Event status variable necessary
# d$event <- (d$dead == 1)
#
#
# ## Counting process format creation
# d.split <- survSplit(data = d,
# cut = c(0, 30, 90, 181, 365, 731, 7000), # vector of timepoints to cut at
# end = "agedays", # character string with name of event time variable
# event = "event", # character string with name of censoring indicator
# start = "start_age", # character string with name of start time variable (created)
# id = "id", # character string with name of new id variable to create
# zero = 0 # If start doesn't already exist, used as start
# )
#
# d.split <- d.split %>% arrange(studyid, subjid, id, agedays)
#
#
# ## Recreate SurbObj
# d.split$SurvObj <- with(d.split, Surv(time = (start_age), time2 = agedays, event = event))
#
# ## Check
#
# ## Time-varying effect of baseline variable by including interaction with interval
# res.cox1.strata <- coxph(SurvObj ~ sex + wast + wast:factor(start_age) + survival::cluster(id),
# data = d.split)
# summary(res.cox1.strata)
#
#
# #subset to primary dataset- has age of death, deaths before 2 years, last measure at least a week prior
# glimpse(d)
# d <- d %>% filter(imp_agedth==0, sufficient_lag==1) %>%
# group_by(studyid) %>% filter(sum(dead, na.rm=T)>10)
#
# table(d$studyid, d$dead)
#
# #Temp: subset to one study
# d <- d %>% filter(studyid=="JiVitA-3")
#
# ## Add survival object. status == 2 is death
# d$SurvObj <- with(d, Surv(agedth, dead == 1))
#
# ## Check data
# head(d)
#
# table(d$studyid, d$dead)
#
# #Note: I think I need to subset to the final obs
# #Also fix code for the first 3 studies to not drop any ons with missing age of death... do that here
# #make sure to only drop obs that are dead==1 but also missing agedth... impute agedth with maxage
#
# ## Fit Cox regression
# res.cox1 <- coxph(SurvObj ~ sex, data = d)
# res.cox1
#
# table(d$wast, d$dead)
# table(d$stunt, d$dead)
# res.cox2 <- coxph(SurvObj ~ stunt, data = d)
# res.cox2
#
# res.cox3 <- coxph(SurvObj ~ swast, data = d)
# res.cox3 | /src/03-coxPH-allDeaths.R | no_license | child-growth/ki-mortality | R | false | false | 6,063 | r |
rm(list=ls())
source(paste0(here::here(), "/0-config.R"))
#Set adjustment covariates
Wvars <- c("sex", "arm", "brthmon", "vagbrth", "hdlvry", "single", "trth2o",
"cleanck", "impfloor", "hfoodsec", "hhwealth_quart","W_mage", "W_mhtcm", "W_mwtkg",
"W_mbmi", "W_fage", "W_fhtcm", "W_meducyrs", "W_feducyrs", "W_nrooms", "W_nhh",
"W_nchldlt5", "W_parity", "impsan", "safeh20")
#load data
d <- readRDS(mortality_age_path)
d <- d %>% arrange(studyid, subjid, agedays)
d <- d %>% filter(!(studyid %in% c("VITALPAK-Pregnancy","iLiNS-DYAD-G","DIVIDS")))
summary(d$agedays)
table(d$agecat)
d <- d %>% filter(agedays <= 730)
d <- droplevels(d)
X_vector <- c("stunt", "wast","wast_muac","underwt",
"sstunt", "swast","swast_muac", "sunderwt", "stunt_uwt",
"wast_uwt", "co",
"ever_stunt", "ever_wast", "ever_wast_muac", "ever_uwt", "ever_sstunt",
"ever_swast", "ever_swast_muac","ever_suwt", "ever_stunt_uwt", "ever_wast_uwt", "ever_co")
#All ages < 730 days
res <- run_cox_meta(df=d, X_vector=X_vector, Y="dead", Wvars=Wvars, V=NULL)
res_sex_strat <- run_cox_meta(df=d, X_vector=X_vector, Y="dead", Wvars=Wvars, V="sex")
#Dropping prenatal deaths
res_noPN <- run_cox_meta(df=d%>% filter(agecat!="(0,30]"),
X_vector=X_vector, Y="dead", Wvars=Wvars, V=NULL, agecat="1-24 months")
res_noPN_sex_strat <- run_cox_meta(df=d%>% filter(agecat!="(0,30]"),
X_vector=X_vector, Y="dead", Wvars=Wvars, V="sex", agecat="1-24 months")
#Age-strat, starting from birth
res_age_strat <- run_cox_meta_agestrat(d=d, age_strat=levels(d$agecat), X_vector=X_vector, Y="dead", Wvars=Wvars, V=NULL)
res_age_sex_strat <- run_cox_meta_agestrat(d=d, age_strat=levels(d$agecat), X_vector=X_vector, Y="dead", Wvars=Wvars, V="sex")
res$df <- "res"
res_sex_strat$df <- "res_sex_strat"
res_noPN$df <- "res_noPN"
res_noPN_sex_strat$df <- "res_noPN_sex_strat"
res_age_strat$df <- "res_age_strat"
res_age_sex_strat$df <- "res_age_sex_strat"
fullres <- bind_rows(res, res_sex_strat,
res_noPN, res_noPN_sex_strat,
res_age_strat, res_age_sex_strat)
saveRDS(fullres, file=here("results/full_cox_results_allDeaths.RDS"))
#TO do:
#2)
# We will analyze anthropometry status using the last anthropometry measurement before death, excluding measurements
# occurring more than 6 months prior to death, or less than 1 week to avoid bias from potential reverse causation
#Need to recreate "ever" variables from "cumsum" variables after
#do this by checking the diff between agedays and death in the last obs for children who died
#drop the child from the analysis if not. Coded like this:
# d <- d %>% group_by(studyid, subjid) %>%
# mutate(diff_death = ifelse(dead==1, agedth - agedays, NA),
# drop_due_to_time_gap = max(ifelse(diff_death > 30.4167*6 | diff_death < 7, 1, 0))) %>%
# filter(drop_due_to_time_gap!=1)
#Note... this may be dropping too many children... investigate further
#3)
# We also will repeat the above analyses using different age ranges, starting at birth and ending at the following ages:
# birth, one month, 3 months, 6 months, 12 months, and 24 months.
#4) run with and without imputed age of death
#5) add covariate adjustment
#6) double check the coding of cumulative incidence analysis
#-also, should this impose a certain number of measurements?
#I.e ever stunting might be biased comparing kids with >8 measurements to neonatal deaths with 1 measurements
#7) MUAC analysis
# ## Solution 2: Time-varying effect
# ## Event status variable necessary
# d$event <- (d$dead == 1)
#
#
# ## Counting process format creation
# d.split <- survSplit(data = d,
# cut = c(0, 30, 90, 181, 365, 731, 7000), # vector of timepoints to cut at
# end = "agedays", # character string with name of event time variable
# event = "event", # character string with name of censoring indicator
# start = "start_age", # character string with name of start time variable (created)
# id = "id", # character string with name of new id variable to create
# zero = 0 # If start doesn't already exist, used as start
# )
#
# d.split <- d.split %>% arrange(studyid, subjid, id, agedays)
#
#
# ## Recreate SurbObj
# d.split$SurvObj <- with(d.split, Surv(time = (start_age), time2 = agedays, event = event))
#
# ## Check
#
# ## Time-varying effect of baseline variable by including interaction with interval
# res.cox1.strata <- coxph(SurvObj ~ sex + wast + wast:factor(start_age) + survival::cluster(id),
# data = d.split)
# summary(res.cox1.strata)
#
#
# #subset to primary dataset- has age of death, deaths before 2 years, last measure at least a week prior
# glimpse(d)
# d <- d %>% filter(imp_agedth==0, sufficient_lag==1) %>%
# group_by(studyid) %>% filter(sum(dead, na.rm=T)>10)
#
# table(d$studyid, d$dead)
#
# #Temp: subset to one study
# d <- d %>% filter(studyid=="JiVitA-3")
#
# ## Add survival object. status == 2 is death
# d$SurvObj <- with(d, Surv(agedth, dead == 1))
#
# ## Check data
# head(d)
#
# table(d$studyid, d$dead)
#
# #Note: I think I need to subset to the final obs
# #Also fix code for the first 3 studies to not drop any ons with missing age of death... do that here
# #make sure to only drop obs that are dead==1 but also missing agedth... impute agedth with maxage
#
# ## Fit Cox regression
# res.cox1 <- coxph(SurvObj ~ sex, data = d)
# res.cox1
#
# table(d$wast, d$dead)
# table(d$stunt, d$dead)
# res.cox2 <- coxph(SurvObj ~ stunt, data = d)
# res.cox2
#
# res.cox3 <- coxph(SurvObj ~ swast, data = d)
# res.cox3 |
#------------------------------------------------------
#------------------------------------------------------
# dat_sim.R
# - Simulates longitudinal and time-to-event data using the algorithm described in Section 5.
# Created by Ruth Keogh
#------------------------------------------------------
#------------------------------------------------------
#----
#sample size
n=5000
#----
#number of visits (K+1)
n.visit=5
#-------------------
#data generation
#-------------------
#----
#first generate the time-dependent A and L
# A[i,k] = binary treatment and L[i,k] = continuous confounder at visit k.
A=matrix(nrow=n,ncol=n.visit)
L=matrix(nrow=n,ncol=n.visit)
# U = individual-level frailty shared by the L-process and the hazard.
U=rnorm(n,0,0.1)
# Visit 1: L depends on U only; treatment probability increases with L
# (expit = inverse logit, defined elsewhere).
L[,1]=rnorm(n,0+U,1)
A[,1]=rbinom(n,1,expit(-2+0.5*L[,1]))
# Visits 2..K+1: L depends on its own past, past treatment, a visit trend and
# U; A depends on current L and past A.
for(k in 2:n.visit){
  L[,k]=rnorm(n,0.8*L[,k-1]-A[,k-1]+0.1*(k-1)+U,1)
  A[,k]=rbinom(n,1,expit(-2+0.5*L[,k]+A[,k-1]))
}
#----
#generate event times T.obs, and event indicators D.obs
# Piecewise-exponential event times: within each unit-length visit interval
# the hazard is constant given (A, L, U); a draw landing inside the interval
# (new.t < 1) becomes the event time, otherwise the subject survives to the
# next interval. Subjects event-free after the last interval are censored at 5.
T.obs=rep(NA,n)
sum.haz.neg=0 #used to check whether there are any instances of a negative hazard
for(v in 1:n.visit){
  u.t=runif(n,0,1)
  haz=0.7-0.2*A[,v]+0.05*L[,v]+0.05*U
  new.t=-log(u.t)/haz   # inverse-CDF draw from Exp(haz)
  T.obs=ifelse(is.na(T.obs) & new.t<1 & haz>0,v-1+new.t,T.obs)#the haz>0 is just used to deal with tiny possibility (under this data generating mechanism) the hazard could go negative.
  sum.haz.neg=sum(sum.haz.neg,(haz<0),na.rm=TRUE)  # was na.rm=T: spell out TRUE (T is reassignable)
}
D.obs=ifelse(is.na(T.obs),0,1)
T.obs=ifelse(is.na(T.obs),5,T.obs)
#----
#The above data are in 'wide' format (1 row per individual). Reshape into 'long' format (multiple rows per individual: 1 row for each visit)
L.dat=as.data.frame(L)
names(L.dat)=paste0("L.",0:4)
A.dat=as.data.frame(A)
names(A.dat)=paste0("A.",0:4)
Alag1.dat=as.data.frame(cbind(rep(0,n),A.dat[,1:4]))
Alag2.dat=as.data.frame(cbind(rep(0,n),rep(0,n),A.dat[,1:3]))
Alag3.dat=as.data.frame(cbind(rep(0,n),rep(0,n),rep(0,n),A.dat[,1:2]))
Alag4.dat=as.data.frame(cbind(rep(0,n),rep(0,n),rep(0,n),rep(0,n),A.dat[,1]))
names(Alag1.dat)=paste0("Alag1.",0:4)
names(Alag2.dat)=paste0("Alag2.",0:4)
names(Alag3.dat)=paste0("Alag3.",0:4)
names(Alag4.dat)=paste0("Alag4.",0:4)
dat=data.frame(id=1:n,T.obs,D.obs,A.dat,Alag1.dat,Alag2.dat,Alag3.dat,Alag4.dat,L.dat,U)
dat.long=reshape(data = dat,varying=c(paste0("A.",0:4),paste0("Alag1.",0:4),paste0("Alag2.",0:4),paste0("Alag3.",0:4),paste0("Alag4.",0:4),paste0("L.",0:4)),direction="long",idvar="id")
dat.long=dat.long[order(dat.long$id,dat.long$time),]
dat.long$time.stop=dat.long$time+1
dat.long=dat.long[dat.long$time<dat.long$T.obs,]
dat.long$time.stop=ifelse(dat.long$time.stop>dat.long$T.obs,dat.long$T.obs,dat.long$time.stop)
dat.long$event=ifelse(dat.long$time.stop==dat.long$T.obs & dat.long$D.obs==1,1,0)
| /dat_sim.R | no_license | hc704/causal_sim | R | false | false | 2,724 | r | #------------------------------------------------------
#------------------------------------------------------
# dat_sim.R
# - Simulates longitudinal and time-to-event data using the algorithm described in Section 5.
# Created by Ruth Keogh
#------------------------------------------------------
#------------------------------------------------------
#----
#sample size
n=5000
#----
#number of visits (K+1)
n.visit=5
#-------------------
#data generation
#-------------------
#----
#first generate the time-dependent A and L
A=matrix(nrow=n,ncol=n.visit)
L=matrix(nrow=n,ncol=n.visit)
U=rnorm(n,0,0.1)
L[,1]=rnorm(n,0+U,1)
A[,1]=rbinom(n,1,expit(-2+0.5*L[,1]))
for(k in 2:n.visit){
L[,k]=rnorm(n,0.8*L[,k-1]-A[,k-1]+0.1*(k-1)+U,1)
A[,k]=rbinom(n,1,expit(-2+0.5*L[,k]+A[,k-1]))
}
#----
#generate event times T.obs, and event indicators D.obs
T.obs=rep(NA,n)
sum.haz.neg=0 #used to check whether there are any instances of a negative hazard
for(v in 1:n.visit){
u.t=runif(n,0,1)
haz=0.7-0.2*A[,v]+0.05*L[,v]+0.05*U
new.t=-log(u.t)/haz
T.obs=ifelse(is.na(T.obs) & new.t<1 & haz>0,v-1+new.t,T.obs)#the haz>0 is just used to deal with tiny possibility (under this data generating mechanism) the hazard could go negative.
sum.haz.neg=sum(sum.haz.neg,(haz<0),na.rm=T)
}
D.obs=ifelse(is.na(T.obs),0,1)
T.obs=ifelse(is.na(T.obs),5,T.obs)
#----
#The above data are in 'wide' format (1 row per individual). Reshape into 'long' format (multiple rows per individual: 1 row for each visit)
L.dat=as.data.frame(L)
names(L.dat)=paste0("L.",0:4)
A.dat=as.data.frame(A)
names(A.dat)=paste0("A.",0:4)
Alag1.dat=as.data.frame(cbind(rep(0,n),A.dat[,1:4]))
Alag2.dat=as.data.frame(cbind(rep(0,n),rep(0,n),A.dat[,1:3]))
Alag3.dat=as.data.frame(cbind(rep(0,n),rep(0,n),rep(0,n),A.dat[,1:2]))
Alag4.dat=as.data.frame(cbind(rep(0,n),rep(0,n),rep(0,n),rep(0,n),A.dat[,1]))
names(Alag1.dat)=paste0("Alag1.",0:4)
names(Alag2.dat)=paste0("Alag2.",0:4)
names(Alag3.dat)=paste0("Alag3.",0:4)
names(Alag4.dat)=paste0("Alag4.",0:4)
dat=data.frame(id=1:n,T.obs,D.obs,A.dat,Alag1.dat,Alag2.dat,Alag3.dat,Alag4.dat,L.dat,U)
dat.long=reshape(data = dat,varying=c(paste0("A.",0:4),paste0("Alag1.",0:4),paste0("Alag2.",0:4),paste0("Alag3.",0:4),paste0("Alag4.",0:4),paste0("L.",0:4)),direction="long",idvar="id")
dat.long=dat.long[order(dat.long$id,dat.long$time),]
dat.long$time.stop=dat.long$time+1
dat.long=dat.long[dat.long$time<dat.long$T.obs,]
dat.long$time.stop=ifelse(dat.long$time.stop>dat.long$T.obs,dat.long$T.obs,dat.long$time.stop)
dat.long$event=ifelse(dat.long$time.stop==dat.long$T.obs & dat.long$D.obs==1,1,0)
|
#==================================================================================================#
# Script created by Mark Christie, contact at Redpath.Christie@gmail.com
# Script created in version R 3.3.1
# This script: Generates model output for Lampre Resistance project
# Usage notes: Set all parameters below and then source this file
#==================================================================================================#
# Set working directory and output directory
# Directory where model.R and 'source' folder reside
#Von Bert growth funcion!
e <- exp(1)
L = 150 # maximum size
K = 0.0011 # controls shape of the curve
K.sd <- 0.0002
tzero = -15 # size at age 0; shifts curves along x axis
years <- 7
#adults
#L = 800 # maximum size
#K = 0.002 # controls shape of the curve
#K.sd <- 0.0001
#tzero = -15 # size at age 0; shifts curves along x axis
#years <- 6
days <- seq(from = 1, to = (365*years), by = 10)
k.values <- rnorm(100, K, K.sd)
#note that this is slow, but in model will only have to solve for a single uear (consider using apply)
OUT <- NULL
for(K in k.values){
for(t in days){
y <- L * (1-e^(-K * (t-tzero)))
out <- cbind(K, t, y)
OUT <- rbind(OUT, out)
}
}
par(mar = c(4,5,1,1))
plot(-10, -10, xlim = c(0, max(days)/365), ylim = c(0, L + 10), xlab = "Years", ylab = "Size (mm)", cex.axis=1.8, cex.lab=2)
rect(par("usr")[1],par("usr")[3],par("usr")[2],par("usr")[4],col = "gray")
for(n in unique(OUT[, 1])){
dat <- OUT[OUT[, 1] == n, ]
lines(dat[, 2]/365, dat[, 3], col = "blue")
abline(h=120, lwd=4)
abline(h=450, cex=2) # 450 is from Fig. 3 of Hansen 2016
}
| /lamprey4.3/source/Reference/Growth.R | no_license | ChristieLab/Lamprey-Model | R | false | false | 1,705 | r | #==================================================================================================#
# Script created by Mark Christie, contact at Redpath.Christie@gmail.com
# Script created in version R 3.3.1
# This script: Generates model output for Lampre Resistance project
# Usage notes: Set all parameters below and then source this file
#==================================================================================================#
# Set working directory and output directory
# Directory where model.R and 'source' folder reside
#Von Bert growth funcion!
e <- exp(1)
L = 150 # maximum size
K = 0.0011 # controls shape of the curve
K.sd <- 0.0002
tzero = -15 # size at age 0; shifts curves along x axis
years <- 7
#adults
#L = 800 # maximum size
#K = 0.002 # controls shape of the curve
#K.sd <- 0.0001
#tzero = -15 # size at age 0; shifts curves along x axis
#years <- 6
days <- seq(from = 1, to = (365*years), by = 10)
k.values <- rnorm(100, K, K.sd)
#note that this is slow, but in model will only have to solve for a single uear (consider using apply)
OUT <- NULL
for(K in k.values){
for(t in days){
y <- L * (1-e^(-K * (t-tzero)))
out <- cbind(K, t, y)
OUT <- rbind(OUT, out)
}
}
par(mar = c(4,5,1,1))
plot(-10, -10, xlim = c(0, max(days)/365), ylim = c(0, L + 10), xlab = "Years", ylab = "Size (mm)", cex.axis=1.8, cex.lab=2)
rect(par("usr")[1],par("usr")[3],par("usr")[2],par("usr")[4],col = "gray")
for(n in unique(OUT[, 1])){
dat <- OUT[OUT[, 1] == n, ]
lines(dat[, 2]/365, dat[, 3], col = "blue")
abline(h=120, lwd=4)
abline(h=450, cex=2) # 450 is from Fig. 3 of Hansen 2016
}
|
#' @include JDBCObject.R
NULL
#' JDBCResult class
#'
#' Base class for \code{\linkS4class{JDBCQueryResult}} and \code{\linkS4class{JDBCUpdateResult}}.
#'
#' @family result classes
#' @export
setClass("JDBCResult",
contains = c("DBIResult", "JDBCObject", "VIRTUAL"))
RESULT_SET_TYPE <- list(
TYPE_FORWARD_ONLY = 1003L,
TYPE_SCROLL_INSENSITIVE = 1004L,
TYPE_SCROLL_SENSITIVE = 1005L
)
RESULT_SET_CONCURRENCY <- list(
CONCUR_READ_ONLY = 1007L,
CONCUR_UPDATABLE = 1008L
)
#' @rdname JDBCResult-class
#' @aliases dbBind,JDBCResult-method
#' @param res An object inheriting from \code{\linkS4class{JDBCResult}}
#' @inheritParams DBI::dbBind
#' @section Methods:
#' \code{dbBind}: Unsupported. Use parameter binding of \code{\link{dbSendQuery,JDBCConnection,character-method}} instead.
#' @export
setMethod("dbBind", signature(res = "JDBCResult"),
function(res, params, ...) {
stop("Unsupported. Use parameter binding of dbSendQuery instead.")
}
)
#' @rdname JDBCResult-class
#' @aliases fetch,JDBCResult-method
#' @inheritParams DBI::fetch
#' @section Methods:
#' \code{fetch}: Forwards to \code{\link{dbFetch}}.
#' @export
setMethod("fetch", signature(res = "JDBCResult"), function(res, n = -1, ...) {
dbFetch(res, n = n, ...)
})
| /R/JDBCResult.R | no_license | hoesler/dbj | R | false | false | 1,254 | r | #' @include JDBCObject.R
NULL
#' JDBCResult class
#'
#' Base class for \code{\linkS4class{JDBCQueryResult}} and \code{\linkS4class{JDBCUpdateResult}}.
#'
#' @family result classes
#' @export
setClass("JDBCResult",
contains = c("DBIResult", "JDBCObject", "VIRTUAL"))
RESULT_SET_TYPE <- list(
TYPE_FORWARD_ONLY = 1003L,
TYPE_SCROLL_INSENSITIVE = 1004L,
TYPE_SCROLL_SENSITIVE = 1005L
)
RESULT_SET_CONCURRENCY <- list(
CONCUR_READ_ONLY = 1007L,
CONCUR_UPDATABLE = 1008L
)
#' @rdname JDBCResult-class
#' @aliases dbBind,JDBCResult-method
#' @param res An object inheriting from \code{\linkS4class{JDBCResult}}
#' @inheritParams DBI::dbBind
#' @section Methods:
#' \code{dbBind}: Unsupported. Use parameter binding of \code{\link{dbSendQuery,JDBCConnection,character-method}} instead.
#' @export
setMethod("dbBind", signature(res = "JDBCResult"),
function(res, params, ...) {
stop("Unsupported. Use parameter binding of dbSendQuery instead.")
}
)
#' @rdname JDBCResult-class
#' @aliases fetch,JDBCResult-method
#' @inheritParams DBI::fetch
#' @section Methods:
#' \code{fetch}: Forwards to \code{\link{dbFetch}}.
#' @export
setMethod("fetch", signature(res = "JDBCResult"), function(res, n = -1, ...) {
dbFetch(res, n = n, ...)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meteo-autoplot.R
\name{autoplot.meteo_coverage}
\alias{autoplot.meteo_coverage}
\title{autoplot method for meteo_coverage objects}
\usage{
\method{autoplot}{meteo_coverage}(object)
}
\arguments{
\item{object}{(data.frame) a data.frame}
}
\value{
A ggplot2 plot
}
\description{
autoplot method for meteo_coverage objects
}
\details{
see \code{\link{meteo_coverage}} for examples
}
| /man/autoplot.meteo_coverage.Rd | permissive | martgnz/rnoaa | R | false | true | 458 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meteo-autoplot.R
\name{autoplot.meteo_coverage}
\alias{autoplot.meteo_coverage}
\title{autoplot method for meteo_coverage objects}
\usage{
\method{autoplot}{meteo_coverage}(object)
}
\arguments{
\item{object}{(data.frame) a data.frame}
}
\value{
A ggplot2 plot
}
\description{
autoplot method for meteo_coverage objects
}
\details{
see \code{\link{meteo_coverage}} for examples
}
|
/textminingdengue.R | no_license | RihabOueslati/DataMiningProject | R | false | false | 3,113 | r | ||
library(zoo)
### Name: zoo
### Title: Z's Ordered Observations
### Aliases: zoo with.zoo range.zoo print.zoo as.zoo.factor summary.zoo
### str.zoo is.zoo [.zoo [<-.zoo $.zoo $<-.zoo subset.zoo head.zoo
### tail.zoo Ops.zoo t.zoo cumsum.zoo cumprod.zoo cummin.zoo cummax.zoo
### mean.zoo median.zoo na.contiguous na.contiguous.data.frame
### na.contiguous.list na.contiguous.default na.contiguous.zoo scale.zoo
### xtfrm.zoo names.zoo names<-.zoo quantile.zoo rev.zoo transform.zoo
### ifelse.zoo dim<-.zoo index2char index2char.default index2char.numeric
### head.ts tail.ts
### Keywords: ts
### ** Examples
## simple creation and plotting
x.Date <- as.Date("2003-02-01") + c(1, 3, 7, 9, 14) - 1
x <- zoo(rnorm(5), x.Date)
plot(x)
time(x)
## subsetting with numeric indexes
x[c(2, 4)]
## subsetting with index class
x[as.Date("2003-02-01") + c(2, 8)]
## different classes of indexes/times can be used, e.g. numeric vector
x <- zoo(rnorm(5), c(1, 3, 7, 9, 14))
## subsetting with numeric indexes then uses observation numbers
x[c(2, 4)]
## subsetting with index class can be enforced by I()
x[I(c(3, 9))]
## visualization
plot(x)
## or POSIXct
y.POSIXct <- ISOdatetime(2003, 02, c(1, 3, 7, 9, 14), 0, 0, 0)
y <- zoo(rnorm(5), y.POSIXct)
plot(y)
## create a constant series
z <- zoo(1, seq(4)[-2])
## create a 0-dimensional zoo series
z0 <- zoo(, 1:4)
## create a 2-dimensional zoo series
z2 <- zoo(matrix(1:12, 4, 3), as.Date("2003-01-01") + 0:3)
## create a factor zoo object
fz <- zoo(gl(2,5), as.Date("2004-01-01") + 0:9)
## create a zoo series with 0 columns
z20 <- zoo(matrix(nrow = 4, ncol = 0), 1:4)
## arithmetic on zoo objects intersects them first
x1 <- zoo(1:5, 1:5)
x2 <- zoo(2:6, 2:6)
10 * x1 + x2
## $ extractor for multivariate zoo series with column names
z <- zoo(cbind(foo = rnorm(5), bar = rnorm(5)))
z$foo
z$xyz <- zoo(rnorm(3), 2:4)
z
## add comments to a zoo object
comment(x1) <- c("This is a very simple example of a zoo object.",
"It can be recreated using this R code: example(zoo)")
## comments are not output by default but are still there
x1
comment(x1)
# ifelse does not work with zoo but this works
# to create a zoo object which equals x1 at
# time i if x1[i] > x1[i-1] and 0 otherwise
(diff(x1) > 0) * x1
## zoo series with duplicated indexes
z3 <- zoo(1:8, c(1, 2, 2, 2, 3, 4, 5, 5))
plot(z3)
## remove duplicated indexes by averaging
lines(aggregate(z3, index(z3), mean), col = 2)
## or by using the last observation
lines(aggregate(z3, index(z3), tail, 1), col = 4)
## x1[x1 > 3] is not officially supported since
## x1 > 3 is of class "zoo", not "logical".
## Use one of these instead:
x1[which(x1 > 3)]
x1[coredata(x1 > 3)]
x1[as.logical(x1 > 3)]
subset(x1, x1 > 3)
## any class supporting the methods discussed can be used
## as an index class. Here are examples using complex numbers
## and letters as the time class.
z4 <- zoo(11:15, complex(real = c(1, 3, 4, 5, 6), imag = c(0, 1, 0, 0, 1)))
merge(z4, lag(z4))
z5 <- zoo(11:15, letters[1:5])
merge(z5, lag(z5))
# index values relative to 2001Q1
zz <- zooreg(cbind(a = 1:10, b = 11:20), start = as.yearqtr(2000), freq = 4)
zz[] <- mapply("/", as.data.frame(zz), coredata(zz[as.yearqtr("2001Q1")]))
## even though time index must be unique zoo (and read.zoo)
## will both allow creation of such illegal objects with
## a warning (rather than ana error) to give the user a
## chance to fix them up. Extracting and replacing times
## and aggregate.zoo will still work.
## Not run:
##D # this gives a warning
##D # and then creates an illegal zoo object
##D z6 <- zoo(11:15, c(1, 1, 2, 2, 5))
##D z6
##D
##D # fix it up by averaging duplicates
##D aggregate(z6, identity, mean)
##D
##D # or, fix it up by taking last in each set of duplicates
##D aggregate(z6, identity, tail, 1)
##D
##D # fix it up via interpolation of duplicate times
##D time(z6) <- na.approx(ifelse(duplicated(time(z6)), NA, time(z6)), na.rm = FALSE)
##D # if there is a run of equal times at end they
##D # wind up as NAs and we cannot have NA times
##D z6 <- z6[!is.na(time(z6))]
##D z6
##D
##D x1. <- x1 <- zoo (matrix (1:12, nrow = 3), as.Date("2008-08-01") + 0:2)
##D colnames (x1) <- c ("A", "B", "C", "D")
##D x2 <- zoo (matrix (1:12, nrow = 3), as.Date("2008-08-01") + 1:3)
##D colnames (x2) <- c ("B", "C", "D", "E")
##D
##D both.dates = as.Date (intersect (index (t1), index (t2)))
##D both.cols = intersect (colnames (t1), colnames (t2))
##D
##D x1[both.dates, both.cols]
##D ## there is "[.zoo" but no "[<-.zoo" however four of the following
##D ## five examples work
##D
##D ## wrong
##D ## x1[both.dates, both.cols] <- x2[both.dates, both.cols]
##D
##D # 4 correct alternatives
##D # #1
##D window(x1, both.dates)[, both.cols] <- x2[both.dates, both.cols]
##D
##D # #2. restore x1 and show a different way
##D x1 <- x1.
##D window(x1, both.dates)[, both.cols] <- window(x2, both.dates)[, both.cols]
##D
##D # #3. restore x1 and show a different way
##D x1 <- x1.
##D x1[time(x1) ##D
##D
##D # #4. restore x1 and show a different way
##D x1 <- x1.
##D x1[time(x1) ##D
##D
## End(Not run)
| /data/genthat_extracted_code/zoo/examples/zoo.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 5,119 | r | library(zoo)
### Name: zoo
### Title: Z's Ordered Observations
### Aliases: zoo with.zoo range.zoo print.zoo as.zoo.factor summary.zoo
### str.zoo is.zoo [.zoo [<-.zoo $.zoo $<-.zoo subset.zoo head.zoo
### tail.zoo Ops.zoo t.zoo cumsum.zoo cumprod.zoo cummin.zoo cummax.zoo
### mean.zoo median.zoo na.contiguous na.contiguous.data.frame
### na.contiguous.list na.contiguous.default na.contiguous.zoo scale.zoo
### xtfrm.zoo names.zoo names<-.zoo quantile.zoo rev.zoo transform.zoo
### ifelse.zoo dim<-.zoo index2char index2char.default index2char.numeric
### head.ts tail.ts
### Keywords: ts
### ** Examples
## simple creation and plotting
x.Date <- as.Date("2003-02-01") + c(1, 3, 7, 9, 14) - 1
x <- zoo(rnorm(5), x.Date)
plot(x)
time(x)
## subsetting with numeric indexes
x[c(2, 4)]
## subsetting with index class
x[as.Date("2003-02-01") + c(2, 8)]
## different classes of indexes/times can be used, e.g. numeric vector
x <- zoo(rnorm(5), c(1, 3, 7, 9, 14))
## subsetting with numeric indexes then uses observation numbers
x[c(2, 4)]
## subsetting with index class can be enforced by I()
x[I(c(3, 9))]
## visualization
plot(x)
## or POSIXct
y.POSIXct <- ISOdatetime(2003, 02, c(1, 3, 7, 9, 14), 0, 0, 0)
y <- zoo(rnorm(5), y.POSIXct)
plot(y)
## create a constant series
z <- zoo(1, seq(4)[-2])
## create a 0-dimensional zoo series
z0 <- zoo(, 1:4)
## create a 2-dimensional zoo series
z2 <- zoo(matrix(1:12, 4, 3), as.Date("2003-01-01") + 0:3)
## create a factor zoo object
fz <- zoo(gl(2,5), as.Date("2004-01-01") + 0:9)
## create a zoo series with 0 columns
z20 <- zoo(matrix(nrow = 4, ncol = 0), 1:4)
## arithmetic on zoo objects intersects them first
x1 <- zoo(1:5, 1:5)
x2 <- zoo(2:6, 2:6)
10 * x1 + x2
## $ extractor for multivariate zoo series with column names
z <- zoo(cbind(foo = rnorm(5), bar = rnorm(5)))
z$foo
z$xyz <- zoo(rnorm(3), 2:4)
z
## add comments to a zoo object
comment(x1) <- c("This is a very simple example of a zoo object.",
"It can be recreated using this R code: example(zoo)")
## comments are not output by default but are still there
x1
comment(x1)
# ifelse does not work with zoo but this works
# to create a zoo object which equals x1 at
# time i if x1[i] > x1[i-1] and 0 otherwise
(diff(x1) > 0) * x1
## zoo series with duplicated indexes
z3 <- zoo(1:8, c(1, 2, 2, 2, 3, 4, 5, 5))
plot(z3)
## remove duplicated indexes by averaging
lines(aggregate(z3, index(z3), mean), col = 2)
## or by using the last observation
lines(aggregate(z3, index(z3), tail, 1), col = 4)
## x1[x1 > 3] is not officially supported since
## x1 > 3 is of class "zoo", not "logical".
## Use one of these instead:
x1[which(x1 > 3)]
x1[coredata(x1 > 3)]
x1[as.logical(x1 > 3)]
subset(x1, x1 > 3)
## any class supporting the methods discussed can be used
## as an index class. Here are examples using complex numbers
## and letters as the time class.
z4 <- zoo(11:15, complex(real = c(1, 3, 4, 5, 6), imag = c(0, 1, 0, 0, 1)))
merge(z4, lag(z4))
z5 <- zoo(11:15, letters[1:5])
merge(z5, lag(z5))
# index values relative to 2001Q1
zz <- zooreg(cbind(a = 1:10, b = 11:20), start = as.yearqtr(2000), freq = 4)
zz[] <- mapply("/", as.data.frame(zz), coredata(zz[as.yearqtr("2001Q1")]))
## even though time index must be unique zoo (and read.zoo)
## will both allow creation of such illegal objects with
## a warning (rather than ana error) to give the user a
## chance to fix them up. Extracting and replacing times
## and aggregate.zoo will still work.
## Not run:
##D # this gives a warning
##D # and then creates an illegal zoo object
##D z6 <- zoo(11:15, c(1, 1, 2, 2, 5))
##D z6
##D
##D # fix it up by averaging duplicates
##D aggregate(z6, identity, mean)
##D
##D # or, fix it up by taking last in each set of duplicates
##D aggregate(z6, identity, tail, 1)
##D
##D # fix it up via interpolation of duplicate times
##D time(z6) <- na.approx(ifelse(duplicated(time(z6)), NA, time(z6)), na.rm = FALSE)
##D # if there is a run of equal times at end they
##D # wind up as NAs and we cannot have NA times
##D z6 <- z6[!is.na(time(z6))]
##D z6
##D
##D x1. <- x1 <- zoo (matrix (1:12, nrow = 3), as.Date("2008-08-01") + 0:2)
##D colnames (x1) <- c ("A", "B", "C", "D")
##D x2 <- zoo (matrix (1:12, nrow = 3), as.Date("2008-08-01") + 1:3)
##D colnames (x2) <- c ("B", "C", "D", "E")
##D
##D both.dates = as.Date (intersect (index (t1), index (t2)))
##D both.cols = intersect (colnames (t1), colnames (t2))
##D
##D x1[both.dates, both.cols]
##D ## there is "[.zoo" but no "[<-.zoo" however four of the following
##D ## five examples work
##D
##D ## wrong
##D ## x1[both.dates, both.cols] <- x2[both.dates, both.cols]
##D
##D # 4 correct alternatives
##D # #1
##D window(x1, both.dates)[, both.cols] <- x2[both.dates, both.cols]
##D
##D # #2. restore x1 and show a different way
##D x1 <- x1.
##D window(x1, both.dates)[, both.cols] <- window(x2, both.dates)[, both.cols]
##D
##D # #3. restore x1 and show a different way
##D x1 <- x1.
##D x1[time(x1) ##D
##D
##D # #4. restore x1 and show a different way
##D x1 <- x1.
##D x1[time(x1) ##D
##D
## End(Not run)
|
## This file contains two functions: 1. makeCacheMatrix, and 2. cacheSolve
## makeCacheMatrix takes a matrix as an argument returns a list of functions
## cacheSolve takes the list returned by makeCacheMatrix as an argument
## makeCacheMatrix function creates a special "matrix" object than can cache its inverse
## It allows the user to set a matrix, get a matrix, set an inverse and get the invers
## Note that cacheSolve as it is written does not require the "set a matrix" functionality
## naming of variables and functions: m_inv is the inverted matrix
## set_mat is a function that stores the original matrix
## get_mat is a function that returns the stored original matrix
## set_inv is a function that stores an inverted matrix
## get_inv is a function that returns the stored inverted matrix or NA
makeCacheMatrix <- function(x = matrix()) {
m_inv <- NA ## m_inv is the inverted matrix. start with NA
set_mat <- function(y) {
x <<- y ## over write x in the function argument to store a matrix
m_inv <- NA ## reset m_inv to NA since we now have a new matrix
}
get_mat <- function() x ## retrieve the original matrix
set_inv <- function(inv) m_inv <<- inv ## over write m_inv in the above
##environment and store the inverted matrix
get_inv <- function () m_inv ## retrieve the inverted matrix
list(set_mat = set_mat, get_mat = get_mat, set_inv = set_inv, get_inv = get_inv)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix
cacheSolve <- function(x, ...) {
## cacheSolve receives a list of functions as the arguments
## each of these functions can be referenced with "argument$function" convention
m_inv <- x$get_inv() ## retrieve what is stored in the inverted matrix variable
if(!is.na(m_inv)) { ## if the inverted matrix is not NA, then we must have cached
## the inverted matrix
message("getting cached data")
return(m_inv) ## return the inverted matrix and exit
}
## if we did not cache the inverted matrix, then do the following
m <- x$get_mat() ## first get the matrix
m_inv <- solve(m) ## use solve to invert the matrix. note this assumes that the
## matrix is non-singular (i.e., invertible)
x$set_inv(m_inv) ## now, store the inverted matrix so that the next time we can
## use the cached data
return(m_inv) ## Return a matrix that is the inverse of 'x'
}
| /cachematrix.R | no_license | sndsjh2014/ProgrammingAssignment2 | R | false | false | 2,484 | r | ## This file contains two functions: 1. makeCacheMatrix, and 2. cacheSolve
## makeCacheMatrix takes a matrix as an argument returns a list of functions
## cacheSolve takes the list returned by makeCacheMatrix as an argument
## makeCacheMatrix function creates a special "matrix" object than can cache its inverse
## It allows the user to set a matrix, get a matrix, set an inverse and get the invers
## Note that cacheSolve as it is written does not require the "set a matrix" functionality
## naming of variables and functions: m_inv is the inverted matrix
## set_mat is a function that stores the original matrix
## get_mat is a function that returns the stored original matrix
## set_inv is a function that stores an inverted matrix
## get_inv is a function that returns the stored inverted matrix or NA
makeCacheMatrix <- function(x = matrix()) {
m_inv <- NA ## m_inv is the inverted matrix. start with NA
set_mat <- function(y) {
x <<- y ## over write x in the function argument to store a matrix
m_inv <- NA ## reset m_inv to NA since we now have a new matrix
}
get_mat <- function() x ## retrieve the original matrix
set_inv <- function(inv) m_inv <<- inv ## over write m_inv in the above
##environment and store the inverted matrix
get_inv <- function () m_inv ## retrieve the inverted matrix
list(set_mat = set_mat, get_mat = get_mat, set_inv = set_inv, get_inv = get_inv)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix
cacheSolve <- function(x, ...) {
## cacheSolve receives a list of functions as the arguments
## each of these functions can be referenced with "argument$function" convention
m_inv <- x$get_inv() ## retrieve what is stored in the inverted matrix variable
if(!is.na(m_inv)) { ## if the inverted matrix is not NA, then we must have cached
## the inverted matrix
message("getting cached data")
return(m_inv) ## return the inverted matrix and exit
}
## if we did not cache the inverted matrix, then do the following
m <- x$get_mat() ## first get the matrix
m_inv <- solve(m) ## use solve to invert the matrix. note this assumes that the
## matrix is non-singular (i.e., invertible)
x$set_inv(m_inv) ## now, store the inverted matrix so that the next time we can
## use the cached data
return(m_inv) ## Return a matrix that is the inverse of 'x'
}
|
library("VariantAnnotation")
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
txdb_hg19 <- TxDb.Hsapiens.UCSC.hg19.knownGene
#1. Want to look at the methylation percentages of
#the jd and hpne samples at cpg sites.
#amit said the meth percentages looked a little high
#for the jd sample(over half had 100%)
#the mc histograms of the hmcSites should be high
#in the BS sample, and low in the OxBS sample
options(stringsAsFactors=F)
#load in the merged data
x = read.table("hmcSites_fixed2/cpg/hmcSites_BOTH_all_cpg_common4.txt")
colnames(x) = c("chr", "pos",
"jd.bs.meth", "jd.bs.tot", "jd.oxbs.meth", "jd.oxbs.tot", "jd.or", "jd.pval",
"hpne.bs.meth", "hpne.bs.tot", "hpne.oxbs.meth", "hpne.oxbs.tot", "hpne.or", "hpne.pval"
)
head(x)
tail(x)
dim(x)
mean(x$jd.bs.tot)
mean(x$jd.oxbs.tot)
mean(x$hpne.bs.tot)
mean(x$hpne.oxbs.tot)
#for jd and hpne
#how many sites have at least 4 reads
#amongst those sites, what is avearge depth?
#try to get measures of overall methylation between samples
#what percentage of cpg sites have methylation less than 50%
ix.jd.lt50 = (x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"] <= .50)
ix.hpne.lt50 = (x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"] <= .50)
mean(x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"] <= .50)
mean(x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"] <= .50)
#what percentage of cpg sites have methylation greater than 50%
ix.jd.gt50 = (x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"] > .50)
ix.hpne.gt50 = (x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"] > .50)
mean(x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"] > .50)
mean(x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"] > .50)
#what percentage of cpg sites have total methylation
ix.jd.100 = (x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"] == 1)
ix.hpne.100 = (x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"] == 1)
mean(ix.jd.100)
mean(ix.hpne.100)
mean(x[,"jd.pval"] < 0.05)
mean(x[,"hpne.pval"] < 0.05)
#get indices of the signif hmc sites
jd.hmc.ix = which(x[,"jd.pval"] < 0.05)
hpne.hmc.ix = which(x[,"hpne.pval"] < 0.05)
length(jd.hmc.ix) / nrow(x)
length(hpne.hmc.ix) / nrow(x)
length(jd.hmc.ix)
length(hpne.hmc.ix)
#write the site lists
write.table(x[jd.hmc.ix,1:2], file="jd_hmc_p05.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[hpne.hmc.ix,1:2], file="hpne_hmc_p05.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[ix.jd.gt50,1:2], file="jd_mc_gt50.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[ix.hpne.gt50,1:2], file="hpne_mc_gt50.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[ix.jd.lt50,1:2], file="jd_mc_lt50.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[ix.hpne.lt50,1:2], file="hpne_mc_lt50.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[ix.jd.100,1:2], file="jd_mc_100.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[ix.hpne.100,1:2], file="hpne_mc_100.txt", col.names=F, row.names=F, quote=F, sep="\t")
#get percentage of sites that lie within exons introns promoters etc
getSiteCounts <- function(x){
input = GRanges(seqnames=x[,1], ranges=IRanges(x[,2], x[,2]+1), strand="*")
loc_hg19 <- locateVariants(input, txdb_hg19, AllVariants())
loc_hg19 = loc_hg19[!duplicated(loc_hg19)]
table(loc_hg19$LOCATION)
}
sCounts.jd.hmc = getSiteCounts(x[jd.hmc.ix,])
sCounts.hpne.hmc = getSiteCounts(x[hpne.hmc.ix,])
sCounts.jd.hmc
sCounts.hpne.hmc
sCounts.jd.hmc/sum(sCounts.jd.hmc)
sCounts.hpne.hmc/sum(sCounts.hpne.hmc)
#meth > 50%
sCounts.jd.mc = getSiteCounts(x[ix.jd.gt50,])
sCounts.hpne.mc = getSiteCounts(x[ix.hpne.gt50,])
sCounts.jd.mc
sCounts.hpne.mc
sCounts.jd.mc/sum(sCounts.jd.mc)
sCounts.hpne.mc/sum(sCounts.hpne.mc)
#histograms of methylation across samples
png("methPerc_allCpg.png")
par(mfcol=c(2,2))
hist(x[,"jd.bs.meth"]/x[,"jd.bs.tot"], 100, main="all cpg\njd bs meth %" )
hist(x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"], 100, main="all cpg\njd oxbs meth %" )
hist(x[,"hpne.bs.meth"]/x[,"hpne.bs.tot"], 100, main="all cpg\nhpne bs meth %" )
hist(x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"], 100, main="all cpg\nhpne oxbs meth %")
dev.off()
png("methPerc_hmcCpg.png")
par(mfcol=c(2,2))
hist(x[jd.hmc.ix,"jd.bs.meth"]/x[jd.hmc.ix,"jd.bs.tot"], 100 , main="hmc cpg\njd bs meth %" )
hist(x[jd.hmc.ix,"jd.oxbs.meth"]/x[jd.hmc.ix,"jd.oxbs.tot"], 100 , main="hmc cpg\njd oxbs meth %" )
hist(x[hpne.hmc.ix,"hpne.bs.meth"]/x[hpne.hmc.ix,"hpne.bs.tot"], 100 , main="hmc cpg\nhpne bs meth %" )
hist(x[hpne.hmc.ix,"hpne.oxbs.meth"]/x[hpne.hmc.ix,"hpne.oxbs.tot"], 100, main="hmc cpg\nhpne oxbs meth %")
dev.off()
#get hydroxy meth percentages in and out of hmcsites
#hmc% is meth% bs - meth% oxbs
# (BS reads both 5mC and 5hmC as methylated; OxBS reads only 5mC, so the
# difference of the two fractions estimates the 5hmC fraction.)
jd.hmcPerc = x[,"jd.bs.meth"]/x[,"jd.bs.tot"] - x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"]
jd.hmcPerc.hmcSites = x[jd.hmc.ix,"jd.bs.meth"]/x[jd.hmc.ix,"jd.bs.tot"] - x[jd.hmc.ix,"jd.oxbs.meth"]/x[jd.hmc.ix,"jd.oxbs.tot"]
hpne.hmcPerc = x[,"hpne.bs.meth"]/x[,"hpne.bs.tot"] - x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"]
hpne.hmcPerc.hmcSites = x[hpne.hmc.ix,"hpne.bs.meth"]/x[hpne.hmc.ix,"hpne.bs.tot"] - x[hpne.hmc.ix,"hpne.oxbs.meth"]/x[hpne.hmc.ix,"hpne.oxbs.tot"]
#how many sites with hmc > 50%?
mean(jd.hmcPerc > .50)
mean(hpne.hmcPerc > .50)
mean(x[,"jd.pval"] < 0.05)
mean(x[,"jd.pval"] < 0.01)
sum(jd.hmcPerc > 0 )
sum(jd.hmcPerc < 0 )
length(jd.hmc.ix)
# NOTE(review): jd.mcPerc.hmcSites is not defined until later in this
# script -- this line errors if the file is run top-to-bottom.
jd.mcPerc.hmcSites
png("hmcPerc_cpg.png", 1000, 1000)
par(mfcol=c(2,2))
hist(jd.hmcPerc, 100)
hist(jd.hmcPerc.hmcSites, 100)
hist(hpne.hmcPerc, 100)
hist(hpne.hmcPerc.hmcSites, 100)
dev.off()
#overlay histograms of MC percentages
#with HMC percentages (of significant sites)
jd.mcPerc = x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"]
hpne.mcPerc = x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"]
jd.mcPerc.hmcSites = x[jd.hmc.ix,"jd.oxbs.meth"]/x[jd.hmc.ix,"jd.oxbs.tot"]
hpne.mcPerc.hmcSites = x[hpne.hmc.ix,"hpne.oxbs.meth"]/x[hpne.hmc.ix,"hpne.oxbs.tot"]
# Summary statistics of the 5mC and 5hmC fraction distributions.
mean(jd.mcPerc)
sd(jd.mcPerc)
mean(hpne.mcPerc)
sd(hpne.mcPerc)
mean(jd.hmcPerc.hmcSites)
sd(jd.hmcPerc.hmcSites)
mean(hpne.hmcPerc.hmcSites)
sd(hpne.hmcPerc.hmcSites)
#examine the effect of smoothing on density plots
# Three stacked panels: default bandwidth, default bandwidth with fixed
# y range, and an explicit width=0.1 kernel; red = 5mC, blue = 5hmC.
png("test1.png")
par(mfcol=c(3, 1))
plot(density(jd.mcPerc), col="red")
lines(density(jd.hmcPerc.hmcSites), col="blue")
plot(density(jd.mcPerc), col="red", ylim=c(0, 4))
lines(density(jd.hmcPerc.hmcSites), col="blue")
plot(density(jd.mcPerc, width=0.1),ylim=c(0, 4), col="red")
lines(density(jd.hmcPerc.hmcSites, width=0.1), col="blue")
dev.off()
# Final smoothed overlays used for the figures (width=0.1 kernel).
png("jd_histMCHmc_smooth.png")
plot(density(jd.mcPerc, width=0.1), ylim=c(0, 4), col="red", main="Methylation Percentage Densities: JD", xlab="Methylation Percentage")
lines(density(jd.hmcPerc.hmcSites, width=0.1), col="blue")
legend("topleft", c("MC", "HMC"), pch=16, col=c("red", "blue"))
dev.off()
png("hpne_histMCHmc_smooth.png")
plot(density(hpne.mcPerc, width=0.1), ylim=c(0, 4), col="red", main="Methylation Percentage Densities: HPNE", xlab="Methylation Percentage")
lines(density(hpne.hmcPerc.hmcSites, width=0.1), col="blue")
legend("topleft", c("MC", "HMC"), pch=16, col=c("red", "blue"))
dev.off()
png("hists_McPerc_smooth.png")
plot(density(jd.mcPerc, width=0.1), col="red", main="MC Percentage Densities", xlab="Methylation Percentage")
lines(density(hpne.mcPerc, width=.1), col="blue")
legend("topleft", c("JD", "HPNE"), pch=16, col=c("red", "blue"))
dev.off()
png("hists_HmcPerc_smooth.png")
plot(density(jd.hmcPerc.hmcSites, width=0.10), col="red", ylim=c(0, 4), main="HMC Percentage Densities", xlab="Hydroxy-Methylation Percentage")
lines(density(hpne.hmcPerc.hmcSites, width=.10), col="blue")
legend("topleft", c("JD", "HPNE"), pch=16, col=c("red", "blue"))
dev.off()
# Four semi-transparent overlay-histogram figures built with the same
# recipe: compute both histograms with plot=F, take a common x/y range,
# draw the first, then draw the second with add=TRUE.
# NOTE(review): xlab = 'Lengths' looks like a copy-paste leftover from an
# example -- the axis actually shows methylation fraction; confirm before
# publishing these figures.
#JD
png("jd_histMcHmc.png")
#methylation histogram over all cpg sites
hist.mc = hist(x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"], plot=F , 100 )
#hmc histogram over all significant cpg sites
hist.hmc.sig = hist(jd.hmcPerc.hmcSites, plot=F, 100)
xlim <- range(hist.mc$breaks,hist.hmc.sig$breaks)
ylim <- range(0,hist.mc$density, hist.hmc.sig$density)
## plot the first graph
plot(hist.mc,xlim = xlim, ylim = ylim,
col = rgb(1,0,0,0.4),xlab = 'Lengths',
freq = FALSE, ## relative, not absolute frequency
main = 'JD: Methylation')
## plot the second graph on top of this
opar <- par(new = FALSE)
plot(hist.hmc.sig,xlim = xlim, ylim = ylim,
xaxt = 'n', yaxt = 'n', ## don't add axes
col = rgb(0,0,1,0.4), add = TRUE,
freq = FALSE) ## relative, not absolute frequency
## add a legend in the corner
legend('topleft',c('MC','HMC'),
fill = rgb(1:0,0,0:1,0.4), bty = 'n',
border = NA)
par(opar)
dev.off()
#HPNE
png("hpne_histMcHmc.png")
#methylation histogram over all cpg sites
hist.mc = hist(x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"], plot=F , 100 )
#hmc histogram over all significant cpg sites
hist.hmc.sig = hist(hpne.hmcPerc.hmcSites, plot=F, 100)
xlim <- range(hist.mc$breaks,hist.hmc.sig$breaks)
ylim <- range(0,hist.mc$density, hist.hmc.sig$density)
## plot the first graph
plot(hist.mc,xlim = xlim, ylim = ylim,
col = rgb(1,0,0,0.4),xlab = 'Lengths',
freq = FALSE, ## relative, not absolute frequency
main = 'HPNE: Methylation')
## plot the second graph on top of this
opar <- par(new = FALSE)
plot(hist.hmc.sig,xlim = xlim, ylim = ylim,
xaxt = 'n', yaxt = 'n', ## don't add axes
col = rgb(0,0,1,0.4), add = TRUE,
freq = FALSE) ## relative, not absolute frequency
## add a legend in the corner
legend('topleft',c('MC','HMC'),
fill = rgb(1:0,0,0:1,0.4), bty = 'n',
border = NA)
par(opar)
dev.off()
#overlay hmc jd and hpne
png("hists_HmcPerc.png")
hist.hmc.jd= hist(jd.hmcPerc.hmcSites, plot=F, 100)
hist.hmc.hpne= hist(hpne.hmcPerc.hmcSites, plot=F, 100)
xlim <- range(hist.hmc.jd$breaks,hist.hmc.hpne$breaks)
ylim <- range(0,hist.hmc.jd$density, hist.hmc.hpne$density)
## plot the first graph
plot(hist.hmc.jd,xlim = xlim, ylim = ylim,
col = rgb(1,0,0,0.4),xlab = 'Lengths',
freq = FALSE, ## relative, not absolute frequency
main = 'HMC: JD, HPNE')
## plot the second graph on top of this
opar <- par(new = FALSE)
plot(hist.hmc.hpne,xlim = xlim, ylim = ylim,
xaxt = 'n', yaxt = 'n', ## don't add axes
col = rgb(0,0,1,0.4), add = TRUE,
freq = FALSE) ## relative, not absolute frequency
## add a legend in the corner
legend('topleft',c('JD','HPNE'),
fill = rgb(1:0,0,0:1,0.4), bty = 'n',
border = NA)
par(opar)
dev.off()
#overlay mc: jd, hpne
png("hists_McPerc.png")
hist.mc.jd = hist(x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"], plot=F, 100 )
hist.mc.hpne = hist(x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"], plot=F, 100)
xlim <- range(hist.mc.jd$breaks,hist.mc.hpne$breaks)
ylim <- range(0,hist.mc.jd$density, hist.mc.hpne$density)
## plot the first graph
plot(hist.mc.jd,xlim = xlim, ylim = ylim,
col = rgb(1,0,0,0.4),xlab = 'Lengths',
freq = FALSE, ## relative, not absolute frequency
main = 'MC: JD, HPNE')
## plot the second graph on top of this
opar <- par(new = FALSE)
plot(hist.mc.hpne,xlim = xlim, ylim = ylim,
xaxt = 'n', yaxt = 'n', ## don't add axes
col = rgb(0,0,1,0.4), add = TRUE,
freq = FALSE) ## relative, not absolute frequency
## add a legend in the corner
legend('topleft',c('JD','HPNE'),
fill = rgb(1:0,0,0:1,0.4), bty = 'n',
border = NA)
par(opar)
dev.off()
# there are a total of 27,999,538 cpg sites in genome
# we have data for 11,296,328 of them
#brd promoter region
#look at hmc on chr19 between these points
# BRD4 gene body on chr19, padded by 1.5 kb on each side.
p1 = 15391262 - 1500
p2 = 15443342 + 1500
head(x)
x.19 = x[x[,1] == "chr19",]
ix.brd4 = which((x.19$pos >= p1) & (x.19$pos < p2))
length(ix.brd4)
#plot out mc and hmc over this region
x.brd4 = x.19[ix.brd4,]
head(x.brd4)
jd.mc = x.brd4$jd.oxbs.meth/x.brd4$jd.oxbs.tot
jd.hmc = x.brd4[,"jd.bs.meth"]/x.brd4[,"jd.bs.tot"] - x.brd4[,"jd.oxbs.meth"]/x.brd4[,"jd.oxbs.tot"]
hpne.mc = x.brd4$hpne.oxbs.meth/x.brd4$hpne.oxbs.tot
hpne.hmc = x.brd4[,"hpne.bs.meth"]/x.brd4[,"hpne.bs.tot"] - x.brd4[,"hpne.oxbs.meth"]/x.brd4[,"hpne.oxbs.tot"]
# NOTE(review): `hmc` is never defined in this script -- presumably jd.hmc
# or hpne.hmc was meant; these two lines error as written.
hmc
stem(hmc)
#draw the less confidence dots with transparency
# Blue alpha ramp: lower p-value -> more opaque hmC point.
blues = sapply(1:100, function(i){
rgb(0,0,1,i/100.0)
})
jd.hmc.cols =blues[101 - round(x.brd4$jd.pval, 2)*100]
hpne.hmc.cols =blues[101 - round(x.brd4$hpne.pval, 2)*100]
reds = sapply(1:100, function(i){
rgb(1,0,0,i/100.0)
})
#more reads -> more confidences
# Red alpha ramp scaled by OxBS read depth relative to the region maximum.
n = max(x.brd4$jd.oxbs.tot)
jd.mc.cols = reds[round(x.brd4$jd.oxbs.tot / n * 100)]
hpne.mc.cols = reds[round(x.brd4$hpne.oxbs.tot / n * 100)]
png("jd_brd4_mc_hmc_conf.png")
plot(x.brd4$pos, jd.hmc, col=jd.hmc.cols, ylim=c(-1, 1), ylab="methylation")
points(x.brd4$pos, jd.mc, col=jd.mc.cols)
dev.off()
png("hpne_brd4_mc_hmc_conf.png")
plot(x.brd4$pos, hpne.hmc, col=hpne.hmc.cols, ylim=c(-1, 1), ylab="methylation")
points(x.brd4$pos, hpne.mc, col=hpne.mc.cols)
dev.off()
# Scatter of 5hmC (blue) and 5mC (red) fractions over the BRD4 region, JD.
# Fix: the original plotted undefined objects `hmc`/`mc`; the output
# filename and the preceding confidence-coloured JD figure indicate the
# JD vectors were intended.
png("jd_brd4_mc_hmc.png")
plot(x.brd4$pos, jd.hmc, col="blue", ylim=c(-1, 1), ylab="methylation")
points(x.brd4$pos, jd.mc, col="red")
dev.off()
# Interactive (x11) scratch plots of the same region.
x11()
plot(x.brd4$pos, hpne.hmc, col="blue", ylim=c(-1, 1), ylab="methylation")
points(x.brd4$pos, hpne.mc, col="red")
plot(x.brd4$pos, ylim=c(-1, 1), xlim=c(min(x.brd4$pos), max(x.brd4$pos)))
# NOTE(review): `mc`/`hmc` are undefined -- presumably jd.mc/jd.hmc or the
# hpne vectors were meant; confirm before reusing this scratch code.
segments(x0=x.brd4$pos, y0=x.brd4$pos*0, y1=mc, col="red")
segments(x0=x.brd4$pos, y0=x.brd4$pos*0, y1=hmc, col="blue")
#find places where methylation is significantly different between
#hpne and jd
# Per-CpG Fisher exact test on the 2x2 table of methylated vs
# unmethylated BS reads (JD vs HPNE).
# Fix: the original preallocated `diffmeth.pvals`/`diffmeth.or` but then
# assigned into the undefined `meth.pvals`/`meth.or` (erroring on the
# first iteration); all downstream code reads `meth.pvals`/`meth.or`,
# so the preallocation now uses those names.
meth.pvals = NA*numeric(nrow(x))
meth.or = NA*numeric(nrow(x))
for (i in seq_len(nrow(x))){
  if (i %% 10000 == 0){
    print(i)  # progress marker, every 10k sites
  }
  # 2x2 table counts; bs.meth presumably counts methylated (unconverted)
  # reads -- the original comments were contradictory, so verify upstream.
  jd.meth = x[i,"jd.bs.meth"]
  jd.unmeth = x[i,"jd.bs.tot"] - jd.meth
  hpne.meth = x[i,"hpne.bs.meth"]
  hpne.unmeth = x[i,"hpne.bs.tot"] - hpne.meth
  #see if there's a significant difference in the ratios
  #of methylated reads between jd and hpne
  res = fisher.test(matrix(c(jd.meth, jd.unmeth, hpne.meth, hpne.unmeth), ncol=2))
  meth.pvals[i] = res$p.value   # two-sided Fisher p-value
  meth.or[i] = res$estimate     # conditional MLE odds ratio
}
# Checkpoint the workspace after the (slow) per-site Fisher loop.
save.image("work_diffMeth.rData")
# Multiple-testing correction (p.adjust default is Holm).
qvals = p.adjust(meth.pvals)
ix.sig = qvals < 0.05
#is this hmc loss and gain?
#more meth in JD, sig hmc in HPNE
sum(ix.sig & meth.or > 1 & x[,"hpne.pval"] < 0.05)
head(x[ix.sig & meth.or > 1 & x[,"hpne.pval"] < 0.05,])
#more meth in hpne, sig hmc in JD
sum(ix.sig & meth.or < 1 & x[,"jd.pval"] < 0.05)
x[ix.sig & meth.or < 1 & x[,"jd.pval"] < 0.05,]
#collect the four sets
#sites with significant HMC in JD
#sites with significant HMC in HPNE
#sites with significantly more MC in JD than HPNE
#sites with significantly more MC in HPNE than JD
#get site distributions for these four sets
sum(x[,"jd.pval"] < 0.05)
sum(x[,"jd.pval"] < 0.01)
sum(x[,"hpne.pval"] < 0.01)
sum(x[,"hpne.pval"] < 0.05)
png("methSigDiff_hist.png")
par(mfcol=c(2,1))
hist(log(meth.or), 1000)
hist(log(meth.or[qvals < 0.2]), 1000)
dev.off()
qvals = p.adjust(meth.pvals)
sum(meth.or < 1 & qvals < 0.2)
sum(meth.or > 1 & qvals < 0.2)
sum(qvals < 0.2)
ix = meth.or > 1 & qvals < 0.2
head(x[ix,])
# Site lists (chr, pos) for the four categories; q<0.2 for differential
# 5mC, p<0.05 (with OR>1) for 5hmC.
jd.mc.sites = x[meth.or > 1 & qvals < 0.2,1:2]
hpne.mc.sites = x[meth.or < 1 & qvals < 0.2,1:2]
jd.hmc.sites = x[(x[,"jd.or"] > 1) & (x[,"jd.pval"] < 0.05),1:2]
hpne.hmc.sites = x[(x[,"hpne.or"] > 1) & (x[,"hpne.pval"] < 0.05),1:2]
dim(jd.mc.sites)
dim(hpne.mc.sites)
dim(jd.hmc.sites)
dim(hpne.hmc.sites)
write.table(file="sites_fixed2_jdMc.txt", jd.mc.sites, col.names=F, row.names=F, quote=F, sep="\t")
write.table(file="sites_fixed2_hpneMc.txt", hpne.mc.sites, col.names=F, row.names=F, quote=F, sep="\t")
write.table(file="sites_fixed2_jdHmc_p05.txt", jd.hmc.sites, col.names=F, row.names=F, quote=F, sep="\t")
write.table(file="sites_fixed2_hpneHmc_p05.txt", hpne.hmc.sites, col.names=F, row.names=F, quote=F, sep="\t")
#what if we only look at the sites with significant meth
# NOTE(review): there is no "jd.meth" column (colnames has "jd.bs.meth");
# this line errors as written.
x[,"jd.meth"]
# Spot-check one site's 2x2 table by hand.
i = 1
a = x[i,"jd.bs.meth"]
b = x[i,"jd.bs.tot"] - a
c = x[i,"hpne.bs.meth"]
d = x[i,"hpne.bs.tot"] - c
matrix(c(a,b,c,d), 2)
# Re-adjust the hmC p-values only within the differentially-methylated set.
ix = which((qvals < 0.05) & (x[,"jd.pval"] < 0.05))
a = x[ix, "jd.pval"]
b = p.adjust(a)
c = which(b < 0.05)
x[ix[c],]
which(p.adjust(x[(meth.pvals < 0.05) & (x[,"jd.pval"] < 0.05), "jd.pval"]) < 0.05)
x[c(19880, 24423),]
sum((meth.pvals > 0.05) & (x[,"jd.pval"] < 0.05))
sum((qvals >= 0.05) & (x[,"jd.pval"] < 0.05))
sum((qvals < 0.05) & (x[qvals < 0.05,"hpne.pval"] < 0.05))
q1 = p.adjust(x[qvals < 0.05,"hpne.pval"])
sum(q1 < 0.15)
sum(meth.pvals < 0.05)
# Fix: a dangling `fisher.test(` with no arguments or closing paren made
# everything after it unparseable; commented out (abandoned scratch line).
#fisher.test(
######################################
##scratch
#view the histograms of odds ratios.
#write out the places with significant difference between jd and hpne
# Ad-hoc interactive exploration of the first n sites' p-values and odds
# ratios (log scale, excluding OR == 1 to avoid a spike at 0).
n = 320000
hist(meth.pvals, 100)
hist(meth.pvals, 100)
hist(log(meth.or[meth.or != 1]), 1000)
hist(log(meth.or[meth.or != 1]), 100)
hist(log(meth.or[1:n][meth.or[1:n] != 1]), 1000)
hist(log(meth.or[1:n][meth.or[1:n] != 1]), 100)
sum(meth.pvals[1:n] < 0.05, na.rm=T)
qvals = p.adjust(meth.pvals)
mean(qvals < 0.05)
sum(qvals < 0.05)
hist(log(meth.or[qvals < 0.05]), 100)
ix = which(qvals < 0.05)
length(ix)
x[ix,]
| /sanchari/j432_j446/examineFixedOxbs.r | no_license | chungtseng/hmcPaper_scripts2 | R | false | false | 16,862 | r | library("VariantAnnotation")
# hg19 transcript annotation used by getSiteCounts/locateVariants
# (VariantAnnotation, loaded just above, supplies GRanges/locateVariants).
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
txdb_hg19 <- TxDb.Hsapiens.UCSC.hg19.knownGene
#1. Want to look at the methylation percentages of
#the jd and hpne samples at cpg sites.
#amit said the meth percentages looked a little high
#for the jd sample(over half had 100%)
#the mc histograms of the hmcSites should be high
#in the BS sample, and low in the OxBS sample
options(stringsAsFactors=F)
#load in the merged data
# One row per CpG site: position, then per-sample (JD, HPNE) BS and OxBS
# methylated/total read counts plus an odds ratio and p-value for 5hmC.
x = read.table("hmcSites_fixed2/cpg/hmcSites_BOTH_all_cpg_common4.txt")
colnames(x) = c("chr", "pos",
"jd.bs.meth", "jd.bs.tot", "jd.oxbs.meth", "jd.oxbs.tot", "jd.or", "jd.pval",
"hpne.bs.meth", "hpne.bs.tot", "hpne.oxbs.meth", "hpne.oxbs.tot", "hpne.or", "hpne.pval"
)
# Sanity checks: peek at the table and mean read depths per assay.
head(x)
tail(x)
dim(x)
mean(x$jd.bs.tot)
mean(x$jd.oxbs.tot)
mean(x$hpne.bs.tot)
mean(x$hpne.oxbs.tot)
#for jd and hpne
#how many sites have at least 4 reads
#amongst those sites, what is average depth?
#try to get measures of overall methylation between samples
#what percentage of cpg sites have methylation less than 50%
# (5mC fraction = OxBS methylated / OxBS total reads)
ix.jd.lt50 = (x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"] <= .50)
ix.hpne.lt50 = (x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"] <= .50)
mean(x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"] <= .50)
mean(x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"] <= .50)
#what percentage of cpg sites have methylation greater than 50%
ix.jd.gt50 = (x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"] > .50)
ix.hpne.gt50 = (x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"] > .50)
mean(x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"] > .50)
mean(x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"] > .50)
#what percentage of cpg sites have total methylation
ix.jd.100 = (x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"] == 1)
ix.hpne.100 = (x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"] == 1)
mean(ix.jd.100)
mean(ix.hpne.100)
mean(x[,"jd.pval"] < 0.05)
mean(x[,"hpne.pval"] < 0.05)
#get indices of the signif hmc sites
jd.hmc.ix = which(x[,"jd.pval"] < 0.05)
hpne.hmc.ix = which(x[,"hpne.pval"] < 0.05)
length(jd.hmc.ix) / nrow(x)
length(hpne.hmc.ix) / nrow(x)
length(jd.hmc.ix)
length(hpne.hmc.ix)
#write the site lists
# Tab-separated (chr, pos) files consumed by later annotation steps.
write.table(x[jd.hmc.ix,1:2], file="jd_hmc_p05.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[hpne.hmc.ix,1:2], file="hpne_hmc_p05.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[ix.jd.gt50,1:2], file="jd_mc_gt50.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[ix.hpne.gt50,1:2], file="hpne_mc_gt50.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[ix.jd.lt50,1:2], file="jd_mc_lt50.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[ix.hpne.lt50,1:2], file="hpne_mc_lt50.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[ix.jd.100,1:2], file="jd_mc_100.txt", col.names=F, row.names=F, quote=F, sep="\t")
write.table(x[ix.hpne.100,1:2], file="hpne_mc_100.txt", col.names=F, row.names=F, quote=F, sep="\t")
#get percentage of sites that lie within exons introns promoters etc
# Tally the genomic-feature categories (coding/intron/promoter/...) for a
# set of CpG sites.  `x` supplies chromosome names in column 1 and 1-based
# positions in column 2; annotation comes from the global `txdb_hg19`.
# Returns a table of counts keyed by locateVariants' LOCATION factor.
getSiteCounts <- function(x){
  sites <- GRanges(seqnames = x[, 1],
                   ranges = IRanges(x[, 2], x[, 2] + 1),
                   strand = "*")
  hits <- locateVariants(sites, txdb_hg19, AllVariants())
  hits <- hits[!duplicated(hits)]  # one annotation record per site/feature
  table(hits$LOCATION)
}
# Genomic-feature distribution of the significant 5hmC sites, per sample,
# as raw counts and as proportions.
sCounts.jd.hmc = getSiteCounts(x[jd.hmc.ix,])
sCounts.hpne.hmc = getSiteCounts(x[hpne.hmc.ix,])
sCounts.jd.hmc
sCounts.hpne.hmc
sCounts.jd.hmc/sum(sCounts.jd.hmc)
sCounts.hpne.hmc/sum(sCounts.hpne.hmc)
#meth > 50%
# Same feature distribution for highly methylated (>50% 5mC) CpGs.
sCounts.jd.mc = getSiteCounts(x[ix.jd.gt50,])
sCounts.hpne.mc = getSiteCounts(x[ix.hpne.gt50,])
sCounts.jd.mc
sCounts.hpne.mc
sCounts.jd.mc/sum(sCounts.jd.mc)
sCounts.hpne.mc/sum(sCounts.hpne.mc)
#histograms of methylation across samples
# BS vs OxBS per-site methylation fractions, all CpGs (2x2 panel).
png("methPerc_allCpg.png")
par(mfcol=c(2,2))
hist(x[,"jd.bs.meth"]/x[,"jd.bs.tot"], 100, main="all cpg\njd bs meth %" )
hist(x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"], 100, main="all cpg\njd oxbs meth %" )
hist(x[,"hpne.bs.meth"]/x[,"hpne.bs.tot"], 100, main="all cpg\nhpne bs meth %" )
hist(x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"], 100, main="all cpg\nhpne oxbs meth %")
dev.off()
# Same panels restricted to the significant hmC sites.
png("methPerc_hmcCpg.png")
par(mfcol=c(2,2))
hist(x[jd.hmc.ix,"jd.bs.meth"]/x[jd.hmc.ix,"jd.bs.tot"], 100 , main="hmc cpg\njd bs meth %" )
hist(x[jd.hmc.ix,"jd.oxbs.meth"]/x[jd.hmc.ix,"jd.oxbs.tot"], 100 , main="hmc cpg\njd oxbs meth %" )
hist(x[hpne.hmc.ix,"hpne.bs.meth"]/x[hpne.hmc.ix,"hpne.bs.tot"], 100 , main="hmc cpg\nhpne bs meth %" )
hist(x[hpne.hmc.ix,"hpne.oxbs.meth"]/x[hpne.hmc.ix,"hpne.oxbs.tot"], 100, main="hmc cpg\nhpne oxbs meth %")
dev.off()
#get hydroxy meth percentages in and out of hmcsites
#hmc% is meth% bs - meth% oxbs
# (BS reads both 5mC and 5hmC as methylated; OxBS reads only 5mC, so the
# difference of the two fractions estimates the 5hmC fraction.)
jd.hmcPerc = x[,"jd.bs.meth"]/x[,"jd.bs.tot"] - x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"]
jd.hmcPerc.hmcSites = x[jd.hmc.ix,"jd.bs.meth"]/x[jd.hmc.ix,"jd.bs.tot"] - x[jd.hmc.ix,"jd.oxbs.meth"]/x[jd.hmc.ix,"jd.oxbs.tot"]
hpne.hmcPerc = x[,"hpne.bs.meth"]/x[,"hpne.bs.tot"] - x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"]
hpne.hmcPerc.hmcSites = x[hpne.hmc.ix,"hpne.bs.meth"]/x[hpne.hmc.ix,"hpne.bs.tot"] - x[hpne.hmc.ix,"hpne.oxbs.meth"]/x[hpne.hmc.ix,"hpne.oxbs.tot"]
#how many sites with hmc > 50%?
mean(jd.hmcPerc > .50)
mean(hpne.hmcPerc > .50)
mean(x[,"jd.pval"] < 0.05)
mean(x[,"jd.pval"] < 0.01)
sum(jd.hmcPerc > 0 )
sum(jd.hmcPerc < 0 )
length(jd.hmc.ix)
# NOTE(review): jd.mcPerc.hmcSites is not defined until later in this
# script -- this line errors if the file is run top-to-bottom.
jd.mcPerc.hmcSites
png("hmcPerc_cpg.png", 1000, 1000)
par(mfcol=c(2,2))
hist(jd.hmcPerc, 100)
hist(jd.hmcPerc.hmcSites, 100)
hist(hpne.hmcPerc, 100)
hist(hpne.hmcPerc.hmcSites, 100)
dev.off()
#overlay histograms of MC percentages
#with HMC percentages (of significant sites)
jd.mcPerc = x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"]
hpne.mcPerc = x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"]
jd.mcPerc.hmcSites = x[jd.hmc.ix,"jd.oxbs.meth"]/x[jd.hmc.ix,"jd.oxbs.tot"]
hpne.mcPerc.hmcSites = x[hpne.hmc.ix,"hpne.oxbs.meth"]/x[hpne.hmc.ix,"hpne.oxbs.tot"]
# Summary statistics of the 5mC and 5hmC fraction distributions.
mean(jd.mcPerc)
sd(jd.mcPerc)
mean(hpne.mcPerc)
sd(hpne.mcPerc)
mean(jd.hmcPerc.hmcSites)
sd(jd.hmcPerc.hmcSites)
mean(hpne.hmcPerc.hmcSites)
sd(hpne.hmcPerc.hmcSites)
#examine the effect of smoothing on density plots
# Three stacked panels: default bandwidth, default bandwidth with fixed
# y range, and an explicit width=0.1 kernel; red = 5mC, blue = 5hmC.
png("test1.png")
par(mfcol=c(3, 1))
plot(density(jd.mcPerc), col="red")
lines(density(jd.hmcPerc.hmcSites), col="blue")
plot(density(jd.mcPerc), col="red", ylim=c(0, 4))
lines(density(jd.hmcPerc.hmcSites), col="blue")
plot(density(jd.mcPerc, width=0.1),ylim=c(0, 4), col="red")
lines(density(jd.hmcPerc.hmcSites, width=0.1), col="blue")
dev.off()
# Final smoothed overlays used for the figures (width=0.1 kernel).
png("jd_histMCHmc_smooth.png")
plot(density(jd.mcPerc, width=0.1), ylim=c(0, 4), col="red", main="Methylation Percentage Densities: JD", xlab="Methylation Percentage")
lines(density(jd.hmcPerc.hmcSites, width=0.1), col="blue")
legend("topleft", c("MC", "HMC"), pch=16, col=c("red", "blue"))
dev.off()
png("hpne_histMCHmc_smooth.png")
plot(density(hpne.mcPerc, width=0.1), ylim=c(0, 4), col="red", main="Methylation Percentage Densities: HPNE", xlab="Methylation Percentage")
lines(density(hpne.hmcPerc.hmcSites, width=0.1), col="blue")
legend("topleft", c("MC", "HMC"), pch=16, col=c("red", "blue"))
dev.off()
png("hists_McPerc_smooth.png")
plot(density(jd.mcPerc, width=0.1), col="red", main="MC Percentage Densities", xlab="Methylation Percentage")
lines(density(hpne.mcPerc, width=.1), col="blue")
legend("topleft", c("JD", "HPNE"), pch=16, col=c("red", "blue"))
dev.off()
png("hists_HmcPerc_smooth.png")
plot(density(jd.hmcPerc.hmcSites, width=0.10), col="red", ylim=c(0, 4), main="HMC Percentage Densities", xlab="Hydroxy-Methylation Percentage")
lines(density(hpne.hmcPerc.hmcSites, width=.10), col="blue")
legend("topleft", c("JD", "HPNE"), pch=16, col=c("red", "blue"))
dev.off()
# Four semi-transparent overlay-histogram figures built with the same
# recipe: compute both histograms with plot=F, take a common x/y range,
# draw the first, then draw the second with add=TRUE.
# NOTE(review): xlab = 'Lengths' looks like a copy-paste leftover from an
# example -- the axis actually shows methylation fraction; confirm before
# publishing these figures.
#JD
png("jd_histMcHmc.png")
#methylation histogram over all cpg sites
hist.mc = hist(x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"], plot=F , 100 )
#hmc histogram over all significant cpg sites
hist.hmc.sig = hist(jd.hmcPerc.hmcSites, plot=F, 100)
xlim <- range(hist.mc$breaks,hist.hmc.sig$breaks)
ylim <- range(0,hist.mc$density, hist.hmc.sig$density)
## plot the first graph
plot(hist.mc,xlim = xlim, ylim = ylim,
col = rgb(1,0,0,0.4),xlab = 'Lengths',
freq = FALSE, ## relative, not absolute frequency
main = 'JD: Methylation')
## plot the second graph on top of this
opar <- par(new = FALSE)
plot(hist.hmc.sig,xlim = xlim, ylim = ylim,
xaxt = 'n', yaxt = 'n', ## don't add axes
col = rgb(0,0,1,0.4), add = TRUE,
freq = FALSE) ## relative, not absolute frequency
## add a legend in the corner
legend('topleft',c('MC','HMC'),
fill = rgb(1:0,0,0:1,0.4), bty = 'n',
border = NA)
par(opar)
dev.off()
#HPNE
png("hpne_histMcHmc.png")
#methylation histogram over all cpg sites
hist.mc = hist(x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"], plot=F , 100 )
#hmc histogram over all significant cpg sites
hist.hmc.sig = hist(hpne.hmcPerc.hmcSites, plot=F, 100)
xlim <- range(hist.mc$breaks,hist.hmc.sig$breaks)
ylim <- range(0,hist.mc$density, hist.hmc.sig$density)
## plot the first graph
plot(hist.mc,xlim = xlim, ylim = ylim,
col = rgb(1,0,0,0.4),xlab = 'Lengths',
freq = FALSE, ## relative, not absolute frequency
main = 'HPNE: Methylation')
## plot the second graph on top of this
opar <- par(new = FALSE)
plot(hist.hmc.sig,xlim = xlim, ylim = ylim,
xaxt = 'n', yaxt = 'n', ## don't add axes
col = rgb(0,0,1,0.4), add = TRUE,
freq = FALSE) ## relative, not absolute frequency
## add a legend in the corner
legend('topleft',c('MC','HMC'),
fill = rgb(1:0,0,0:1,0.4), bty = 'n',
border = NA)
par(opar)
dev.off()
#overlay hmc jd and hpne
png("hists_HmcPerc.png")
hist.hmc.jd= hist(jd.hmcPerc.hmcSites, plot=F, 100)
hist.hmc.hpne= hist(hpne.hmcPerc.hmcSites, plot=F, 100)
xlim <- range(hist.hmc.jd$breaks,hist.hmc.hpne$breaks)
ylim <- range(0,hist.hmc.jd$density, hist.hmc.hpne$density)
## plot the first graph
plot(hist.hmc.jd,xlim = xlim, ylim = ylim,
col = rgb(1,0,0,0.4),xlab = 'Lengths',
freq = FALSE, ## relative, not absolute frequency
main = 'HMC: JD, HPNE')
## plot the second graph on top of this
opar <- par(new = FALSE)
plot(hist.hmc.hpne,xlim = xlim, ylim = ylim,
xaxt = 'n', yaxt = 'n', ## don't add axes
col = rgb(0,0,1,0.4), add = TRUE,
freq = FALSE) ## relative, not absolute frequency
## add a legend in the corner
legend('topleft',c('JD','HPNE'),
fill = rgb(1:0,0,0:1,0.4), bty = 'n',
border = NA)
par(opar)
dev.off()
#overlay mc: jd, hpne
png("hists_McPerc.png")
hist.mc.jd = hist(x[,"jd.oxbs.meth"]/x[,"jd.oxbs.tot"], plot=F, 100 )
hist.mc.hpne = hist(x[,"hpne.oxbs.meth"]/x[,"hpne.oxbs.tot"], plot=F, 100)
xlim <- range(hist.mc.jd$breaks,hist.mc.hpne$breaks)
ylim <- range(0,hist.mc.jd$density, hist.mc.hpne$density)
## plot the first graph
plot(hist.mc.jd,xlim = xlim, ylim = ylim,
col = rgb(1,0,0,0.4),xlab = 'Lengths',
freq = FALSE, ## relative, not absolute frequency
main = 'MC: JD, HPNE')
## plot the second graph on top of this
opar <- par(new = FALSE)
plot(hist.mc.hpne,xlim = xlim, ylim = ylim,
xaxt = 'n', yaxt = 'n', ## don't add axes
col = rgb(0,0,1,0.4), add = TRUE,
freq = FALSE) ## relative, not absolute frequency
## add a legend in the corner
legend('topleft',c('JD','HPNE'),
fill = rgb(1:0,0,0:1,0.4), bty = 'n',
border = NA)
par(opar)
dev.off()
# there are a total of 27,999,538 cpg sites in genome
# we have data for 11,296,328 of them
#brd promoter region
#look at hmc on chr19 between these points
# BRD4 gene body on chr19, padded by 1.5 kb on each side.
p1 = 15391262 - 1500
p2 = 15443342 + 1500
head(x)
x.19 = x[x[,1] == "chr19",]
ix.brd4 = which((x.19$pos >= p1) & (x.19$pos < p2))
length(ix.brd4)
#plot out mc and hmc over this region
x.brd4 = x.19[ix.brd4,]
head(x.brd4)
jd.mc = x.brd4$jd.oxbs.meth/x.brd4$jd.oxbs.tot
jd.hmc = x.brd4[,"jd.bs.meth"]/x.brd4[,"jd.bs.tot"] - x.brd4[,"jd.oxbs.meth"]/x.brd4[,"jd.oxbs.tot"]
hpne.mc = x.brd4$hpne.oxbs.meth/x.brd4$hpne.oxbs.tot
hpne.hmc = x.brd4[,"hpne.bs.meth"]/x.brd4[,"hpne.bs.tot"] - x.brd4[,"hpne.oxbs.meth"]/x.brd4[,"hpne.oxbs.tot"]
# NOTE(review): `hmc` is never defined in this script -- presumably jd.hmc
# or hpne.hmc was meant; these two lines error as written.
hmc
stem(hmc)
#draw the less confidence dots with transparency
# Blue alpha ramp: lower p-value -> more opaque hmC point.
blues = sapply(1:100, function(i){
rgb(0,0,1,i/100.0)
})
jd.hmc.cols =blues[101 - round(x.brd4$jd.pval, 2)*100]
hpne.hmc.cols =blues[101 - round(x.brd4$hpne.pval, 2)*100]
reds = sapply(1:100, function(i){
rgb(1,0,0,i/100.0)
})
#more reads -> more confidences
# Red alpha ramp scaled by OxBS read depth relative to the region maximum.
n = max(x.brd4$jd.oxbs.tot)
jd.mc.cols = reds[round(x.brd4$jd.oxbs.tot / n * 100)]
hpne.mc.cols = reds[round(x.brd4$hpne.oxbs.tot / n * 100)]
png("jd_brd4_mc_hmc_conf.png")
plot(x.brd4$pos, jd.hmc, col=jd.hmc.cols, ylim=c(-1, 1), ylab="methylation")
points(x.brd4$pos, jd.mc, col=jd.mc.cols)
dev.off()
png("hpne_brd4_mc_hmc_conf.png")
plot(x.brd4$pos, hpne.hmc, col=hpne.hmc.cols, ylim=c(-1, 1), ylab="methylation")
points(x.brd4$pos, hpne.mc, col=hpne.mc.cols)
dev.off()
# Scatter of 5hmC (blue) and 5mC (red) fractions over the BRD4 region, JD.
# Fix: the original plotted undefined objects `hmc`/`mc`; the output
# filename and the preceding confidence-coloured JD figure indicate the
# JD vectors were intended.
png("jd_brd4_mc_hmc.png")
plot(x.brd4$pos, jd.hmc, col="blue", ylim=c(-1, 1), ylab="methylation")
points(x.brd4$pos, jd.mc, col="red")
dev.off()
# Interactive (x11) scratch plots of the same region.
x11()
plot(x.brd4$pos, hpne.hmc, col="blue", ylim=c(-1, 1), ylab="methylation")
points(x.brd4$pos, hpne.mc, col="red")
plot(x.brd4$pos, ylim=c(-1, 1), xlim=c(min(x.brd4$pos), max(x.brd4$pos)))
# NOTE(review): `mc`/`hmc` are undefined -- presumably jd.mc/jd.hmc or the
# hpne vectors were meant; confirm before reusing this scratch code.
segments(x0=x.brd4$pos, y0=x.brd4$pos*0, y1=mc, col="red")
segments(x0=x.brd4$pos, y0=x.brd4$pos*0, y1=hmc, col="blue")
#find places where methylation is significantly different between
#hpne and jd
# Per-CpG Fisher exact test on the 2x2 table of methylated vs
# unmethylated BS reads (JD vs HPNE).
# Fix: the original preallocated `diffmeth.pvals`/`diffmeth.or` but then
# assigned into the undefined `meth.pvals`/`meth.or` (erroring on the
# first iteration); all downstream code reads `meth.pvals`/`meth.or`,
# so the preallocation now uses those names.
meth.pvals = NA*numeric(nrow(x))
meth.or = NA*numeric(nrow(x))
for (i in seq_len(nrow(x))){
  if (i %% 10000 == 0){
    print(i)  # progress marker, every 10k sites
  }
  # 2x2 table counts; bs.meth presumably counts methylated (unconverted)
  # reads -- the original comments were contradictory, so verify upstream.
  jd.meth = x[i,"jd.bs.meth"]
  jd.unmeth = x[i,"jd.bs.tot"] - jd.meth
  hpne.meth = x[i,"hpne.bs.meth"]
  hpne.unmeth = x[i,"hpne.bs.tot"] - hpne.meth
  #see if there's a significant difference in the ratios
  #of methylated reads between jd and hpne
  res = fisher.test(matrix(c(jd.meth, jd.unmeth, hpne.meth, hpne.unmeth), ncol=2))
  meth.pvals[i] = res$p.value   # two-sided Fisher p-value
  meth.or[i] = res$estimate     # conditional MLE odds ratio
}
# Checkpoint the workspace after the (slow) per-site Fisher loop.
save.image("work_diffMeth.rData")
# Multiple-testing correction (p.adjust default is Holm).
qvals = p.adjust(meth.pvals)
ix.sig = qvals < 0.05
#is this hmc loss and gain?
#more meth in JD, sig hmc in HPNE
sum(ix.sig & meth.or > 1 & x[,"hpne.pval"] < 0.05)
head(x[ix.sig & meth.or > 1 & x[,"hpne.pval"] < 0.05,])
#more meth in hpne, sig hmc in JD
sum(ix.sig & meth.or < 1 & x[,"jd.pval"] < 0.05)
x[ix.sig & meth.or < 1 & x[,"jd.pval"] < 0.05,]
#collect the four sets
#sites with significant HMC in JD
#sites with significant HMC in HPNE
#sites with significantly more MC in JD than HPNE
#sites with significantly more MC in HPNE than JD
#get site distributions for these four sets
sum(x[,"jd.pval"] < 0.05)
sum(x[,"jd.pval"] < 0.01)
sum(x[,"hpne.pval"] < 0.01)
sum(x[,"hpne.pval"] < 0.05)
png("methSigDiff_hist.png")
par(mfcol=c(2,1))
hist(log(meth.or), 1000)
hist(log(meth.or[qvals < 0.2]), 1000)
dev.off()
qvals = p.adjust(meth.pvals)
sum(meth.or < 1 & qvals < 0.2)
sum(meth.or > 1 & qvals < 0.2)
sum(qvals < 0.2)
ix = meth.or > 1 & qvals < 0.2
head(x[ix,])
# Site lists (chr, pos) for the four categories; q<0.2 for differential
# 5mC, p<0.05 (with OR>1) for 5hmC.
jd.mc.sites = x[meth.or > 1 & qvals < 0.2,1:2]
hpne.mc.sites = x[meth.or < 1 & qvals < 0.2,1:2]
jd.hmc.sites = x[(x[,"jd.or"] > 1) & (x[,"jd.pval"] < 0.05),1:2]
hpne.hmc.sites = x[(x[,"hpne.or"] > 1) & (x[,"hpne.pval"] < 0.05),1:2]
dim(jd.mc.sites)
dim(hpne.mc.sites)
dim(jd.hmc.sites)
dim(hpne.hmc.sites)
write.table(file="sites_fixed2_jdMc.txt", jd.mc.sites, col.names=F, row.names=F, quote=F, sep="\t")
write.table(file="sites_fixed2_hpneMc.txt", hpne.mc.sites, col.names=F, row.names=F, quote=F, sep="\t")
write.table(file="sites_fixed2_jdHmc_p05.txt", jd.hmc.sites, col.names=F, row.names=F, quote=F, sep="\t")
write.table(file="sites_fixed2_hpneHmc_p05.txt", hpne.hmc.sites, col.names=F, row.names=F, quote=F, sep="\t")
#what if we only look at the sites with significant meth
# NOTE(review): there is no "jd.meth" column (colnames has "jd.bs.meth");
# this line errors as written.
x[,"jd.meth"]
# Spot-check one site's 2x2 table by hand.
i = 1
a = x[i,"jd.bs.meth"]
b = x[i,"jd.bs.tot"] - a
c = x[i,"hpne.bs.meth"]
d = x[i,"hpne.bs.tot"] - c
matrix(c(a,b,c,d), 2)
# Re-adjust the hmC p-values only within the differentially-methylated set.
ix = which((qvals < 0.05) & (x[,"jd.pval"] < 0.05))
a = x[ix, "jd.pval"]
b = p.adjust(a)
c = which(b < 0.05)
x[ix[c],]
which(p.adjust(x[(meth.pvals < 0.05) & (x[,"jd.pval"] < 0.05), "jd.pval"]) < 0.05)
x[c(19880, 24423),]
sum((meth.pvals > 0.05) & (x[,"jd.pval"] < 0.05))
sum((qvals >= 0.05) & (x[,"jd.pval"] < 0.05))
sum((qvals < 0.05) & (x[qvals < 0.05,"hpne.pval"] < 0.05))
q1 = p.adjust(x[qvals < 0.05,"hpne.pval"])
sum(q1 < 0.15)
sum(meth.pvals < 0.05)
# Fix: a dangling `fisher.test(` with no arguments or closing paren made
# everything after it unparseable; commented out (abandoned scratch line).
#fisher.test(
######################################
##scratch
#view the histograms of odds ratios.
#write out the places with significant difference between jd and hpne
# Ad-hoc interactive exploration of the first n sites' p-values and odds
# ratios (log scale, excluding OR == 1 to avoid a spike at 0).
n = 320000
hist(meth.pvals, 100)
hist(meth.pvals, 100)
hist(log(meth.or[meth.or != 1]), 1000)
hist(log(meth.or[meth.or != 1]), 100)
hist(log(meth.or[1:n][meth.or[1:n] != 1]), 1000)
hist(log(meth.or[1:n][meth.or[1:n] != 1]), 100)
sum(meth.pvals[1:n] < 0.05, na.rm=T)
qvals = p.adjust(meth.pvals)
mean(qvals < 0.05)
sum(qvals < 0.05)
hist(log(meth.or[qvals < 0.05]), 100)
ix = which(qvals < 0.05)
length(ix)
x[ix,]
|
#source("/nr/project/stat/Smitte/Lakselus_stad/Rfunc-magne/make.weight.new.r")
make.weight <- function(antall0,datadir.lok){
#moving fish from one cage to another
#We have an array of dimension 3.
#First dimension is time
#Second dimension is the cage we move from and
#third dimension is the dimension we move to.
#cage 0 means fish that are slaugtered, while cage 1 are fish that are "utsett"
### if (datadir.lok!="2009-2010") {
if (datadir.lok!="Langskjaera0910") {
stop("make.weight ikke generell ennå")
}
### if(datadir.lok=="2009-2010"){
if(datadir.lok=="Langskjaera0910"){
ii<-0
for(i in c(2,3,4,6,7,8,9,10)){
ii<-ii+1
colnames(antall0)[ii]<-paste("Antall",i,sep="")
}
row.names0 <- rownames(antall0)
row.names <- c(paste(as.numeric(row.names0[1])-1,sep=""),row.names0)
n <- length(antall0[,1])
antall <- matrix(0,ncol=16,nrow=nrow(antall0)+1, dimnames=list(row.names,c(paste("Antall1.",c(2,3,4,6,7,8,9,10),sep=""),paste("Antall",c(2,3,4,6,7,8,9,10),sep=""))))
for(i in c(2,3,4,6,7,8,9,10)){
col1 <- paste("Antall",i,sep="")
antall[row.names0,col1] <- c(antall0[,col1])
}
antall["20090504",c("Antall1.3")] <- 360500+3871 #3871 er lagt til for at det ikke blir feil ved flytting den 20091106
antall["20090511",c("Antall1.2")] <- c(424651)
antall["20090521",c("Antall1.8")] <- c(422000)
antall["20090505",c("Antall1.9")] <- c(404730)
utsett.antall <- matrix(NA,ncol=2,nrow=4,dimnames=list(c("20090511","20090504","20090521","20090505"),c("Antall","Merd")))
utsett.antall["20090504",] <- c(360500,3)
utsett.antall["20090511",] <- c(424651,2)
utsett.antall["20090521",] <- c(422000,8)
utsett.antall["20090505",] <- c(404730,9)
weight2 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight2 <- weight2[-(n+1),]
weight3 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight3 <- weight3[-(n+1),]
weight4 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight4 <- weight4[-(n+1),]
weight6 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight6 <- weight6[-(n+1),]
weight7 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight7 <- weight7[-(n+1),]
weight8 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight8 <- weight8[-(n+1),]
weight9 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight9 <- weight9[-(n+1),]
weight10<- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1,dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight10<- weight10[-(n+1),]
row.names0 <- rownames(antall0[-n,])
w <- c(antall0[-1,"Antall2"]/antall0[-n,"Antall2"])
weight2["20090511","1"] <- 1
weight2[row.names0,"2"] <- w
weight2[row.names0,"0"] <- (1-w)
w <- c(antall0[-1,"Antall2"]/antall0[-n,"Antall2"])
weight2["20090511","1"] <- 1
weight2[row.names0,"2"] <- w
weight2[row.names0,"0"] <- (1-w)
w <- c(antall0[-1,"Antall3"]/antall0[-n,"Antall3"])
weight3["20090504","1"] <- 1
weight3[row.names0,"3"] <- w
weight3[row.names0,"0"] <- 1-w
w <- c(antall0[-1,"Antall4"]/antall0[-n,"Antall4"])
weight4[row.names0,"4"] <- w
weight4[row.names0,"0"] <- 1-w
w <- c(antall0[-1,"Antall6"]/antall0[-n,"Antall6"])
weight6[row.names0,"6"] <- w
weight6[row.names0,"0"] <- 1-w
w <- c(antall0[-1,"Antall7"]/antall0[-n,"Antall7"])
weight7[row.names0,"7"] <- w
weight7[row.names0,"0"] <- 1-w
w <- c(antall0[-1,"Antall8"]/antall0[-n,"Antall8"])
weight8["20090521","1"] <- 1
weight8[row.names0,"8"] <- w
weight8[row.names0,"0"] <- 1-w
w <- c(antall0[-1,"Antall9"]/antall0[-n,"Antall9"])
weight9["20090505","1"] <- 1
weight9[row.names0,"9"] <- w
weight9[row.names0,"0"] <- 1-w
w <- c(antall0[-1,"Antall10"]/antall0[-n,"Antall10"])
weight10[row.names0,"10"] <- w
weight10[row.names0,"0"] <- 1-w
id <- antall0[,"Antall2"]==0
weight2[id,"2"] <- 0
id <- weight2==Inf | weight2==-Inf | is.na(weight2)
weight2[id] <- 0
id <- antall0[,"Antall3"]==0
weight3[id,"3"] <- 0
id <- weight3==Inf | weight3==-Inf | is.na(weight3)
weight3[id] <- 0
id <- antall0[,"Antall4"]==0
weight4[id,"4"] <- 0
id <- weight4==Inf | weight4==-Inf | is.na(weight4)
weight4[id] <- 0
id <- antall0[,"Antall6"]==0
weight6[id,"6"] <- 0
id <- weight6==Inf | weight6==-Inf | is.na(weight6)
weight6[id] <- 0
id <- antall0[,"Antall7"]==0
weight7[id,"7"] <- 0
id <- weight7==Inf | weight7==-Inf | is.na(weight7)
weight7[id] <- 0
id <- antall0[,"Antall8"]==0
weight8[id,"8"] <- 0
id <- weight8==Inf | weight8==-Inf | is.na(weight8)
weight8[id] <- 0
id <- antall0[,"Antall9"]==0
weight9[id,"9"] <- 0
id <- weight9==Inf | weight9==-Inf | is.na(weight9)
weight9[id] <- 0
id <- antall0[,"Antall10"]==0
weight10[id,"10"] <- 0
id <- weight10==Inf | weight10==-Inf | is.na(weight10)
weight10[id] <- 0
#utsett
weight2["20090511","1"] <- 1
weight3["20090504","1"] <- 1
weight8["20090521","1"] <- 1
weight9["20090505","1"] <- 1
q <- antall0["20100522","Antall7"]/antall0["20100521","Antall7"]
weight7["20100521","7"] <- q
weight7["20100521","10"] <- 1-q
weight7["20100521","0"] <- 0
q <- antall0["20100701","Antall8"]/antall0["20100630","Antall8"]
q1 <- antall0["20100701","Antall2"]/((1-q)*antall0["20100630","Antall8"])
weight8["20100630","8"] <- q
weight8["20100630","2"] <- (1-q)*q1
weight8["20100630","0"] <- (1-q)*(1-q1)
q1 <- antall0["20100325","Antall2"]/(antall0["20100324","Antall2"])
q3 <- antall0["20100325","Antall8"]/(antall0["20100324","Antall8"])
q2 <- (antall0["20100325","Antall8"]-antall0["20100324","Antall8"])/((1-q1)*antall0["20100324","Antall2"]+1*antall0["20100324","Antall10"])
q4 <- antall0["20100325","Antall8"]/((1-q1)*antall0["20100324","Antall2"]+antall0["20100324","Antall10"]+antall0["20100324","Antall8"])
weight2["20100324","8"] <- (1-q1)*q4
weight2["20100324","0"] <- (1-q1)*(1-q4)
weight8["20100324","8"] <- q4
weight8["20100324","0"] <- (1-q4)
weight10["20100324","8"] <- q4
q <- (antall0["20100325","Antall6"]-antall0["20100324","Antall6"])/((1-q4)*antall0["20100324","Antall10"])
weight10["20100324","0"] <- (1-q4)*(1-q)
weight10["20100324","6"] <- (1-q4)*q
weight6["20100324","6"] <- 1
weight6["20100324","0"] <- 0
q <- antall0["20101110","Antall3"]/antall0["20101109","Antall3"]
weight3["20101109","3"] <- q
weight3["20101109","0"] <- (1-q)
q <- antall0["20101111","Antall3"]/antall0["20101110","Antall3"]
weight3["20101110","3"] <- q
weight3["20101110","0"] <- (1-q)
weight3["20101111","0"] <- 1
q <- (antall0["20100819","Antall3"])/antall0["20100818","Antall3"]
q1 <- (antall0["20100819","Antall9"])/antall0["20100818","Antall9"]
q2 <- antall0["20100819","Antall6"]/((1-q)*antall0["20100818","Antall3"]+(1-q1)*antall0["20100818","Antall9"])
weight3["20100818","3"] <- q
weight3["20100818","6"] <- (1-q)*q2
weight3["20100818","0"] <- (1-q)*(1-q2)
weight9["20100818","9"] <- q1
weight9["20100818","6"] <- (1-q1)*q2
weight9["20100818","0"] <- (1-q1)*(1-q2)
q <- antall0["20100812","Antall4"]/antall0["20100811","Antall4"]
weight4["20100811","4"] <- q
weight4["20100811","0"] <- (1-q)
q <- antall0["20100813","Antall4"]/antall0["20100812","Antall4"]
weight4["20100812","4"] <- q
weight4["20100812","0"] <- (1-q)
q <- antall0["20100914","Antall4"]/antall0["20100913","Antall4"]
weight4["20100913","4"] <- q
weight4["20100913","0"] <- (1-q)
q <- antall0["20100915","Antall4"]/antall0["20100914","Antall4"]
weight4["20100914","4"] <- q
weight4["20100914","0"] <- (1-q)
q <- antall0["20100916","Antall4"]/antall0["20100915","Antall4"]
weight4["20100915","4"] <- q
weight4["20100915","0"] <- (1-q)
weight4["20100916","0"] <- 1
q <- antall0["20100813","Antall6"]/antall0["20100812","Antall6"]
weight6["20100812","6"] <- q
weight6["20100812","0"] <- (1-q)
q <- antall0["20100816","Antall6"]/antall0["20100815","Antall6"]
weight6["20100815","6"] <- q
weight6["20100815","0"] <- (1-q)
q <- antall0["20100817","Antall6"]/antall0["20100816","Antall6"]
weight6["20100816","6"] <- q
weight6["20100816","0"] <- (1-q)
weight6["20100817","0"] <- 1
q <- antall0["20100907","Antall7"]/antall0["20100906","Antall7"]
weight7["20100906","7"] <- q
weight7["20100906","0"] <- (1-q)
q <- antall0["20100908","Antall7"]/antall0["20100907","Antall7"]
weight7["20100907","7"] <- q
weight7["20100907","0"] <- (1-q)
weight7["20100908","0"] <- 1
q <- antall0["20100907","Antall7"]/antall0["20100906","Antall7"]
weight7["20100906","7"] <- q
weight7["20100906","0"] <- (1-q)
qq22 <- antall0["20091124","Antall2"]/antall0["20091123","Antall2"]
qq88 <- antall0["20091124","Antall8"]/(antall0["20091123","Antall8"])
weight2["20091123","2"] <- qq22 # 166494 antall fisk i Merd2
weight2["20091123","3"] <- (1-qq22)*0.5 # 120385 antall fisk flyttet fra merd2 til merd3
weight2["20091123","4"] <- (1-qq22)*0.5 # 120385 antall fisk flyttet fra merd2 til merd4
weight2["20091123","0"] <- 0
q4 <- (antall0["20091124","Antall4"]-(1-qq22)*0.5*antall0["20091123","Antall2"])/((1-qq88)*antall0["20091123","Antall8"])
q3 <- (antall0["20091124","Antall3"]-(1-qq22)*0.5*antall0["20091123","Antall2"])/((1-qq88)*antall0["20091123","Antall8"])
weight8["20091123","8"] <- qq88 # 168859 antall fisk flyttet fra merd8 til merd8
weight8["20091123","3"] <- (1-qq88)*q3 # 121013 antall fisk flyttet fra merd8 til merd3
weight8["20091123","4"] <- (1-qq88)*q4 # 42489 antall fisk flyttet fra merd8 til merd3
weight8["20091123","0"] <- (1-qq88)*(1-q3-q4) # 3044 antall fisk flyttet fra merd8 til merd0
q <- antall0["20091107","Antall9"]/antall0["20091106","Antall9"]
q1 <- antall0["20091107","Antall10"]/((1-q)*antall0["20091106","Antall9"])
weight9["20091106","9"] <- q
weight9["20091106","10"] <- (1-q)*q1
weight9["20091106","6"] <- (1-q)*(1-q1)
weight9["20091106","0"] <- 0
q0 <- antall0["20091107","Antall7"]/antall0["20091106","Antall3"]
weight3["20091106","7"] <- q0
weight3["20091106","6"] <- (1-q0)
weight3["20091106","0"] <- 0
row.names.w <- rownames(weight2)
w <- array(0,c(length(weight2[,1])+1,8+1,8+1))
n <- length(antall0[,1])
row.names0 <- rownames(antall0)
row.names <- c(paste(as.numeric(row.names0[1])-1,sep=""),row.names0)
dimnames(w) <- list(row.names,paste("Merd",c(1,2:4,6:10),sep=""),paste("Merd",c(0,2:4,6:10),sep=""))
row.names.w <- dimnames(weight2)[[1]]
r.n.w <- rownames(w)[is.element(rownames(w),row.names.w)]
w[r.n.w,"Merd1","Merd2"] <- c(weight2[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd2","Merd2"] <- c(weight2[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd2","Merd0"] <- c(weight2[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd3","Merd2"] <- c(weight3[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd4","Merd2"] <- c(weight4[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd6","Merd2"] <- c(weight6[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd7","Merd2"] <- c(weight7[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd8","Merd2"] <- c(weight8[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd9","Merd2"] <- c(weight9[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd10","Merd2"] <- c(weight10[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd1","Merd3"] <- c(weight3[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd3","Merd3"] <- c(weight3[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd3","Merd0"] <- c(weight3[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd3"] <- c(weight2[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd4","Merd3"] <- c(weight4[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd6","Merd3"] <- c(weight6[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd7","Merd3"] <- c(weight7[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd8","Merd3"] <- c(weight8[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd9","Merd3"] <- c(weight9[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd10","Merd3"] <- c(weight10[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd1","Merd4"] <- c(weight4[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd4","Merd4"] <- c(weight4[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd4","Merd0"] <- c(weight4[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd4"] <- c(weight2[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd3","Merd4"] <- c(weight3[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd6","Merd4"] <- c(weight6[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd7","Merd4"] <- c(weight7[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd8","Merd4"] <- c(weight8[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd9","Merd4"] <- c(weight9[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd10","Merd4"] <- c(weight10[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd1","Merd6"] <- c(weight6[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd6","Merd6"] <- c(weight6[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd6","Merd0"] <- c(weight6[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd6"] <- c(weight2[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd3","Merd6"] <- c(weight3[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd4","Merd6"] <- c(weight4[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd7","Merd6"] <- c(weight7[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd8","Merd6"] <- c(weight8[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd9","Merd6"] <- c(weight9[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd10","Merd6"] <- c(weight10[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd1","Merd7"] <- c(weight7[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd7","Merd7"] <- c(weight7[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd7","Merd0"] <- c(weight7[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd7"] <- c(weight2[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd3","Merd7"] <- c(weight3[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd4","Merd7"] <- c(weight4[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd6","Merd7"] <- c(weight6[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd8","Merd7"] <- c(weight8[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd9","Merd7"] <- c(weight9[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd10","Merd7"] <- c(weight10[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd1","Merd8"] <- c(weight8[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd8","Merd8"] <- c(weight8[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd8","Merd0"] <- c(weight8[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd8"] <- c(weight2[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd3","Merd8"] <- c(weight3[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd4","Merd8"] <- c(weight4[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd6","Merd8"] <- c(weight6[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd7","Merd8"] <- c(weight7[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd9","Merd8"] <- c(weight9[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd10","Merd8"] <- c(weight10[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd1","Merd9"] <- c(weight9[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd9","Merd9"] <- c(weight9[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd9","Merd0"] <- c(weight9[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd9"] <- c(weight2[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd3","Merd9"] <- c(weight3[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd4","Merd9"] <- c(weight4[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd6","Merd9"] <- c(weight6[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd7","Merd9"] <- c(weight7[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd8","Merd9"] <- c(weight8[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd10","Merd9"] <- c(weight10[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd1","Merd10"] <- c(weight10[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd10","Merd10"] <- c(weight10[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd10","Merd0"] <- c(weight10[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd10"] <- c(weight2[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd3","Merd10"] <- c(weight3[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd4","Merd10"] <- c(weight4[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd6","Merd10"] <- c(weight6[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd7","Merd10"] <- c(weight7[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd8","Merd10"] <- c(weight8[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd9","Merd10"] <- c(weight9[r.n.w,paste(10,sep="")])
return(list(w=w,antall=antall))
}
}
| /R/MagnesRutiner/make.weight.r | no_license | Kotkot/RecaSimfish | R | false | false | 17,070 | r | #source("/nr/project/stat/Smitte/Lakselus_stad/Rfunc-magne/make.weight.new.r")
make.weight <- function(antall0,datadir.lok){
# Build the daily cage-to-cage movement weights for one fish-farm locality.
#
# Arguments:
#   antall0     - matrix of daily fish counts: one row per date (rownames on
#                 the form "YYYYMMDD") and one column per cage ("merd").
#                 Assumes the column order corresponds to cages
#                 2,3,4,6,7,8,9,10 -- TODO confirm against the caller.
#   datadir.lok - locality identifier; only "Langskjaera0910" is supported,
#                 any other value stops with an error.
#
# Returns a list with:
#   w      - 3-d array [date, from-cage, to-cage] of daily movement
#            proportions. Destination "Merd0" holds fish leaving the system
#            (slaughter/loss); origin "Merd1" holds newly stocked fish.
#   antall - the count matrix extended with stocking columns "Antall1.<i>"
#            and one extra leading date row.
#moving fish from one cage to another
#We have an array of dimension 3.
#First dimension is time
#Second dimension is the cage we move from and
#third dimension is the dimension we move to.
#cage 0 means fish that are slaughtered, while cage 1 are fish that are "utsett" (newly stocked)
### if (datadir.lok!="2009-2010") {
if (datadir.lok!="Langskjaera0910") {
stop("make.weight ikke generell ennå")
}
### if(datadir.lok=="2009-2010"){
if(datadir.lok=="Langskjaera0910"){
# Rename the count columns to "Antall<cage>" for the cages at this locality.
ii<-0
for(i in c(2,3,4,6,7,8,9,10)){
ii<-ii+1
colnames(antall0)[ii]<-paste("Antall",i,sep="")
}
# Extend the time axis with one pseudo-date placed before the first
# observation (first rowname minus 1; assumes "YYYYMMDD" rownames coerce to
# numeric -- TODO confirm) and copy the observed counts into the wider
# 'antall' matrix that also has one stocking column "Antall1.<i>" per cage.
row.names0 <- rownames(antall0)
row.names <- c(paste(as.numeric(row.names0[1])-1,sep=""),row.names0)
n <- length(antall0[,1])
antall <- matrix(0,ncol=16,nrow=nrow(antall0)+1, dimnames=list(row.names,c(paste("Antall1.",c(2,3,4,6,7,8,9,10),sep=""),paste("Antall",c(2,3,4,6,7,8,9,10),sep=""))))
for(i in c(2,3,4,6,7,8,9,10)){
col1 <- paste("Antall",i,sep="")
antall[row.names0,col1] <- c(antall0[,col1])
}
# Hard-coded stocking ("utsett") events for this locality: date, number of
# fish and receiving cage.
antall["20090504",c("Antall1.3")] <- 360500+3871 #3871 added so that the transfer on 20091106 does not go wrong
antall["20090511",c("Antall1.2")] <- c(424651)
antall["20090521",c("Antall1.8")] <- c(422000)
antall["20090505",c("Antall1.9")] <- c(404730)
utsett.antall <- matrix(NA,ncol=2,nrow=4,dimnames=list(c("20090511","20090504","20090521","20090505"),c("Antall","Merd")))
utsett.antall["20090504",] <- c(360500,3)
utsett.antall["20090511",] <- c(424651,2)
utsett.antall["20090521",] <- c(422000,8)
utsett.antall["20090505",] <- c(404730,9)
# One weight matrix per cage: rows are dates, columns are the destinations
# "0" (out of the system), "1" (stocking) and the real cages. Each matrix is
# built with one extra row which is immediately dropped again ([-(n+1),]).
weight2 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight2 <- weight2[-(n+1),]
weight3 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight3 <- weight3[-(n+1),]
weight4 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight4 <- weight4[-(n+1),]
weight6 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight6 <- weight6[-(n+1),]
weight7 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight7 <- weight7[-(n+1),]
weight8 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight8 <- weight8[-(n+1),]
weight9 <- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1, dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight9 <- weight9[-(n+1),]
weight10<- matrix(0,ncol=length(c(0,1,2,3,4,6,7,8,9,10)),nrow=dim(antall0)[1]+1,dimnames=list(row.names,paste(c(0,1,2,3,4,6,7,8,9,10))))
weight10<- weight10[-(n+1),]
# Default day-to-day weights: the fraction of a cage's fish still counted the
# next day stays in the cage, the remainder goes to destination "0".
row.names0 <- rownames(antall0[-n,])
w <- c(antall0[-1,"Antall2"]/antall0[-n,"Antall2"])
weight2["20090511","1"] <- 1
weight2[row.names0,"2"] <- w
weight2[row.names0,"0"] <- (1-w)
# NOTE(review): the four lines below repeat the four lines above verbatim;
# redundant but harmless (the same values are assigned again).
w <- c(antall0[-1,"Antall2"]/antall0[-n,"Antall2"])
weight2["20090511","1"] <- 1
weight2[row.names0,"2"] <- w
weight2[row.names0,"0"] <- (1-w)
w <- c(antall0[-1,"Antall3"]/antall0[-n,"Antall3"])
weight3["20090504","1"] <- 1
weight3[row.names0,"3"] <- w
weight3[row.names0,"0"] <- 1-w
w <- c(antall0[-1,"Antall4"]/antall0[-n,"Antall4"])
weight4[row.names0,"4"] <- w
weight4[row.names0,"0"] <- 1-w
w <- c(antall0[-1,"Antall6"]/antall0[-n,"Antall6"])
weight6[row.names0,"6"] <- w
weight6[row.names0,"0"] <- 1-w
w <- c(antall0[-1,"Antall7"]/antall0[-n,"Antall7"])
weight7[row.names0,"7"] <- w
weight7[row.names0,"0"] <- 1-w
w <- c(antall0[-1,"Antall8"]/antall0[-n,"Antall8"])
weight8["20090521","1"] <- 1
weight8[row.names0,"8"] <- w
weight8[row.names0,"0"] <- 1-w
w <- c(antall0[-1,"Antall9"]/antall0[-n,"Antall9"])
weight9["20090505","1"] <- 1
weight9[row.names0,"9"] <- w
weight9[row.names0,"0"] <- 1-w
w <- c(antall0[-1,"Antall10"]/antall0[-n,"Antall10"])
weight10[row.names0,"10"] <- w
weight10[row.names0,"0"] <- 1-w
# Clean-up: empty cages give 0/0 or x/0 in the ratios above, so zero the
# self-weight on days where the cage count is 0 and replace any
# Inf/-Inf/NaN/NA entry by 0.
id <- antall0[,"Antall2"]==0
weight2[id,"2"] <- 0
id <- weight2==Inf | weight2==-Inf | is.na(weight2)
weight2[id] <- 0
id <- antall0[,"Antall3"]==0
weight3[id,"3"] <- 0
id <- weight3==Inf | weight3==-Inf | is.na(weight3)
weight3[id] <- 0
id <- antall0[,"Antall4"]==0
weight4[id,"4"] <- 0
id <- weight4==Inf | weight4==-Inf | is.na(weight4)
weight4[id] <- 0
id <- antall0[,"Antall6"]==0
weight6[id,"6"] <- 0
id <- weight6==Inf | weight6==-Inf | is.na(weight6)
weight6[id] <- 0
id <- antall0[,"Antall7"]==0
weight7[id,"7"] <- 0
id <- weight7==Inf | weight7==-Inf | is.na(weight7)
weight7[id] <- 0
id <- antall0[,"Antall8"]==0
weight8[id,"8"] <- 0
id <- weight8==Inf | weight8==-Inf | is.na(weight8)
weight8[id] <- 0
id <- antall0[,"Antall9"]==0
weight9[id,"9"] <- 0
id <- weight9==Inf | weight9==-Inf | is.na(weight9)
weight9[id] <- 0
id <- antall0[,"Antall10"]==0
weight10[id,"10"] <- 0
id <- weight10==Inf | weight10==-Inf | is.na(weight10)
weight10[id] <- 0
# Utsett (stocking): restore the weight-1 entries from pseudo-cage "1" on the
# stocking dates (they may have been zeroed by the clean-up above).
weight2["20090511","1"] <- 1
weight3["20090504","1"] <- 1
weight8["20090521","1"] <- 1
weight9["20090505","1"] <- 1
# The remainder of this section hand-encodes known transfer/slaughter events
# on specific dates; q/q1/q2/... are fractions staying, moved or removed,
# derived from the observed before/after counts.
q <- antall0["20100522","Antall7"]/antall0["20100521","Antall7"]
weight7["20100521","7"] <- q
weight7["20100521","10"] <- 1-q
weight7["20100521","0"] <- 0
q <- antall0["20100701","Antall8"]/antall0["20100630","Antall8"]
q1 <- antall0["20100701","Antall2"]/((1-q)*antall0["20100630","Antall8"])
weight8["20100630","8"] <- q
weight8["20100630","2"] <- (1-q)*q1
weight8["20100630","0"] <- (1-q)*(1-q1)
# 2010-03-24/25: parts of cages 2 and 10 are moved into cage 8 (and cage 10
# partly into cage 6).
q1 <- antall0["20100325","Antall2"]/(antall0["20100324","Antall2"])
# NOTE(review): q3 and q2 computed here are never used below (q4 is the
# fraction actually applied) -- presumably left over from an earlier version.
q3 <- antall0["20100325","Antall8"]/(antall0["20100324","Antall8"])
q2 <- (antall0["20100325","Antall8"]-antall0["20100324","Antall8"])/((1-q1)*antall0["20100324","Antall2"]+1*antall0["20100324","Antall10"])
q4 <- antall0["20100325","Antall8"]/((1-q1)*antall0["20100324","Antall2"]+antall0["20100324","Antall10"]+antall0["20100324","Antall8"])
weight2["20100324","8"] <- (1-q1)*q4
weight2["20100324","0"] <- (1-q1)*(1-q4)
weight8["20100324","8"] <- q4
weight8["20100324","0"] <- (1-q4)
weight10["20100324","8"] <- q4
q <- (antall0["20100325","Antall6"]-antall0["20100324","Antall6"])/((1-q4)*antall0["20100324","Antall10"])
weight10["20100324","0"] <- (1-q4)*(1-q)
weight10["20100324","6"] <- (1-q4)*q
weight6["20100324","6"] <- 1
weight6["20100324","0"] <- 0
# 2010-11-09..11: cage 3 is emptied over three days.
q <- antall0["20101110","Antall3"]/antall0["20101109","Antall3"]
weight3["20101109","3"] <- q
weight3["20101109","0"] <- (1-q)
q <- antall0["20101111","Antall3"]/antall0["20101110","Antall3"]
weight3["20101110","3"] <- q
weight3["20101110","0"] <- (1-q)
weight3["20101111","0"] <- 1
# 2010-08-18/19: parts of cages 3 and 9 are merged into cage 6.
q <- (antall0["20100819","Antall3"])/antall0["20100818","Antall3"]
q1 <- (antall0["20100819","Antall9"])/antall0["20100818","Antall9"]
q2 <- antall0["20100819","Antall6"]/((1-q)*antall0["20100818","Antall3"]+(1-q1)*antall0["20100818","Antall9"])
weight3["20100818","3"] <- q
weight3["20100818","6"] <- (1-q)*q2
weight3["20100818","0"] <- (1-q)*(1-q2)
weight9["20100818","9"] <- q1
weight9["20100818","6"] <- (1-q1)*q2
weight9["20100818","0"] <- (1-q1)*(1-q2)
# Slaughter periods for cage 4 (August and September 2010; empty from 09-16).
q <- antall0["20100812","Antall4"]/antall0["20100811","Antall4"]
weight4["20100811","4"] <- q
weight4["20100811","0"] <- (1-q)
q <- antall0["20100813","Antall4"]/antall0["20100812","Antall4"]
weight4["20100812","4"] <- q
weight4["20100812","0"] <- (1-q)
q <- antall0["20100914","Antall4"]/antall0["20100913","Antall4"]
weight4["20100913","4"] <- q
weight4["20100913","0"] <- (1-q)
q <- antall0["20100915","Antall4"]/antall0["20100914","Antall4"]
weight4["20100914","4"] <- q
weight4["20100914","0"] <- (1-q)
q <- antall0["20100916","Antall4"]/antall0["20100915","Antall4"]
weight4["20100915","4"] <- q
weight4["20100915","0"] <- (1-q)
weight4["20100916","0"] <- 1
# Slaughter period for cage 6 (August 2010; empty from 08-17).
q <- antall0["20100813","Antall6"]/antall0["20100812","Antall6"]
weight6["20100812","6"] <- q
weight6["20100812","0"] <- (1-q)
q <- antall0["20100816","Antall6"]/antall0["20100815","Antall6"]
weight6["20100815","6"] <- q
weight6["20100815","0"] <- (1-q)
q <- antall0["20100817","Antall6"]/antall0["20100816","Antall6"]
weight6["20100816","6"] <- q
weight6["20100816","0"] <- (1-q)
weight6["20100817","0"] <- 1
# Slaughter period for cage 7 (September 2010; empty from 09-08).
q <- antall0["20100907","Antall7"]/antall0["20100906","Antall7"]
weight7["20100906","7"] <- q
weight7["20100906","0"] <- (1-q)
q <- antall0["20100908","Antall7"]/antall0["20100907","Antall7"]
weight7["20100907","7"] <- q
weight7["20100907","0"] <- (1-q)
weight7["20100908","0"] <- 1
# NOTE(review): the three lines below repeat the 2010-09-06 assignment above;
# redundant but harmless.
q <- antall0["20100907","Antall7"]/antall0["20100906","Antall7"]
weight7["20100906","7"] <- q
weight7["20100906","0"] <- (1-q)
# 2009-11-23/24: cage 2 is split equally between cages 3 and 4, and cage 8 is
# split between cages 8, 3, 4 and removal.
qq22 <- antall0["20091124","Antall2"]/antall0["20091123","Antall2"]
qq88 <- antall0["20091124","Antall8"]/(antall0["20091123","Antall8"])
weight2["20091123","2"] <- qq22 # 166494 fish remaining in cage 2
weight2["20091123","3"] <- (1-qq22)*0.5 # 120385 fish moved from cage 2 to cage 3
weight2["20091123","4"] <- (1-qq22)*0.5 # 120385 fish moved from cage 2 to cage 4
weight2["20091123","0"] <- 0
q4 <- (antall0["20091124","Antall4"]-(1-qq22)*0.5*antall0["20091123","Antall2"])/((1-qq88)*antall0["20091123","Antall8"])
q3 <- (antall0["20091124","Antall3"]-(1-qq22)*0.5*antall0["20091123","Antall2"])/((1-qq88)*antall0["20091123","Antall8"])
weight8["20091123","8"] <- qq88 # 168859 fish staying in cage 8
weight8["20091123","3"] <- (1-qq88)*q3 # 121013 fish moved from cage 8 to cage 3
weight8["20091123","4"] <- (1-qq88)*q4 # 42489 fish moved from cage 8 to cage 4 (original comment said cage 3)
weight8["20091123","0"] <- (1-qq88)*(1-q3-q4) # 3044 fish moved from cage 8 out of the system
# 2009-11-06/07: cage 9 is split between cages 9, 10 and 6; cage 3 between
# cages 7 and 6.
q <- antall0["20091107","Antall9"]/antall0["20091106","Antall9"]
q1 <- antall0["20091107","Antall10"]/((1-q)*antall0["20091106","Antall9"])
weight9["20091106","9"] <- q
weight9["20091106","10"] <- (1-q)*q1
weight9["20091106","6"] <- (1-q)*(1-q1)
weight9["20091106","0"] <- 0
q0 <- antall0["20091107","Antall7"]/antall0["20091106","Antall3"]
weight3["20091106","7"] <- q0
weight3["20091106","6"] <- (1-q0)
weight3["20091106","0"] <- 0
# Assemble the per-cage matrices into the 3-d array w[date, from, to].
# Sources are Merd1 (stocking) plus the real cages; destinations are Merd0
# (out of the system) plus the real cages.
row.names.w <- rownames(weight2)
w <- array(0,c(length(weight2[,1])+1,8+1,8+1))
n <- length(antall0[,1])
row.names0 <- rownames(antall0)
row.names <- c(paste(as.numeric(row.names0[1])-1,sep=""),row.names0)
dimnames(w) <- list(row.names,paste("Merd",c(1,2:4,6:10),sep=""),paste("Merd",c(0,2:4,6:10),sep=""))
row.names.w <- dimnames(weight2)[[1]]
r.n.w <- rownames(w)[is.element(rownames(w),row.names.w)]
# Weights into cage 2 (plus cage 2's slaughter column).
w[r.n.w,"Merd1","Merd2"] <- c(weight2[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd2","Merd2"] <- c(weight2[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd2","Merd0"] <- c(weight2[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd3","Merd2"] <- c(weight3[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd4","Merd2"] <- c(weight4[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd6","Merd2"] <- c(weight6[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd7","Merd2"] <- c(weight7[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd8","Merd2"] <- c(weight8[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd9","Merd2"] <- c(weight9[r.n.w,paste(2,sep="")])
w[r.n.w,"Merd10","Merd2"] <- c(weight10[r.n.w,paste(2,sep="")])
# Weights into cage 3 (plus cage 3's slaughter column).
w[r.n.w,"Merd1","Merd3"] <- c(weight3[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd3","Merd3"] <- c(weight3[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd3","Merd0"] <- c(weight3[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd3"] <- c(weight2[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd4","Merd3"] <- c(weight4[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd6","Merd3"] <- c(weight6[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd7","Merd3"] <- c(weight7[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd8","Merd3"] <- c(weight8[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd9","Merd3"] <- c(weight9[r.n.w,paste(3,sep="")])
w[r.n.w,"Merd10","Merd3"] <- c(weight10[r.n.w,paste(3,sep="")])
# Weights into cage 4 (plus cage 4's slaughter column).
w[r.n.w,"Merd1","Merd4"] <- c(weight4[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd4","Merd4"] <- c(weight4[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd4","Merd0"] <- c(weight4[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd4"] <- c(weight2[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd3","Merd4"] <- c(weight3[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd6","Merd4"] <- c(weight6[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd7","Merd4"] <- c(weight7[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd8","Merd4"] <- c(weight8[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd9","Merd4"] <- c(weight9[r.n.w,paste(4,sep="")])
w[r.n.w,"Merd10","Merd4"] <- c(weight10[r.n.w,paste(4,sep="")])
# Weights into cage 6 (plus cage 6's slaughter column).
w[r.n.w,"Merd1","Merd6"] <- c(weight6[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd6","Merd6"] <- c(weight6[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd6","Merd0"] <- c(weight6[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd6"] <- c(weight2[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd3","Merd6"] <- c(weight3[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd4","Merd6"] <- c(weight4[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd7","Merd6"] <- c(weight7[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd8","Merd6"] <- c(weight8[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd9","Merd6"] <- c(weight9[r.n.w,paste(6,sep="")])
w[r.n.w,"Merd10","Merd6"] <- c(weight10[r.n.w,paste(6,sep="")])
# Weights into cage 7 (plus cage 7's slaughter column).
w[r.n.w,"Merd1","Merd7"] <- c(weight7[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd7","Merd7"] <- c(weight7[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd7","Merd0"] <- c(weight7[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd7"] <- c(weight2[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd3","Merd7"] <- c(weight3[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd4","Merd7"] <- c(weight4[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd6","Merd7"] <- c(weight6[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd8","Merd7"] <- c(weight8[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd9","Merd7"] <- c(weight9[r.n.w,paste(7,sep="")])
w[r.n.w,"Merd10","Merd7"] <- c(weight10[r.n.w,paste(7,sep="")])
# Weights into cage 8 (plus cage 8's slaughter column).
w[r.n.w,"Merd1","Merd8"] <- c(weight8[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd8","Merd8"] <- c(weight8[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd8","Merd0"] <- c(weight8[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd8"] <- c(weight2[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd3","Merd8"] <- c(weight3[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd4","Merd8"] <- c(weight4[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd6","Merd8"] <- c(weight6[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd7","Merd8"] <- c(weight7[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd9","Merd8"] <- c(weight9[r.n.w,paste(8,sep="")])
w[r.n.w,"Merd10","Merd8"] <- c(weight10[r.n.w,paste(8,sep="")])
# Weights into cage 9 (plus cage 9's slaughter column).
w[r.n.w,"Merd1","Merd9"] <- c(weight9[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd9","Merd9"] <- c(weight9[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd9","Merd0"] <- c(weight9[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd9"] <- c(weight2[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd3","Merd9"] <- c(weight3[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd4","Merd9"] <- c(weight4[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd6","Merd9"] <- c(weight6[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd7","Merd9"] <- c(weight7[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd8","Merd9"] <- c(weight8[r.n.w,paste(9,sep="")])
w[r.n.w,"Merd10","Merd9"] <- c(weight10[r.n.w,paste(9,sep="")])
# Weights into cage 10 (plus cage 10's slaughter column).
w[r.n.w,"Merd1","Merd10"] <- c(weight10[r.n.w,paste(1,sep="")])
w[r.n.w,"Merd10","Merd10"] <- c(weight10[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd10","Merd0"] <- c(weight10[r.n.w,paste(0,sep="")])
w[r.n.w,"Merd2","Merd10"] <- c(weight2[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd3","Merd10"] <- c(weight3[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd4","Merd10"] <- c(weight4[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd6","Merd10"] <- c(weight6[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd7","Merd10"] <- c(weight7[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd8","Merd10"] <- c(weight8[r.n.w,paste(10,sep="")])
w[r.n.w,"Merd9","Merd10"] <- c(weight9[r.n.w,paste(10,sep="")])
return(list(w=w,antall=antall))
}
}
|
## plot4.R -- total US PM2.5 emissions from coal-related sources, by source
## type and year, written to plot4.png.
library(ggplot2)

## NEI: one row per emission record; SCC: lookup table for source codes.
NEI <- readRDS("data/summarySCC_PM25.rds")
SCC <- readRDS("data/Source_Classification_Code.rds")

## Coal-related source codes: short name contains "coal" (case-insensitive).
## Use TRUE, not the reassignable abbreviation T.
SCCcoal <- SCC[grepl("coal", SCC$Short.Name, ignore.case = TRUE), ]
NEIcoal <- NEI[NEI$SCC %in% SCCcoal$SCC, ]

## Total coal-related emissions per year and source type.
totalCoal <- aggregate(Emissions ~ year + type, NEIcoal, sum)

png("plot4.png", height = 480, width = 480)
## print() is required: ggplot objects are not auto-printed when the script is
## run non-interactively (e.g. via source() or Rscript), which would otherwise
## leave plot4.png empty.
print(
  ggplot(totalCoal, aes(year, Emissions, col = type)) +
    geom_line() +
    geom_point() +
    ggtitle(expression("Total US" ~ PM[2.5] ~ "Coal Emission by Type and Year")) +
    xlab("Year") +
    ylab(expression("US " ~ PM[2.5] ~ "Coal Emission")) +
    scale_colour_discrete(name = "Type of sources") +
    theme(legend.title = element_text(face = "bold"))
)
dev.off() | /week4/plot4.R | no_license | parthrai/exploratory-data-analysis | R | false | false | 706 | r | library(ggplot2)
## NEI: one row per emission record; SCC: lookup table for source codes.
NEI <- readRDS("data/summarySCC_PM25.rds")
SCC <- readRDS("data/Source_Classification_Code.rds")
## Coal-related source codes: short name contains "coal" (case-insensitive).
## NOTE(review): prefer ignore.case = TRUE over the reassignable abbreviation T.
SCCcoal <- SCC[grepl("coal", SCC$Short.Name, ignore.case = T),]
NEIcoal <- NEI[NEI$SCC %in% SCCcoal$SCC,]
## Total coal-related emissions per year and source type.
totalCoal <- aggregate(Emissions ~ year + type, NEIcoal, sum)
png("plot4.png",height=480,width=480)
## NOTE(review): the ggplot object below is not auto-printed when this script
## is run via source() or Rscript; wrap it in print() if plot4.png comes out
## empty.
ggplot(totalCoal, aes(year, Emissions, col = type)) +
geom_line() +
geom_point() +
ggtitle(expression("Total US" ~ PM[2.5] ~ "Coal Emission by Type and Year")) +
xlab("Year") +
ylab(expression("US " ~ PM[2.5] ~ "Coal Emission")) +
scale_colour_discrete(name = "Type of sources") +
theme(legend.title = element_text(face = "bold"))
dev.off() |
\name{source_gist}
\alias{source_gist}
\title{Run a script on gist}
\usage{
source_gist(entry, ...)
}
\arguments{
\item{entry}{either full url (character), gist ID
(numeric or character of numeric). If only an entry ID is
specified and the entry has multiple code blocks, the
first entry is sourced.}
\item{...}{other options passed to \code{\link{source}}}
}
\description{
\dQuote{Gist is a simple way to share snippets and pastes
with others. All gists are git repositories, so they are
automatically versioned, forkable and usable as a git
repository.} \url{https://gist.github.com/}
}
\details{
A gist entry can have multiple code blocks (one file for
one block). Gist is based on git, which means gist has
commit histories (i.e., revisions). You can specify a
commit by giving SHA.
}
\examples{
\dontrun{
source_gist(1654919)
source_gist("1654919")
source_gist("https://gist.github.com/1654919")
source_gist("https://gist.github.com/kohske/1654919")
source_gist("gist.github.com/1654919")
source_gist("https://raw.github.com/gist/1654919/8161f74fb0ec26d1ba9fd54473a96f768ed76f56/test2.r")
}
}
| /devtools/man/source_gist.Rd | no_license | radfordneal/R-package-mods | R | false | false | 1,128 | rd | \name{source_gist}
\alias{source_gist}
\title{Run a script on gist}
\usage{
source_gist(entry, ...)
}
\arguments{
\item{entry}{either full url (character), gist ID
(numeric or character of numeric). If only an entry ID is
specified and the entry has multiple code blocks, the
first entry is sourced.}
\item{...}{other options passed to \code{\link{source}}}
}
\description{
\dQuote{Gist is a simple way to share snippets and pastes
with others. All gists are git repositories, so they are
automatically versioned, forkable and usable as a git
repository.} \url{https://gist.github.com/}
}
\details{
A gist entry can have multiple code blocks (one file for
one block). Gist is based on git, which means gist has
commit histories (i.e., revisions). You can specify a
commit by giving SHA.
}
\examples{
\dontrun{
source_gist(1654919)
source_gist("1654919")
source_gist("https://gist.github.com/1654919")
source_gist("https://gist.github.com/kohske/1654919")
source_gist("gist.github.com/1654919")
source_gist("https://raw.github.com/gist/1654919/8161f74fb0ec26d1ba9fd54473a96f768ed76f56/test2.r")
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/files.R
\name{list_files_in_folder}
\alias{list_files_in_folder}
\title{Get list of files from a folder}
\usage{
list_files_in_folder(id = "root", page_size = NULL, page_token = NULL,
order_by = NULL, spaces = NULL, corpus = NULL)
}
\arguments{
\item{id}{ID of the drive folder}
\item{page_size}{Optional. The maximum number of files to return per page. Acceptable values are 1 to 1000,
inclusive. (Default: 100)}
\item{page_token}{Optional. The token for continuing a previous list request on the next page. This should be
set to the value of 'nextPageToken' from the previous response.}
\item{order_by}{Optional. A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder',
'modifiedByMeTime', 'modifiedTime', 'name', 'quotaBytesUsed', 'recency', 'sharedWithMeTime',
'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the
'desc' modifier. Example usage: ?order_by=folder,modifiedTime desc,name.}
\item{spaces}{Optional. A comma-separated list of spaces to query within the corpus. Supported
values are 'drive', 'appDataFolder' and 'photos'.}
\item{corpus}{Optional. The source of files to list. Acceptable values are domain and user}
}
\description{
Get list of files from a folder
}
\examples{
\dontrun{
library(googledrive)
authorize()
# Folder id is 0XXXXXXXX
list_files_in_folder('0XXXXXXXX')
# If id is not specified, list of files would be obtained from root Google drive folder
list_files_in_folder()
}
}
| /man/list_files_in_folder.Rd | permissive | hairizuanbinnoorazman/googledrive | R | false | true | 1,562 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/files.R
\name{list_files_in_folder}
\alias{list_files_in_folder}
\title{Get list of files from a folder}
\usage{
list_files_in_folder(id = "root", page_size = NULL, page_token = NULL,
order_by = NULL, spaces = NULL, corpus = NULL)
}
\arguments{
\item{id}{ID of the drive folder}
\item{page_size}{Optional. The maximum number of files to return per page. Acceptable values are 1 to 1000,
inclusive. (Default: 100)}
\item{page_token}{Optional. The token for continuing a previous list request on the next page. This should be
set to the value of 'nextPageToken' from the previous response.}
\item{order_by}{Optional. A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder',
'modifiedByMeTime', 'modifiedTime', 'name', 'quotaBytesUsed', 'recency', 'sharedWithMeTime',
'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the
'desc' modifier. Example usage: ?order_by=folder,modifiedTime desc,name.}
\item{spaces}{Optional. A comma-separated list of spaces to query within the corpus. Supported
values are 'drive', 'appDataFolder' and 'photos'.}
\item{corpus}{Optional. The source of files to list. Acceptable values are domain and user}
}
\description{
Get list of files from a folder
}
\examples{
\dontrun{
library(googledrive)
authorize()
# Folder id is 0XXXXXXXX
list_files_in_folder('0XXXXXXXX')
# If id is not specified, list of files would be obtained from root Google drive folder
list_files_in_folder()
}
}
|
## Reading the data in from the course website.
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL, "household_power_consumption.zip", method = 'curl')
## Read straight out of the zip; '?' marks missing values in this dataset.
data <- read.table(unz('household_power_consumption.zip', 'household_power_consumption.txt'),
                   sep = ";", header = TRUE, na.strings = '?',
                   colClasses = c('character', 'character', rep('numeric', 7)))
## Combine date and time into a single timestamp column.
## FIX: use as.POSIXct rather than raw strptime (POSIXlt); list-based POSIXlt
## columns misbehave inside data frames.
data$DateTime <- as.POSIXct(strptime(paste(data$Date, data$Time), '%d/%m/%Y %H:%M:%S'))
## Keep only the data from February 1-2, 2007.
## A single half-open range replaces the original three format() comparisons
## and selects exactly the same rows (both full days).
keep <- data$DateTime >= as.POSIXct('2007-02-01') & data$DateTime < as.POSIXct('2007-02-03')
data <- data[keep, ]
## Create the plot (three sub-metering series) and save to png file.
png(file = 'plot3.png', width = 480, height = 480)
## Empty axes first, then overlay each series as a line.
plot(data$DateTime, data$Sub_metering_1, type = 'n', xlab = "", ylab = "Energy sub metering")
lines(data$DateTime, data$Sub_metering_1)
lines(data$DateTime, data$Sub_metering_2, col = 'red')
lines(data$DateTime, data$Sub_metering_3, col = 'blue')
legend('topright', legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
       lty = c(1, 1, 1), lwd = c(2.5, 2.5, 2.5), col = c("black", "red", "blue"))
dev.off() | /plot3.r | no_license | m2d4dj/ExData_Plotting1 | R | false | false | 1,327 | r | ## Reading the data in from the course website.
## Fetch the household power consumption archive from the course website.
## NOTE(review): method = 'curl' requires the curl binary; may fail on Windows.
fileURL<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL, "household_power_consumption.zip", method='curl')
## Read directly from inside the zip; '?' marks missing values in this dataset.
data<-read.table(unz('household_power_consumption.zip', 'household_power_consumption.txt'),
                 sep=";", header=TRUE, na.strings='?', colClasses=c('character', 'character', rep('numeric',7)))
## Add a new column with the date and time and format as POSIXlt.
## NOTE(review): POSIXlt columns are list-based and awkward inside data frames;
## as.POSIXct would be the safer choice here.
data$DateTime<-paste(data$Date, data$Time)
data$DateTime<-strptime(data$DateTime, '%d/%m/%Y %H:%M:%S')
## Keep only the data from February 1-2, 2007 (year/month filter, then day filter).
keep1<-format(data$DateTime, '%Y')=='2007' & format(data$DateTime, '%m')=='02'
data<-data[keep1,]
keep2<-format(data$DateTime, '%d')=='01' | format(data$DateTime, '%d')=='02'
data<-data[keep2,]
## Create the plot and save to png file.
png(file='plot3.png', width=480, height=480)
## Empty axes first, then overlay the three sub-metering series as lines.
plot(data$DateTime, data$Sub_metering_1, type='n', xlab="", ylab="Energy sub metering")
lines(data$DateTime, data$Sub_metering_1)
lines(data$DateTime, data$Sub_metering_2, col='red')
lines(data$DateTime, data$Sub_metering_3, col='blue')
legend('topright', legend=c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
dev.off() |
## What Gaul Has
## Editing Author: oldenkam
## Date: 3/14/2017
## Purpose:
## whatGaulHas is a function that returns a closest string match to a user's string. This function returns that match's
## relavant information. The first argument, country is a correctly spelled and correctly cased Admin 0 name string that subsets
## possible matches to increase spatial accuracy and curate results. The second argument, input is a string the user is seeking
## to validate against our dictionary of existing strings. near is an optional argument that still includes close matches
## even if a exact match was detected. recent is an optional arguement that curates guesses to only the most recent year that it exists.
#########################################################################################################
### Step 1: Script Prep #################################################################################
## Set directory and load libraries
rm(list=ls())  # NOTE(review): wiping the global environment is an anti-pattern in shared scripts
pacman::p_load(data.table, ggplot2, magrittr, stringr, plyr, Hmisc, dplyr, fuzzyjoin)
## OS locals
# Resolve the J: network-drive mount point depending on the platform.
os <- .Platform$OS.type
if (os == "windows") {
  j <- "J:/"
} else {
  j <- "/home/j/"
}
work_dir <- paste0(j, "temp/oldena/projects/AdminDictionary")
setwd(work_dir)  # NOTE(review): setwd() makes the script location-dependent; prefer absolute paths
### Step 2: Import Data #################################################################################
### Define function #####################################################################################
#' Fuzzy-match location names against the GAUL dictionary
#'
#' Matches each user-supplied string in \code{input} against the entries of a
#' globally cached \code{GaulDict} lookup table, optionally restricted to one
#' country. On the first call the dictionary is loaded from 'GaulDict.csv'
#' into the global environment and the user is asked to rerun.
#'
#' @param input character vector (or coercible) of names to validate.
#' @param country optional ADM0 code (numeric) or ADM0 name (string) used to
#'   subset the dictionary before matching.
#' @param near if FALSE (default), drop fuzzy candidates whenever at least one
#'   exact match exists.
#' @param recent if TRUE (default), keep only the most recent polygon year per
#'   matched feature.
#' @param strDist maximum string distance allowed by the fuzzy join.
#' @return A tibble of candidate matches (one row per input/candidate pair).
whatGaulHas <- function(input, country, near = FALSE, recent = TRUE, strDist = 2){
  if(exists("GaulDict")){
    gauler <- GaulDict
    if(hasArg("input")){
      if(hasArg("country")){
        # Restrict the dictionary to one country (by ADM0 code or ADM0 name).
        if(is.numeric(country) & country %in% gauler$ADM0_CODE){
          gauler <- gauler[gauler$ADM0_CODE == country,]
        }else if(country %in% gauler$ADM0_NAME){
          gauler <- gauler[gauler$ADM0_NAME == country,]
        }else{
          # BUG FIX: the original issued message() then 'break'; break outside
          # a loop raises "no loop for break/next". stop() aborts with the
          # intended explanation instead.
          stop("The value you've passed to country does not correlate with any country in the shapefile library. Try a new value: a new string or number OR do not pass a value to country.")
        }
      }
      input <- as.data.frame(input)
      # Normalised matching key: lower-case, spaces and punctuation removed.
      input$matcher <- str_replace_all(tolower(gsub(" ", "", input$input, fixed = TRUE)), "[[:punct:]]", '')
      input <- as.data.frame(input) %>%
        stringdist_left_join(gauler, by = c("matcher" = "matcher"), max_dist = strDist)
      input <- subset(input, select=c(-matcher.x, -matcher.y, -V1))
    }else{
      message('You are missing the critical argument: input')
    }
    if(any(!is.na(input$matchfeature.name))){
      if(near == FALSE){
        # Drop fuzzy candidates whenever an exact match exists.
        if(any(input$input == input$matchfeature.name)){
          input <- input[input$input == input$matchfeature.name,]
        }
      }
      if(recent == TRUE){
        # One row per feature: the candidate with the most recent polygon year.
        input <- do.call(rbind, by(input, input$matchfeature.name, function(x) x[which.max(x$poly_year), ]))
      }
    }else{
      print("No partial or exact match was detected for this input.")
    }
    return(tbl_df(input))
  }else{
    # First call: load and cache the dictionary globally, then ask to rerun.
    GaulDict <<- fread("GaulDict.csv", stringsAsFactors = FALSE)
    GaulDict$matcher <<- str_replace_all(tolower(gsub(" ", "", GaulDict$matchfeature.name, fixed = TRUE)), "[[:punct:]]", '')
    message('Please rerun script, dictionary has been loaded')
  }
}
| /ihme/R/what_gaulhas.r | no_license | oldenkam/compost-the-most | R | false | false | 3,312 | r | ## What Gaul Has
## Editing Author: oldenkam
## Date: 3/14/2017
## Purpose:
## whatGaulHas is a function that returns a closest string match to a user's string. This function returns that match's
## relavant information. The first argument, country is a correctly spelled and correctly cased Admin 0 name string that subsets
## possible matches to increase spatial accuracy and curate results. The second argument, input is a string the user is seeking
## to validate against our dictionary of existing strings. near is an optional argument that still includes close matches
## even if a exact match was detected. recent is an optional arguement that curates guesses to only the most recent year that it exists.
#########################################################################################################
### Step 1: Script Prep #################################################################################
## Set directory and load libraries
rm(list=ls())
pacman::p_load(data.table, ggplot2, magrittr, stringr, plyr, Hmisc, dplyr, fuzzyjoin)
## OS locals
os <- .Platform$OS.type
if (os == "windows") {
j <- "J:/"
} else {
j <- "/home/j/"
}
work_dir <- paste0(j, "temp/oldena/projects/AdminDictionary")
setwd(work_dir)
### Step 2: Import Data #################################################################################
### Define function #####################################################################################
## whatGaulHas: fuzzy-match input strings against the cached GaulDict table.
## Returns a tibble of candidate matches; on first call it loads and caches
## the dictionary from 'GaulDict.csv' and asks the user to rerun.
whatGaulHas <- function(input, country, near = FALSE, recent = TRUE, strDist = 2){
  if(exists("GaulDict")){
    gauler <- GaulDict
    if(hasArg("input")){
      if(hasArg("country")){
        # Restrict the dictionary to one country (by ADM0 code or ADM0 name).
        if(is.numeric(country) & country %in% gauler$ADM0_CODE){
          gauler <- gauler[gauler$ADM0_CODE == country,]
        }else if(country %in% gauler$ADM0_NAME){
          gauler <- gauler[gauler$ADM0_NAME == country,]
        }else{
          message("The value you've passed to country does not correlate with any country in the shapefile library. Try a new value: a new string or number OR do not pass a value to country.")
          # NOTE(review): 'break' outside a loop raises an error in R;
          # stop() with the message above was probably intended.
          break
        }
      }
      input <- as.data.frame(input)
      # Normalised matching key: lower-case, spaces and punctuation removed.
      input$matcher <- str_replace_all(tolower(gsub(" ", "", input$input, fixed = TRUE)), "[[:punct:]]", '')
      input <- as.data.frame(input) %>%
        stringdist_left_join(gauler, by = c("matcher" = "matcher"), max_dist = strDist)
      input <- subset(input, select=c(-matcher.x, -matcher.y, -V1))
    }else{
      message('You are missing the critical argument: input')
    }
    if(any(!is.na(input$matchfeature.name))){
      if(near == FALSE){
        # Drop fuzzy candidates whenever an exact match exists.
        if(any(input$input == input$matchfeature.name)){
          input <- input[input$input == input$matchfeature.name,]
        }
      }
      if(recent == TRUE){
        # One row per feature: the candidate with the most recent polygon year.
        input <- do.call(rbind, by(input, input$matchfeature.name, function(x) x[which.max(x$poly_year), ]))
      }
    }else{
      print("No partial or exact match was detected for this input.")
    }
    return(tbl_df(input))  # NOTE(review): tbl_df() is defunct in recent dplyr; as_tibble() is the replacement
  }else{
    # First call: cache the dictionary globally and ask the user to rerun.
    GaulDict <<- fread("GaulDict.csv", stringsAsFactors = FALSE)
    GaulDict$matcher <<- str_replace_all(tolower(gsub(" ", "", GaulDict$matchfeature.name, fixed = TRUE)), "[[:punct:]]", '')
    message('Please rerun script, dictionary has been loaded')
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/abundance.R
\name{abundance}
\alias{abundance}
\title{Abundance}
\usage{
abundance(individuals)
}
\arguments{
\item{individuals}{data frame; a data frame of individual mussel records.}
}
\value{
A data frame of sampled sites with the calculated abundance
MCAT metric
}
\description{
Calculates the abundance MCAT metric for the input
individual mussel data frame.
}
\examples{
# Create the individual mussel data frame
individuals <- mcat::individuals
# Calculate abundance for the individuals data frame
a <- abundance(individuals)
}
| /man/abundance.Rd | permissive | mpdougherty/mcat | R | false | true | 623 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/abundance.R
\name{abundance}
\alias{abundance}
\title{Abundance}
\usage{
abundance(individuals)
}
\arguments{
\item{individuals}{data frame; a data frame of individual mussel records.}
}
\value{
A data frame of sampled sites with the calculated abundance
MCAT metric
}
\description{
Calculates the abundance MCAT metric for the input
individual mussel data frame.
}
\examples{
# Create the individual mussel data frame
individuals <- mcat::individuals
# Calculate abundance for the individuals data frame
a <- abundance(individuals)
}
|
# Correlation heatmap (Pearson), used to visualize a correlation matrix.
#' Plot a Pearson correlation heatmap
#'
#' Builds a ggplot2 tile map of the upper triangle of the reordered Pearson
#' correlation matrix of `data`, printing each rounded coefficient inside its
#' cell. Assumes the helpers reorder_cormat() and get_upper_tri(), plus
#' melt() (reshape2), are available from elsewhere in the package.
#'
#' @param data numeric data frame or matrix handed to cor().
#' @return A ggplot object.
#' @import ggplot2
#'
#' @export
heatmap <- function(data){
  # Rounded correlations, reordered and reduced to the upper triangle,
  # then reshaped to long format for plotting.
  corr_vals <- round(cor(data), 3)
  corr_vals <- reorder_cormat(corr_vals)
  long_corr <- melt(get_upper_tri(corr_vals), na.rm = TRUE)
  # Base tile map with a blue-white-red diverging scale over [-1, 1].
  base_plot <- ggplot(long_corr, aes(Var2, Var1, fill = value)) +
    geom_tile(color = "white") +
    scale_fill_gradient2(low = "blue", high = "red", mid = "white",
                         midpoint = 0, limit = c(-1, 1), space = "Lab",
                         name = "Pearson\nCorrelation") +
    theme_minimal() +
    theme(axis.text.x = element_text(angle = 45, vjust = 1,
                                     size = 12, hjust = 1)) +
    coord_fixed()
  # Overlay coefficient labels, strip the remaining chrome, and place the
  # horizontal colorbar legend inside the panel.
  base_plot +
    geom_text(aes(Var2, Var1, label = value), color = "black", size = 4) +
    theme(axis.title.x = element_blank(),
          axis.title.y = element_blank(),
          panel.grid.major = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank(),
          axis.ticks = element_blank(),
          legend.justification = c(1, 0),
          legend.position = c(0.6, 0.7),
          legend.direction = "horizontal") +
    guides(fill = guide_colorbar(barwidth = 7, barheight = 1,
                                 title.position = "top", title.hjust = 0.5))
}
| /R/Heatmap.R | no_license | tanxuezhi/HourlyPrecipExtr | R | false | false | 1,478 | r | # Correlation heatmap with Pearson method, to visualize correlation matrix
#' Function to obtain correlation heatmap
#'
#' Draws a ggplot2 tile map of the upper triangle of the reordered Pearson
#' correlation matrix of `data`, with rounded coefficients printed in each
#' cell.
#' NOTE(review): relies on reorder_cormat(), get_upper_tri() and melt()
#' (reshape2), none of which are defined or imported in this file -- they
#' are assumed to come from elsewhere in the package. Also masks
#' stats::heatmap().
#'
#' @param data numeric data frame or matrix passed to cor().
#' @return A ggplot object.
#' @import ggplot2
#'
#' @export
heatmap <- function(data){
  cormat <- round(cor(data),3)  # Pearson correlations, rounded for the cell labels
  cormat <- reorder_cormat(cormat) # Reorder the correlation matrix
  upper_tri <- get_upper_tri(cormat)  # keep only the upper triangle
  melted_cormat <- melt(upper_tri, na.rm = TRUE) # Melt the correlation matrix
  # Base tile map with a blue-white-red diverging scale over [-1, 1].
  ggheatmap <- ggplot(melted_cormat, aes(Var2, Var1, fill = value))+
    geom_tile(color = "white")+
    scale_fill_gradient2(low = "blue", high = "red", mid = "white",
                         midpoint = 0, limit = c(-1,1), space = "Lab",
                         name="Pearson\nCorrelation") +
    theme_minimal()+ # minimal theme
    theme(axis.text.x = element_text(angle = 45, vjust = 1,
                                     size = 12, hjust = 1))+
    coord_fixed()
  # Final plot: coefficient labels, stripped axes/grid, legend inside panel.
  ggheatmap +
    geom_text(aes(Var2, Var1, label = value), color = "black", size = 4) +
    theme(
      axis.title.x = element_blank(),
      axis.title.y = element_blank(),
      panel.grid.major = element_blank(),
      panel.border = element_blank(),
      panel.background = element_blank(),
      axis.ticks = element_blank(),
      legend.justification = c(1, 0),
      legend.position = c(0.6, 0.7),
      legend.direction = "horizontal")+
    guides(fill = guide_colorbar(barwidth = 7, barheight = 1,
                                 title.position = "top", title.hjust = 0.5))
}
|
# Custom dark ggplot2 theme: black canvas, white Ubuntu-font text, a
# borderless top legend, and dashed horizontal grid lines only.
my_theme <- theme(
  # Plot, panel and legend all share a solid black background.
  plot.background   = element_rect(color = 'black', fill = 'black'),
  panel.background  = element_rect(fill = 'black', color = 'black'),
  legend.background = element_rect(color = 'black', fill = 'black'),
  # Legend: no title, placed on top, white Ubuntu text.
  legend.title = element_blank(),
  legend.position = 'top',
  legend.text = element_text(color = 'white', family = 'ubuntu', size = 12),
  # Axis text: white throughout, bold Ubuntu, slightly larger on x.
  axis.text = element_text(color = 'white'),
  axis.text.x = element_text(face = 'bold', family = 'ubuntu', size = 14),
  axis.text.y = element_text(face = 'bold', family = 'ubuntu', size = 12),
  # Keep only horizontal major grid lines, drawn as dark-gray long dashes.
  panel.grid.major.y = element_line(color = 'gray30', linetype = 'longdash'),
  # Centered, bold, white plot title.
  plot.title = element_text(face = 'bold', family = 'ubuntu', size = 14,
                            color = 'white', hjust = 0.5)
)
| /Statistica/Part2/my_theme.R | no_license | xooxoo/Courses | R | false | false | 1,049 | r | my_theme <- theme(legend.title = element_blank(),
legend.position = 'top',
legend.text = element_text(color = 'white', family = 'ubuntu', size = 12),
plot.background = element_rect(color = 'black',
fill = 'black'),
legend.background = element_rect(color = 'black',
fill = 'black'),
axis.text = element_text(color = 'white'),
panel.background = element_rect(fill = 'black', color = 'black'),
panel.grid.major.y = element_line(color = 'gray30',
linetype = 'longdash'),
axis.text.x = element_text(face = 'bold', family = 'ubuntu', size = 14),
axis.text.y = element_text(face = 'bold', family = 'ubuntu', size = 12),
plot.title = element_text(face = 'bold', family = 'ubuntu', size = 14, color = 'white', hjust = 0.5)
)
|
################################################################################
# Barcelona Graduate School of Economics
# Master's Degree in Data Science
################################################################################
# Course : Statistical Modelling and Inference
# Title : BVS Practical Workshop
# Author : Miquel Torrens
# Date : 2017.01.13
################################################################################
# source('/Users/Miquel/Dropbox/bvsworkshop/bvsw/code/bvsw.R')
################################################################################
################################################################################
# Preliminaries
################################################################################
# Path (adapt it to your local path)
PATH <- '../'
# Global parameters
force <- TRUE # Turn to TRUE to force installation of missing packages
compute <- FALSE # Turn to TRUE to make all computations
# Load functions
source(paste(PATH, 'code/bvsf.R', sep = ''))
# Dependencies: CRAN packages plus 'screening', which is GitHub-only.
pkgs <- c('mombf', 'ncvreg', 'devtools', 'screening', 'parallel', 'monomvn')
for (pkg in pkgs) {
  if (! require(pkg, character.only = TRUE)) {
    if (force == TRUE) {
      if (pkg == 'screening') {
        # 'screening' is not on CRAN; install from GitHub via devtools
        # (devtools precedes it in pkgs, so it gets installed first if missing).
        devtools::install_github('wwrechard/screening')
      } else {
        install.packages(pkg, repos = 'https://cloud.r-project.org')
      }
      # Retry loading after installation; warn rather than abort on failure.
      # BUG FIX: warning() has no 'sep' argument -- the stray sep = '' was
      # being silently pasted into the message text; removed it.
      if (! require(pkg, character.only = TRUE)) {
        warning('Package "', pkg, '" could not be installed.')
      }
    }
  }
}
cat('Loaded dependencies:', paste(pkgs, collapse = ', '), '\n')
# Load auxiliary data (Gene expression data)
file <- paste(PATH, 'data/tgfb.txt', sep = '')
tgfb <- read.table(file, header = TRUE, sep = '\t')
cat('Loaded file:', file, '\n')
################################################################################
################################################################################
# Small number of predictors: full enumeration
################################################################################
# Generate random data
set.seed(666)
n <- 100 # Observations: 100
p <- 5 # Predictors: 5
X <- matrix(rnorm(n * p), nrow = n, ncol = p)
w <- matrix(c(1, 0.5, 0, 2, 0), ncol = 1)
y <- X %*% w + rnorm(n) # Assumption of normal residuals with q = 1
# Prior especification
prior.w <- mombf::imomprior(tau = 0.131) # iMOM prior (tau = .131)
prior.M <- mombf::modelbbprior(alpha.p = 1, beta.p = 1) # BetaBin(1, 1)
prior.q <- mombf::igprior(1e-3, 1e-3) # IG prior (a = b = 0.001)
# Model selection (Full enumeration: enumerate = TRUE, FALSE does Gibbs)
ms00 <- modelSelection(y = y, x = X, priorCoef = prior.w, priorDelta = prior.M,
priorVar = prior.q, enumerate = TRUE)
# Results
ms00[['postMode']] # Posterior mode model (most frequented model)
ms00[['margpp']] # Marginal posterior probs. of inclusion
pp00 <- postProb(ms00)
# Pretty outcomes
round(ms00[['margpp']], 4)
head(postProb.(pp00))
# R^2 Comparison
summary(lm(y ~ X[, 1] + X[, 2] + X[, 3] + X[, 4] + X[, 5]))$adj.r.squared
summary(lm(y ~ X[, 1] + X[, 2] + X[, 4]))$adj.r.squared
# Estimated coefficients
wh00 <- rnlp(y = y[, 1], x = X, msfit = ms00, priorCoef = prior.w, niter = 1e4)
bma00 <- apply(wh00, 2, mean)
# Comparing BMA vs. Frequentist point coefficients
round(bma00, 4) # BMA
round(c(coef(summary(lm(y ~ X)))[2:(1 + p), 1], summary(lm(y ~ X))$sigma), 4)
# Comparing Bayesian vs. Frequentist dispersion of coefficients
round(apply(wh00, 2, sd), 4) # Irrelevant coefficients shrunk to zero!
round(coef(summary(lm(y ~ X)))[2:(1 + p), 2], 4)
# Probability with which you use every model
w00 <- apply(wh00[, 1:p] != 0, 1, function(z) {
paste(which(z), collapse = ',')
})
table(w00)
round(table(w00) / sum(table(w00)), 5) # Proportions of Gibbs visits
# Results for specific model, e.g. HPM
whpm00 <- apply(wh00[w00 == pp00[1, 1], ], 2, mean)
bma00 # Compare to BMA
# BMA prediction example (for the last row)
(pr00 <- as.numeric(X[n, ] %*% matrix(bma00[1:p]))) # Predicted
(ob00 <- y[n, 1]) # Observed
################################################################################
################################################################################
# Comparing Full Enumeration vs. Gibbs sampler
################################################################################
# Use colon cancer data
y <- scale(tgfb[, 'tgfb'])
X <- scale(tgfb[, -1])[, 1:20] # Pick first 20 predictors (+1M models)
n <- nrow(X)
# Prior especifications
prior.w <- zellnerprior(tau = n) # Unit Information Prior (parameters)
prior.U <- modelunifprior() # Uniform prior (model space)
prior.B <- modelbbprior() # Beta-binomial prior (model space)
# Model under uniform prior (with full enumeration and with Gibbs)
ms01 <- modelSelection(y = y, x = X, priorCoef = prior.w,
priorDelta = prior.U, enumerate = TRUE)
ms02 <- modelSelection(y = y, x = X, priorCoef = prior.w,
priorDelta = prior.U, niter = 1e5)
# Model under beta-binomial prior (with full enumeration and with Gibbs)
ms03 <- modelSelection(y = y, x = X, priorCoef = prior.w,
priorDelta = prior.B, enumerate = TRUE)
ms04 <- modelSelection(y = y, x = X, priorCoef = prior.w,
priorDelta = prior.B, niter = 1e5)
# Posterior probabilities
pp01 <- postProb(ms01)
pp02 <- postProb(ms02)
pp03 <- postProb(ms03)
pp04 <- postProb(ms04)
# Best models in each case (based on posterior probabilities)
head(postProb.(pp01))
head(postProb.(pp02)) # Gibbs approximates well full enumeration
head(postProb.(pp03))
head(postProb.(pp04)) # Especially, it preserves model ordering
# Number of variables (under full enumeration)
vs01 <- sapply(strsplit(as.character(pp01[, 1]), split = ','), length)
vs03 <- sapply(strsplit(as.character(pp03[, 1]), split = ','), length)
(mn01 <- round(tapply(pp01[, 'pp'], vs01, sum), 4))
(mn03 <- round(tapply(pp03[, 'pp'], vs03, sum), 4))
# Best candidates
as.vector(which(ms01[['postMode']] == 1)) # HPM
as.vector(which(ms03[['postMode']] == 1))
as.vector(which(ms01[['margpp']] > 0.50)) # Ones with larger Marg. PP
as.vector(which(ms03[['margpp']] > 0.50))
# Compare Beta-Binomial against Uniform
plot(names(mn01), mn01, type = 'l', xlab = 'Number of variables',
ylab = 'Posterior probability', ylim = c(0, 0.3), col = 'red')
lines(names(mn03), mn03, col = 'blue')
legend('topright', c('Uniform','Beta-Binomial(1, 1)'), lty = 1,
col = c('red', 'blue'))
# Estimated BMA coefficients
wh01 <- rnlp(y = y[, 1], x = X, msfit = ms01, priorCoef = prior.w, niter = 1e4)
wh03 <- rnlp(y = y[, 1], x = X, msfit = ms03, priorCoef = prior.w, niter = 1e4)
(bma01 <- round(apply(wh01, 2, mean), 4))
(bma03 <- round(apply(wh03, 2, mean), 4))
# Comparison of the estimates
plot(bma01, pch = 16, col = 'red', xlab = 'Predictor #', ylab = 'BMA')
points(bma03, col = 'darkblue', pch = 16)
title('Coefficient estimates under different priors')
legend('topleft', c('Uniform','Beta-Bin'), pch = 16, col = c('red', 'darkblue'))
################################################################################
################################################################################
# Comparing local and non-local priors
################################################################################
# Full enumeration: off the table
X <- scale(tgfb[, -1]) # Dimension: 262 x 172
p <- ncol(X)
# Various priors
prior.q <- igprior(1e-3, 1e-3) # IG prior (alpha = 0.001 = beta)
prior.M <- modelbbprior() # BetaBin(1, 1)
prior.L <- zellnerprior(tau = n) # Unit Information Prior
prior.N <- imomprior(tau = 0.131) # iMOM prior (tau = .131)
# Compute models
file5 <- paste(PATH, 'data/ms05.RData', sep = '')
file6 <- paste(PATH, 'data/ms06.RData', sep = '')
if (compute == TRUE) {
# Gibbs on a local prior
ms05 <- modelSelection(y = y, x = X, priorCoef = prior.L,
priorDelta = prior.B, priorVar = prior.q, niter = 1e5)
# Gibbs on a non-local prior (careful! Takes a lot of time)
ms06 <- modelSelection(y = y, x = X, priorCoef = prior.N,
priorDelta = prior.B, priorVar = prior.q, niter = 1e5)
# Save results
save(ms05, file = file5); cat('Saved file:', file5, '\n')
save(ms06, file = file6); cat('Saved file:', file6, '\n')
} else {
# Load results
ms05 <- get(load(file = file5)); cat('Loaded file:', file5, '\n')
ms06 <- get(load(file = file6)); cat('Loaded file:', file6, '\n')
}
# Posterior probabilities
pp05 <- postProb(ms05)
pp06 <- postProb(ms06)
head(postProb.(pp05))
head(postProb.(pp06))
as.vector(which(ms05[['postMode']] == 1)) # HPM
as.vector(which(ms06[['postMode']] == 1))
as.vector(which(ms05[['margpp']] > 0.05)) # Ones with larger Marg. PP
as.vector(which(ms06[['margpp']] > 0.05))
# Number of variables
vs05 <- sapply(strsplit(as.character(pp05[, 1]), split = ','), length)
vs06 <- sapply(strsplit(as.character(pp06[, 1]), split = ','), length)
(mn05 <- round(tapply(pp05[, 'pp'], vs05, sum), 4))
(mn06 <- round(tapply(pp06[, 'pp'], vs06, sum), 4))
# Compare Local and Non-local priors
plot(names(mn05), mn05, type = 'l', xlab = 'Number of variables',
ylab = 'Posterior probability', ylim = c(0, 0.45), col = 'red')
lines(names(mn06), mn06, col = 'blue')
legend('topright', c('Local prior','Non-local prior'), lty = 1,
col = c('red', 'blue'))
# What model is chosen Bayesian vs. Frequentist?
(ols <- summary(lm(y ~ X)))
postProb.(pp05)[1, ] # e.g. HPM
# Estimated BMA coefficients
wh05 <- rnlp(y = y[, 1], x = X, msfit = ms05, priorCoef = prior.w, niter = 1e4)
wh06 <- rnlp(y = y[, 1], x = X, msfit = ms06, priorCoef = prior.w, niter = 1e4)
bma05 <- apply(wh05, 2, mean)
bma06 <- apply(wh06, 2, mean)
wols06 <- coef(ols)[, 1]
# Comparing BMA vs. Frequentist point estimations
plot(wols06, col = 'darkgreen', pch = 1, xlab = 'Predictor #', ylab = 'Pred. w')
points(bma06[1:p], col = 'darkblue', pch = 16)
points(bma05[1:p], pch = 16, col = 'red')
title('Coefficient estimates under local and non-local priors')
legend('topleft', c('Local','Non-local', 'OLS'), pch = c(16, 16, 1),
col = c('red', 'darkblue', 'darkgreen'))
################################################################################
################################################################################
# Gains in high dimensions
################################################################################
# Generate random data
set.seed(666)
n <- 500
p <- 200
X <- matrix(rnorm(n * p), nrow = n, ncol = p)
w <- matrix(rep(0, p), ncol = 1)
w[seq(50, p, 50)] <- sample(1:length(seq(50, p, 50)), replace = FALSE)
y <- X %*% w + rnorm(n, sd = 3) # Assumption of normal residuals
# Prior especification
prior.w <- mombf::imomprior(tau = 0.131) # iMOM prior (tau = .131)
prior.M <- mombf::modelbbprior(alpha.p = 1, beta.p = 1) # Beta-binomial(1, 1)
prior.q <- mombf::igprior(1e-3, 1e-3) # IG prior (alpha = 0.001 = beta)
# Compute models
file <- paste(PATH, 'data/ms07.RData', sep = '')
if (compute == TRUE) {
# Model selection (true model has p / 50 active features)
ms07 <- modelSelection(y = y, x = X, priorCoef = prior.w,
priorDelta = prior.M, priorVar = prior.q, niter = 1e5)
# Save results
save(ms07, file = file); cat('Saved file:', file, '\n')
} else {
# Load results
ms07 <- get(load(file = file)); cat('Loaded file:', file, '\n')
}
# Results
pm07 <- ms07[['postMode']] # Posterior mode inclusion
mp07 <- ms07[['margpp']] # Marginal posterior probs. of inclusion
pp07 <- postProb(ms07) # Posterior model probabilities
# Best predictors
margpp07 <- round(sort(mp07, decreasing = TRUE), 5)
names(margpp07) <- order(mp07, decreasing = TRUE)
head(margpp07, 20)
# Best models
head(postProb.(pp07))
as.vector(which(ms07[['postMode']] == 1))
as.vector(which(ms07[['margpp']] > 0.05))
# Model choice compared to lm
summary(lm(y ~ X)) # A lot of significant predictors...
postProb.(ms07)[1, ] # :)
# Comparing BMA vs. Frequentist
wh07 <- rnlp(y = y[, 1], x = X, msfit = ms07, priorCoef = prior.w, niter = 1e4)
bma07 <- apply(wh07, 2, mean)
wols07 <- coef(summary(lm(y ~ X)))[2:(1 + p), 1]
plot(wols07, col = 'red', pch = 16, xlab = 'Predictor #', ylab = 'Pred. w')
points(bma07[1:p], col = 'darkblue', pch = 16)
abline(h = 1:p, lty = 2)
legend('topleft', c('BVS', 'OLS'), pch = 16, col = c('darkblue', 'red'))
################################################################################
# When p >> n
set.seed(666)
n <- 100
p <- 500
X <- matrix(rnorm(n * p), nrow = n, ncol = p)
w <- matrix(rep(0, p), ncol = 1)
w[seq(50, p, 50)] <- sample(1:length(seq(50, p, 50)), replace = FALSE)
y <- X %*% w + rnorm(n, sd = 3) # Assumption of normal residuals
# Compute models
file <- paste(PATH, 'data/ms08.RData', sep = '')
if (compute == TRUE) {
# Model selection (Full enumeration: enumerate = TRUE)
ms08 <- modelSelection(y = y, x = X, priorCoef = prior.w,
priorDelta = prior.M, priorVar = prior.q, niter = 1e5)
# Save results
save(ms08, file = file); cat('Saved file:', file, '\n')
} else {
# Load results
ms08 <- get(load(file = file)); cat('Loaded file:', file, '\n')
}
# Results
pm08 <- ms08[['postMode']] # Posterior mode inclusion
mp08 <- ms08[['margpp']] # Marginal posterior probs. of inclusion
pp08 <- postProb(ms08) # Posterior model probabilities
# Best predictors
margpp08 <- round(sort(ms08[['margpp']], decreasing = TRUE), 5)
names(margpp08) <- order(ms08[['margpp']], decreasing = TRUE)
head(margpp08, 15)
# Best models
head(postProb.(pp08))
# Comparison to usual approach
summary(lm(y ~ X)) # Can't be computed!
postProb.(pp08)[1, ] # Really close!
# Comparing BMA vs. Frequentist point estimations
wh08 <- rnlp(y = y[, 1], x = X, msfit = ms08, priorCoef = prior.w, niter = 1e4)
bma08 <- apply(wh08, 2, mean)[1:p]
sort(round(bma08, 2), decreasing = TRUE)[1:30]
plot(bma08, col = 'darkblue', pch = 16, xlab = 'Predictor #', ylab = 'BMA')
abline(h = 1:p, lty = 2)
################################################################################
################################################################################
# Comparing BMA vs. Median probability model
################################################################################
# Example with p = 20
y <- scale(tgfb[, 'tgfb'])
X <- scale(tgfb[, -1])[, 1:20] # Pick 20 features
prior.w <- zellnerprior(tau = nrow(X)) # Unit information prior
# Compute models
file <- paste(PATH, 'data/loo_fits.RData', sep = '')
if (compute == TRUE) {
# Least squares estimator
fit.mle <- looCV.mle(y = y, x = X)
# BMA
fit.bma <- looCV.bma(y = y, x = X, type = 'bma', priorCoef = prior.w,
priorDelta = prior.M, niter = 1e4)
# Median probability model
fit.med <- looCV.bma(y = y, x = X, type = 'median', priorCoef = prior.w,
priorDelta = prior.M, niter = 1e4)
# Save results
save(fit.mle, fit.bma, fit.med, file = file)
cat('Saved file:', file, '\n')
} else {
aux <- load(file = file); cat('Loaded file:', file, '\n')
fit.mle <- get(aux[1])
fit.bma <- get(aux[2])
fit.med <- get(aux[3])
}
# Compare predictions for different methods
nomx <- 'Bayesian model averaging'
nomy1 <- 'Median probability model'
nomy2 <- 'Least squares'
par(mfrow = c(1, 2))
plot(fit.bma[['pred']], fit.med[['pred']], xlab = nomx, ylab = nomy1)
abline(0,1)
plot(fit.bma[['pred']], fit.mle[['pred']], xlab = nomx, ylab = nomy2)
abline(0,1)
################################################################################
################################################################################
# Heuristics
################################################################################
# Recover TGFB data
y <- scale(tgfb[, 1])
X <- scale(tgfb[, -1]) # Dimension: 262 x 172
Z <- tgfb
colnames(Z) <- c('y', paste('x', 1:(ncol(tgfb) - 1), sep = ''))
################################################################################
# Forward / backward / stepwise regression
m0 <- lm(y ~ 1, data = Z) # Smallest model
mf <- lm(y ~ ., data = Z) # Largest model
# Run regressions
fwd.m <- step(m0, direction = 'forward', scope = formula(mf))
bwd.m <- step(mf, direction = 'backward', data = Z)
hyb.m <- step(m0, scope = list(upper = mf), data = Z, direction = 'both')
# Normal "lm" objects
summary(fwd.m)
summary(bwd.m)
summary(hyb.m)
################################################################################
# Lasso / Scad / Bayesian Lasso
# Run model
lasso.m01 <- ncvreg(X, y, penalty = 'lasso')
scad.m01 <- ncvreg(X, y, penalty = 'SCAD')
# Need to specify some lambda
summary(lasso.m01, lambda = 0.1)
summary(scad.m01, lambda = 0.1)
# Choose the one minimising loss
cl <- makeCluster(detectCores() - 1)
lasso.m02 <- cv.ncvreg(X, y, cluster = cl, nfolds = 10, penalty = 'lasso')
#lasso.m03 <- cv.ncvreg(X, y, cluster = cl, nfolds = nrow(X), penalty = 'lasso')
scad.m02 <- cv.ncvreg(X, y, cluster = cl, nfolds = 10, penalty = 'SCAD')
#scad.m03 <- cv.ncvreg(X, y, cluster = cl, nfolds = nrow(X), penalty = 'SCAD')
# Results
summary(lasso.m02)
#summary(lasso.m03)
summary(scad.m02)
#summary(scad.m03)
# Plot of the paths and cv-scores
par(mfrow = c(1, 2))
plot(lasso.m02)
plot(lasso.m02[['fit']])
par(mfrow = c(1, 2))
plot(scad.m02)
plot(scad.m02[['fit']])
# Model coefficients
plot(coef(lasso.m02), col = 'darkblue', pch = 16,
xlab = 'Predictor', ylab = 'w_hat', main = 'Lasso and Scad')
grid()
points(coef(scad.m02), col = 'red', pch = 16)
legend('topleft', c('Lasso', 'Scad'), pch = 16, col = c('darkblue', 'red'))
# To do Bayesian Lasso (really intensive)
if (compute == TRUE) {
blasso.m01 <- monomvn::blasso(X, y, T = 1e3)
summary(blasso.m01)
plot(blasso.m01)
}
################################################################################
# Prescreening: SIS and HOLP
sis.m01 <- screening(X, y, method = 'sis', num.select = 50)
sis.m02 <- screening(X, y, method = 'sis', num.select = 10)
sis.m03 <- screening(X, y, method = 'sis', num.select = 5)
holp.m01 <- screening(X, y, method = 'holp', num.select = 50)
holp.m02 <- screening(X, y, method = 'holp', num.select = 10)
holp.m03 <- screening(X, y, method = 'holp', num.select = 5)
# Results
sis.m01[['screen']]
sis.m02[['screen']]
sis.m03[['screen']]
holp.m01[['screen']]
holp.m02[['screen']]
holp.m03[['screen']]
# Are results similar?
sort(sis.m02[['screen']])
sort(holp.m02[['screen']])
################################################################################
# Block-diagonality
# CASE 1: Block-orthogonal design
set.seed(666)
p <- 200
n <- 210
X <- scale(matrix(rnorm(n * p), nrow = n, ncol = p))
e <- eigen(cov(X))
X <- t(t(X %*% e[['vectors']]) / sqrt(e[['values']]))
w <- c(rep(0, p - 3), c(0.5, 0.75, 1))
q <- 1
y <- X %*% matrix(w, ncol = 1) + rnorm(n, sd = sqrt(q))
# Priors
prior.M <- modelbinomprior(p = 1 / p)
prior.q <- igprior(1e-3, 1e-3)
prior.w1 <- zellnerprior(tau = n)
prior.w2 <- momprior(tau = 0.348)
# Algorithm
bd.m01 <- postModeOrtho(y, x = X, priorCoef = prior.w1, priorDelta = prior.M,
priorVar = prior.q, bma = TRUE)
bd.m02 <- postModeOrtho(y, x = X, priorCoef = prior.w2, priorDelta = prior.M,
priorVar = prior.q, bma = TRUE)
# Results
head(bd.m01[['models']])
head(bd.m02[['models']])
tail(round(bd.m01[['bma']], 2))
tail(round(bd.m02[['bma']], 2))
# Coefficient BMA estimates
par(mar = c(5, 5, 1, 1))
ols <- (t(X) %*% y) / colSums(X ** 2)
plot(ols, bd.m01[['bma']][, 'coef'], xlab = 'Least squares estimate',
ylab = expression(paste('E(', beta[j], '| y)')), cex.lab = 1.5,
cex.axis = 1.2, col = 'blue', pch = 0)
points(ols, bd.m02[['bma']][, 'coef'], pch = 1, col = 'red')
legend('topleft', c('Zellner', 'MOM'), pch = c(0, 1), col = c('blue', 'red'))
# CASE 2: Block-diagonal design
set.seed(666)
p <- 100
n <- 110
bsize <- 10
blocks <- rep(1:(p / bsize), each = bsize)
X <- scale(matrix(rnorm(n * p), nrow = n, ncol = p))
e <- eigen(cov(X))
X <- t(t(X %*% e[['vectors']]) / sqrt(e[['values']]))
# Build block-diagonal matrix
Sb <- diag(bsize)
Sb[upper.tri(Sb)] <- Sb[lower.tri(Sb)] <- 0.5
vv <- eigen(Sb)[['vectors']]
sqSb <- vv %*% diag(sqrt(eigen(Sb)[['values']])) %*% t(vv)
for (i in 1:(p / bsize)) {
X[, blocks == i] <- X[, blocks == i] %*% sqSb
}
# Parametrize
q <- 1
w <- rep(0, p)
w[blocks == 1] <- c(rep(0, bsize - 3), c(0.5, 0.75, 1))
w[blocks == 2] <- c(rep(0, bsize - 2), c(0.75, -1))
y <- X %*% matrix(w, ncol = 1) + rnorm(n, sd = sqrt(q))
# Run model
bd.m03 <- postModeBlockDiag(y = y, x = X, blocks = blocks, priorCoef = prior.w1,
priorDelta = prior.M, priorVar = prior.q,
bma = TRUE)
# Results
(aux <- head(bd.m03[['models']][, 1:3], 10))
(aux <- cbind.data.frame(aux[, 1:2], round(aux[, 3], 4)))
round(bd.m03[['bma']][1:30, ], 2)
head(bd.m03[['postmean.model']], 10)
bd.m03[['postmean.model']][6, ] # True model
################################################################################
# END OF SCRIPT
| /bigp/code/bvsw.R | no_license | nandanrao/statistical-modelling | R | false | false | 21,178 | r | ################################################################################
# Barcelona Graduate School of Economics
# Master's Degree in Data Science
################################################################################
# Course : Statistical Modelling and Inference
# Title : BVS Practical Workshop
# Author : Miquel Torrens
# Date : 2017.01.13
################################################################################
# source('/Users/Miquel/Dropbox/bvsworkshop/bvsw/code/bvsw.R')
################################################################################
################################################################################
# Preliminaries
################################################################################
# Path (adapt it to your local path)
PATH <- '../'
# Global parameters
force <- TRUE # Turn to TRUE to force installation of missing packages
compute <- FALSE # Turn to TRUE to make all computations
# Load functions
source(paste(PATH, 'code/bvsf.R', sep = ''))
# Dependencies
pkgs <- c('mombf', 'ncvreg', 'devtools', 'screening', 'parallel', 'monomvn')
for (pkg in pkgs) {
if (! require(pkg, character.only = TRUE)) {
if (force == TRUE) {
if (pkg == 'screening') {
devtools::install_github('wwrechard/screening')
} else {
install.packages(pkg, repos = 'https://cloud.r-project.org')
}
if (! require(pkg, character.only = TRUE)) {
warning('Package "', pkg, '" could not be installed.', sep = '')
}
}
}
}
cat('Loaded dependencies:', paste(pkgs, collapse = ', '), '\n')
# Load auxiliary data (Gene expression data)
file <- paste(PATH, 'data/tgfb.txt', sep = '')
tgfb <- read.table(file, header = TRUE, sep = '\t')
cat('Loaded file:', file, '\n')
################################################################################
################################################################################
# Small number of predictors: full enumeration
################################################################################
# Generate random data
set.seed(666)
n <- 100 # Observations: 100
p <- 5 # Predictors: 5
X <- matrix(rnorm(n * p), nrow = n, ncol = p)
w <- matrix(c(1, 0.5, 0, 2, 0), ncol = 1)
y <- X %*% w + rnorm(n) # Assumption of normal residuals with q = 1
# Prior especification
prior.w <- mombf::imomprior(tau = 0.131) # iMOM prior (tau = .131)
prior.M <- mombf::modelbbprior(alpha.p = 1, beta.p = 1) # BetaBin(1, 1)
prior.q <- mombf::igprior(1e-3, 1e-3) # IG prior (a = b = 0.001)
# Model selection (Full enumeration: enumerate = TRUE, FALSE does Gibbs)
ms00 <- modelSelection(y = y, x = X, priorCoef = prior.w, priorDelta = prior.M,
priorVar = prior.q, enumerate = TRUE)
# Results
ms00[['postMode']] # Posterior mode model (most frequented model)
ms00[['margpp']] # Marginal posterior probs. of inclusion
pp00 <- postProb(ms00)
# Pretty outcomes
round(ms00[['margpp']], 4)
head(postProb.(pp00))
# R^2 Comparison
summary(lm(y ~ X[, 1] + X[, 2] + X[, 3] + X[, 4] + X[, 5]))$adj.r.squared
summary(lm(y ~ X[, 1] + X[, 2] + X[, 4]))$adj.r.squared
# Estimated coefficients
wh00 <- rnlp(y = y[, 1], x = X, msfit = ms00, priorCoef = prior.w, niter = 1e4)
bma00 <- apply(wh00, 2, mean)
# Comparing BMA vs. Frequentist point coefficients
round(bma00, 4) # BMA
round(c(coef(summary(lm(y ~ X)))[2:(1 + p), 1], summary(lm(y ~ X))$sigma), 4)
# Comparing Bayesian vs. Frequentist dispersion of coefficients
round(apply(wh00, 2, sd), 4) # Irrelevant coefficients shrunk to zero!
round(coef(summary(lm(y ~ X)))[2:(1 + p), 2], 4)
# Probability with which you use every model
w00 <- apply(wh00[, 1:p] != 0, 1, function(z) {
paste(which(z), collapse = ',')
})
table(w00)
round(table(w00) / sum(table(w00)), 5) # Proportions of Gibbs visits
# Results for specific model, e.g. HPM
whpm00 <- apply(wh00[w00 == pp00[1, 1], ], 2, mean)
bma00 # Compare to BMA
# BMA prediction example (for the last row)
(pr00 <- as.numeric(X[n, ] %*% matrix(bma00[1:p]))) # Predicted
(ob00 <- y[n, 1]) # Observed
################################################################################
################################################################################
# Comparing Full Enumeration vs. Gibbs sampler
################################################################################
# Use colon cancer data
y <- scale(tgfb[, 'tgfb'])
X <- scale(tgfb[, -1])[, 1:20] # Pick first 20 predictors (+1M models)
n <- nrow(X)
# Prior especifications
prior.w <- zellnerprior(tau = n) # Unit Information Prior (parameters)
prior.U <- modelunifprior() # Uniform prior (model space)
prior.B <- modelbbprior() # Beta-binomial prior (model space)
# Model under uniform prior (with full enumeration and with Gibbs)
ms01 <- modelSelection(y = y, x = X, priorCoef = prior.w,
priorDelta = prior.U, enumerate = TRUE)
ms02 <- modelSelection(y = y, x = X, priorCoef = prior.w,
priorDelta = prior.U, niter = 1e5)
# Model under beta-binomial prior (with full enumeration and with Gibbs)
ms03 <- modelSelection(y = y, x = X, priorCoef = prior.w,
priorDelta = prior.B, enumerate = TRUE)
ms04 <- modelSelection(y = y, x = X, priorCoef = prior.w,
priorDelta = prior.B, niter = 1e5)
# Posterior probabilities
pp01 <- postProb(ms01)
pp02 <- postProb(ms02)
pp03 <- postProb(ms03)
pp04 <- postProb(ms04)
# Best models in each case (based on posterior probabilities)
head(postProb.(pp01))
head(postProb.(pp02)) # Gibbs approximates well full enumeration
head(postProb.(pp03))
head(postProb.(pp04)) # Especially, it preserves model ordering
# Number of variables (under full enumeration)
vs01 <- sapply(strsplit(as.character(pp01[, 1]), split = ','), length)
vs03 <- sapply(strsplit(as.character(pp03[, 1]), split = ','), length)
(mn01 <- round(tapply(pp01[, 'pp'], vs01, sum), 4))
(mn03 <- round(tapply(pp03[, 'pp'], vs03, sum), 4))
# Best candidates
as.vector(which(ms01[['postMode']] == 1)) # HPM
as.vector(which(ms03[['postMode']] == 1))
as.vector(which(ms01[['margpp']] > 0.50)) # Ones with larger Marg. PP
as.vector(which(ms03[['margpp']] > 0.50))
# Compare Beta-Binomial against Uniform
plot(names(mn01), mn01, type = 'l', xlab = 'Number of variables',
ylab = 'Posterior probability', ylim = c(0, 0.3), col = 'red')
lines(names(mn03), mn03, col = 'blue')
legend('topright', c('Uniform','Beta-Binomial(1, 1)'), lty = 1,
col = c('red', 'blue'))
# Estimated BMA coefficients
wh01 <- rnlp(y = y[, 1], x = X, msfit = ms01, priorCoef = prior.w, niter = 1e4)
wh03 <- rnlp(y = y[, 1], x = X, msfit = ms03, priorCoef = prior.w, niter = 1e4)
(bma01 <- round(apply(wh01, 2, mean), 4))
(bma03 <- round(apply(wh03, 2, mean), 4))
# Comparison of the estimates
plot(bma01, pch = 16, col = 'red', xlab = 'Predictor #', ylab = 'BMA')
points(bma03, col = 'darkblue', pch = 16)
title('Coefficient estimates under different priors')
legend('topleft', c('Uniform','Beta-Bin'), pch = 16, col = c('red', 'darkblue'))
################################################################################
################################################################################
# Comparing local and non-local priors
################################################################################
# Full enumeration: off the table
X <- scale(tgfb[, -1]) # Dimension: 262 x 172
p <- ncol(X)
# Various priors
prior.q <- igprior(1e-3, 1e-3) # IG prior (alpha = 0.001 = beta)
prior.M <- modelbbprior() # BetaBin(1, 1)
prior.L <- zellnerprior(tau = n) # Unit Information Prior
prior.N <- imomprior(tau = 0.131) # iMOM prior (tau = .131)
# Compute models
file5 <- paste(PATH, 'data/ms05.RData', sep = '')
file6 <- paste(PATH, 'data/ms06.RData', sep = '')
if (compute == TRUE) {
# Gibbs on a local prior
ms05 <- modelSelection(y = y, x = X, priorCoef = prior.L,
priorDelta = prior.B, priorVar = prior.q, niter = 1e5)
# Gibbs on a non-local prior (careful! Takes a lot of time)
ms06 <- modelSelection(y = y, x = X, priorCoef = prior.N,
priorDelta = prior.B, priorVar = prior.q, niter = 1e5)
# Save results
save(ms05, file = file5); cat('Saved file:', file5, '\n')
save(ms06, file = file6); cat('Saved file:', file6, '\n')
} else {
# Load results
ms05 <- get(load(file = file5)); cat('Loaded file:', file5, '\n')
ms06 <- get(load(file = file6)); cat('Loaded file:', file6, '\n')
}
# Posterior probabilities
pp05 <- postProb(ms05)
pp06 <- postProb(ms06)
head(postProb.(pp05))
head(postProb.(pp06))
as.vector(which(ms05[['postMode']] == 1)) # HPM
as.vector(which(ms06[['postMode']] == 1))
as.vector(which(ms05[['margpp']] > 0.05)) # Ones with larger Marg. PP
as.vector(which(ms06[['margpp']] > 0.05))
# Number of variables
vs05 <- sapply(strsplit(as.character(pp05[, 1]), split = ','), length)
vs06 <- sapply(strsplit(as.character(pp06[, 1]), split = ','), length)
(mn05 <- round(tapply(pp05[, 'pp'], vs05, sum), 4))
(mn06 <- round(tapply(pp06[, 'pp'], vs06, sum), 4))
# Compare Local and Non-local priors
plot(names(mn05), mn05, type = 'l', xlab = 'Number of variables',
ylab = 'Posterior probability', ylim = c(0, 0.45), col = 'red')
lines(names(mn06), mn06, col = 'blue')
legend('topright', c('Local prior','Non-local prior'), lty = 1,
col = c('red', 'blue'))
# What model is chosen Bayesian vs. Frequentist?
(ols <- summary(lm(y ~ X)))
postProb.(pp05)[1, ] # e.g. HPM
# Estimated BMA coefficients
wh05 <- rnlp(y = y[, 1], x = X, msfit = ms05, priorCoef = prior.w, niter = 1e4)
wh06 <- rnlp(y = y[, 1], x = X, msfit = ms06, priorCoef = prior.w, niter = 1e4)
bma05 <- apply(wh05, 2, mean)
bma06 <- apply(wh06, 2, mean)
wols06 <- coef(ols)[, 1]
# Comparing BMA vs. Frequentist point estimations
plot(wols06, col = 'darkgreen', pch = 1, xlab = 'Predictor #', ylab = 'Pred. w')
points(bma06[1:p], col = 'darkblue', pch = 16)
points(bma05[1:p], pch = 16, col = 'red')
title('Coefficient estimates under local and non-local priors')
legend('topleft', c('Local','Non-local', 'OLS'), pch = c(16, 16, 1),
col = c('red', 'darkblue', 'darkgreen'))
################################################################################
################################################################################
# Gains in high dimensions
################################################################################
# Generate random data
set.seed(666)
n <- 500
p <- 200
X <- matrix(rnorm(n * p), nrow = n, ncol = p)
w <- matrix(rep(0, p), ncol = 1)
w[seq(50, p, 50)] <- sample(1:length(seq(50, p, 50)), replace = FALSE)
y <- X %*% w + rnorm(n, sd = 3) # Assumption of normal residuals
# Prior especification
prior.w <- mombf::imomprior(tau = 0.131) # iMOM prior (tau = .131)
prior.M <- mombf::modelbbprior(alpha.p = 1, beta.p = 1) # Beta-binomial(1, 1)
prior.q <- mombf::igprior(1e-3, 1e-3) # IG prior (alpha = 0.001 = beta)
# Compute models
file <- paste(PATH, 'data/ms07.RData', sep = '')
if (compute == TRUE) {
# Model selection (true model has p / 50 active features)
ms07 <- modelSelection(y = y, x = X, priorCoef = prior.w,
priorDelta = prior.M, priorVar = prior.q, niter = 1e5)
# Save results
save(ms07, file = file); cat('Saved file:', file, '\n')
} else {
# Load results
ms07 <- get(load(file = file)); cat('Loaded file:', file, '\n')
}
# Results
pm07 <- ms07[['postMode']] # Posterior mode inclusion
mp07 <- ms07[['margpp']] # Marginal posterior probs. of inclusion
pp07 <- postProb(ms07) # Posterior model probabilities
# Best predictors
margpp07 <- round(sort(mp07, decreasing = TRUE), 5)
names(margpp07) <- order(mp07, decreasing = TRUE)
head(margpp07, 20)
# Best models
head(postProb.(pp07))
as.vector(which(ms07[['postMode']] == 1))
as.vector(which(ms07[['margpp']] > 0.05))
# Model choice compared to lm
summary(lm(y ~ X)) # A lot of significant predictors...
postProb.(ms07)[1, ] # :)
# Comparing BMA vs. Frequentist
wh07 <- rnlp(y = y[, 1], x = X, msfit = ms07, priorCoef = prior.w, niter = 1e4)
bma07 <- apply(wh07, 2, mean)
wols07 <- coef(summary(lm(y ~ X)))[2:(1 + p), 1]
plot(wols07, col = 'red', pch = 16, xlab = 'Predictor #', ylab = 'Pred. w')
points(bma07[1:p], col = 'darkblue', pch = 16)
abline(h = 1:p, lty = 2)
legend('topleft', c('BVS', 'OLS'), pch = 16, col = c('darkblue', 'red'))
################################################################################
# When p >> n
set.seed(666)
n <- 100
p <- 500
X <- matrix(rnorm(n * p), nrow = n, ncol = p)
w <- matrix(rep(0, p), ncol = 1)
w[seq(50, p, 50)] <- sample(1:length(seq(50, p, 50)), replace = FALSE)
y <- X %*% w + rnorm(n, sd = 3) # Assumption of normal residuals
# Compute models
file <- paste(PATH, 'data/ms08.RData', sep = '')
if (compute == TRUE) {
# Model selection (Full enumeration: enumerate = TRUE)
ms08 <- modelSelection(y = y, x = X, priorCoef = prior.w,
priorDelta = prior.M, priorVar = prior.q, niter = 1e5)
# Save results
save(ms08, file = file); cat('Saved file:', file, '\n')
} else {
# Load results
ms08 <- get(load(file = file)); cat('Loaded file:', file, '\n')
}
# Results
pm08 <- ms08[['postMode']] # Posterior mode inclusion
mp08 <- ms08[['margpp']] # Marginal posterior probs. of inclusion
pp08 <- postProb(ms08) # Posterior model probabilities
# Best predictors
margpp08 <- round(sort(ms08[['margpp']], decreasing = TRUE), 5)
names(margpp08) <- order(ms08[['margpp']], decreasing = TRUE)
head(margpp08, 15)
# Best models
head(postProb.(pp08))
# Comparison to usual approach
summary(lm(y ~ X)) # Can't be computed!
postProb.(pp08)[1, ] # Really close!
# Comparing BMA vs. Frequentist point estimations
wh08 <- rnlp(y = y[, 1], x = X, msfit = ms08, priorCoef = prior.w, niter = 1e4)
bma08 <- apply(wh08, 2, mean)[1:p]
sort(round(bma08, 2), decreasing = TRUE)[1:30]
plot(bma08, col = 'darkblue', pch = 16, xlab = 'Predictor #', ylab = 'BMA')
abline(h = 1:p, lty = 2)
################################################################################
################################################################################
# Comparing BMA vs. Median probability model
################################################################################
# Example with p = 20
y <- scale(tgfb[, 'tgfb'])
X <- scale(tgfb[, -1])[, 1:20] # Pick 20 features
prior.w <- zellnerprior(tau = nrow(X)) # Unit information prior
# Compute models
file <- paste(PATH, 'data/loo_fits.RData', sep = '')
if (compute == TRUE) {
# Least squares estimator
fit.mle <- looCV.mle(y = y, x = X)
# BMA
fit.bma <- looCV.bma(y = y, x = X, type = 'bma', priorCoef = prior.w,
priorDelta = prior.M, niter = 1e4)
# Median probability model
fit.med <- looCV.bma(y = y, x = X, type = 'median', priorCoef = prior.w,
priorDelta = prior.M, niter = 1e4)
# Save results
save(fit.mle, fit.bma, fit.med, file = file)
cat('Saved file:', file, '\n')
} else {
aux <- load(file = file); cat('Loaded file:', file, '\n')
fit.mle <- get(aux[1])
fit.bma <- get(aux[2])
fit.med <- get(aux[3])
}
# Compare predictions for different methods
nomx <- 'Bayesian model averaging'
nomy1 <- 'Median probability model'
nomy2 <- 'Least squares'
par(mfrow = c(1, 2))
plot(fit.bma[['pred']], fit.med[['pred']], xlab = nomx, ylab = nomy1)
abline(0,1)
plot(fit.bma[['pred']], fit.mle[['pred']], xlab = nomx, ylab = nomy2)
abline(0,1)
################################################################################
################################################################################
# Heuristics
################################################################################
# Recover TGFB data
y <- scale(tgfb[, 1])
X <- scale(tgfb[, -1]) # Dimension: 262 x 172
Z <- tgfb
colnames(Z) <- c('y', paste('x', 1:(ncol(tgfb) - 1), sep = ''))
################################################################################
# Forward / backward / stepwise regression
m0 <- lm(y ~ 1, data = Z) # Smallest model
mf <- lm(y ~ ., data = Z) # Largest model
# Run regressions
fwd.m <- step(m0, direction = 'forward', scope = formula(mf))
bwd.m <- step(mf, direction = 'backward', data = Z)
hyb.m <- step(m0, scope = list(upper = mf), data = Z, direction = 'both')
# Normal "lm" objects
summary(fwd.m)
summary(bwd.m)
summary(hyb.m)
################################################################################
# Lasso / Scad / Bayesian Lasso
# Run model
lasso.m01 <- ncvreg(X, y, penalty = 'lasso')
scad.m01 <- ncvreg(X, y, penalty = 'SCAD')
# Need to specify some lambda
summary(lasso.m01, lambda = 0.1)
summary(scad.m01, lambda = 0.1)
# Choose the one minimising loss (cross-validated lambda)
cl <- makeCluster(detectCores() - 1)
lasso.m02 <- cv.ncvreg(X, y, cluster = cl, nfolds = 10, penalty = 'lasso')
#lasso.m03 <- cv.ncvreg(X, y, cluster = cl, nfolds = nrow(X), penalty = 'lasso')
scad.m02 <- cv.ncvreg(X, y, cluster = cl, nfolds = 10, penalty = 'SCAD')
#scad.m03 <- cv.ncvreg(X, y, cluster = cl, nfolds = nrow(X), penalty = 'SCAD')
stopCluster(cl)  # shut down the worker processes (they leaked before)
# Results
summary(lasso.m02)
#summary(lasso.m03)
summary(scad.m02)
#summary(scad.m03)
# Plot of the paths and cv-scores
par(mfrow = c(1, 2))
plot(lasso.m02)
plot(lasso.m02[['fit']])
par(mfrow = c(1, 2))
plot(scad.m02)
plot(scad.m02[['fit']])
# Model coefficients
plot(coef(lasso.m02), col = 'darkblue', pch = 16,
xlab = 'Predictor', ylab = 'w_hat', main = 'Lasso and Scad')
grid()
points(coef(scad.m02), col = 'red', pch = 16)
legend('topleft', c('Lasso', 'Scad'), pch = 16, col = c('darkblue', 'red'))
# To do Bayesian Lasso (really intensive)
if (compute == TRUE) {
blasso.m01 <- monomvn::blasso(X, y, T = 1e3)
summary(blasso.m01)
plot(blasso.m01)
}
################################################################################
# Prescreening: SIS and HOLP
sis.m01 <- screening(X, y, method = 'sis', num.select = 50)
sis.m02 <- screening(X, y, method = 'sis', num.select = 10)
sis.m03 <- screening(X, y, method = 'sis', num.select = 5)
holp.m01 <- screening(X, y, method = 'holp', num.select = 50)
holp.m02 <- screening(X, y, method = 'holp', num.select = 10)
holp.m03 <- screening(X, y, method = 'holp', num.select = 5)
# Results
sis.m01[['screen']]
sis.m02[['screen']]
sis.m03[['screen']]
holp.m01[['screen']]
holp.m02[['screen']]
holp.m03[['screen']]
# Are results similar?
sort(sis.m02[['screen']])
sort(holp.m02[['screen']])
################################################################################
# Block-diagonality
# CASE 1: Block-orthogonal design
set.seed(666)
p <- 200
n <- 210
X <- scale(matrix(rnorm(n * p), nrow = n, ncol = p))
e <- eigen(cov(X))
X <- t(t(X %*% e[['vectors']]) / sqrt(e[['values']]))
w <- c(rep(0, p - 3), c(0.5, 0.75, 1))
q <- 1
y <- X %*% matrix(w, ncol = 1) + rnorm(n, sd = sqrt(q))
# Priors
prior.M <- modelbinomprior(p = 1 / p)
prior.q <- igprior(1e-3, 1e-3)
prior.w1 <- zellnerprior(tau = n)
prior.w2 <- momprior(tau = 0.348)
# Algorithm
bd.m01 <- postModeOrtho(y, x = X, priorCoef = prior.w1, priorDelta = prior.M,
priorVar = prior.q, bma = TRUE)
bd.m02 <- postModeOrtho(y, x = X, priorCoef = prior.w2, priorDelta = prior.M,
priorVar = prior.q, bma = TRUE)
# Results
head(bd.m01[['models']])
head(bd.m02[['models']])
tail(round(bd.m01[['bma']], 2))
tail(round(bd.m02[['bma']], 2))
# Coefficient BMA estimates
par(mar = c(5, 5, 1, 1))
ols <- (t(X) %*% y) / colSums(X ** 2)
plot(ols, bd.m01[['bma']][, 'coef'], xlab = 'Least squares estimate',
ylab = expression(paste('E(', beta[j], '| y)')), cex.lab = 1.5,
cex.axis = 1.2, col = 'blue', pch = 0)
points(ols, bd.m02[['bma']][, 'coef'], pch = 1, col = 'red')
legend('topleft', c('Zellner', 'MOM'), pch = c(0, 1), col = c('blue', 'red'))
# CASE 2: Block-diagonal design
set.seed(666)
p <- 100
n <- 110
bsize <- 10
blocks <- rep(1:(p / bsize), each = bsize)
X <- scale(matrix(rnorm(n * p), nrow = n, ncol = p))
e <- eigen(cov(X))
X <- t(t(X %*% e[['vectors']]) / sqrt(e[['values']]))
# Build block-diagonal matrix
Sb <- diag(bsize)
Sb[upper.tri(Sb)] <- Sb[lower.tri(Sb)] <- 0.5
vv <- eigen(Sb)[['vectors']]
sqSb <- vv %*% diag(sqrt(eigen(Sb)[['values']])) %*% t(vv)
for (i in 1:(p / bsize)) {
X[, blocks == i] <- X[, blocks == i] %*% sqSb
}
# Parametrize
q <- 1
w <- rep(0, p)
w[blocks == 1] <- c(rep(0, bsize - 3), c(0.5, 0.75, 1))
w[blocks == 2] <- c(rep(0, bsize - 2), c(0.75, -1))
y <- X %*% matrix(w, ncol = 1) + rnorm(n, sd = sqrt(q))
# Run model
bd.m03 <- postModeBlockDiag(y = y, x = X, blocks = blocks, priorCoef = prior.w1,
priorDelta = prior.M, priorVar = prior.q,
bma = TRUE)
# Results
(aux <- head(bd.m03[['models']][, 1:3], 10))
(aux <- cbind.data.frame(aux[, 1:2], round(aux[, 3], 4)))
round(bd.m03[['bma']][1:30, ], 2)
head(bd.m03[['postmean.model']], 10)
bd.m03[['postmean.model']][6, ] # True model
################################################################################
# END OF SCRIPT
|
# Get amap lat lon coordinates
#' @title Get the GCJ02 Coordinates Parsed by Amap
#' @param location Location name, vector, maximum length is 10 (the amap
#'   batch endpoint accepts at most 10 addresses per request).
#' @param key Application key of amap.com; defaults to the
#'   \code{key_amap_tmp} environment variable.
#' @importFrom magrittr `%>%`
#' @importFrom jsonlite fromJSON
#' @importFrom tidyr separate
#' @importFrom tibble as_tibble
#' @return A tibble with 4 variables: \code{loc.input}, \code{loc.parse},
#'   longitude (\code{lon}) and latitude (\code{lat}).
#'
geocode_amap_base <- function(location, key = Sys.getenv("key_amap_tmp")){
  api <- "https://restapi.amap.com/v3/geocode/geo?address="
  # Batch mode: the API takes up to 10 addresses joined by "|".
  location2char <- paste(location, collapse = "|")
  url <- paste0(api, location2char, "&batch=true&key=", key)
  # Let a failed request error here; the old `try()` swallowed the error
  # and the function then crashed later with "object 'tmp' not found".
  tmp <- jsonlite::fromJSON(txt = url)
  df <- data.frame(loc.input = location,
                   loc.parse = tmp$geocodes$formatted_address,
                   lonlat = tmp$geocodes$location) %>%
    tidyr::separate(col = lonlat,
                    into = c("lon", "lat"),
                    sep = ",",
                    convert = TRUE) %>%
    tibble::as_tibble()
  return(df)
}
#' @title Get GCJ02 Coordinates of Large Vector of Locations
#' @description Given a \code{location} vector of any length, split it into
#'   chunks of 10 (the amap batch limit) and retrieve the GCJ02 coordinates
#'   for each chunk from amap.
#'
#' @param location Location name, vector, for more than 10 elements.
#' @param key Application key of amap.com; defaults to the
#'   \code{key_amap_tmp} environment variable.
#' @importFrom purrr map_dfr
#' @return A combined tibble with 4 variables: \code{loc.input},
#'   \code{loc.parse}, longitude, latitude.
#'
geocode_amap <- function(location, key = Sys.getenv("key_amap_tmp")){
  # chunk() is a helper defined elsewhere in the package; presumably it
  # splits `location` into pieces of at most n elements -- TODO confirm.
  locationlst <- chunk(x = location, n = 10)
  # Fix: forward `key` so a caller-supplied key is actually used
  # (it was previously dropped by the per-chunk calls).
  df <- purrr::map_dfr(.x = locationlst,
                       .f = ~geocode_amap_base(location = .x, key = key))
  return(df)
}
| /R/geocode_amap.R | no_license | jimrpy/getcoords | R | false | false | 1,714 | r | # Get amap lat lon coordinates
#' @title Get the GCJ02 Coordinates Parsed by Amap
#' @param location Location name, vector, maximum length is 10 (the amap
#'   batch endpoint accepts at most 10 addresses per request).
#' @param key Application key of amap.com; defaults to the
#'   \code{key_amap_tmp} environment variable.
#' @importFrom magrittr `%>%`
#' @importFrom jsonlite fromJSON
#' @importFrom tidyr separate
#' @importFrom tibble as_tibble
#' @return A tibble with 4 variables: \code{loc.input}, \code{loc.parse},
#'   longitude (\code{lon}) and latitude (\code{lat}).
#'
geocode_amap_base <- function(location, key = Sys.getenv("key_amap_tmp")){
  api <- "https://restapi.amap.com/v3/geocode/geo?address="
  # Batch mode: the API takes up to 10 addresses joined by "|".
  location2char <- paste(location, collapse = "|")
  url <- paste0(api, location2char, "&batch=true&key=", key)
  # Let a failed request error here; the old `try()` swallowed the error
  # and the function then crashed later with "object 'tmp' not found".
  tmp <- jsonlite::fromJSON(txt = url)
  df <- data.frame(loc.input = location,
                   loc.parse = tmp$geocodes$formatted_address,
                   lonlat = tmp$geocodes$location) %>%
    tidyr::separate(col = lonlat,
                    into = c("lon", "lat"),
                    sep = ",",
                    convert = TRUE) %>%
    tibble::as_tibble()
  return(df)
}
#' @title Get GCJ02 Coordinates of Large Vector of Locations
#' @description Given a \code{location} vector of any length, split it into
#'   chunks of 10 (the amap batch limit) and retrieve the GCJ02 coordinates
#'   for each chunk from amap.
#'
#' @param location Location name, vector, for more than 10 elements.
#' @param key Application key of amap.com; defaults to the
#'   \code{key_amap_tmp} environment variable.
#' @importFrom purrr map_dfr
#' @return A combined tibble with 4 variables: \code{loc.input},
#'   \code{loc.parse}, longitude, latitude.
#'
geocode_amap <- function(location, key = Sys.getenv("key_amap_tmp")){
  # chunk() is a helper defined elsewhere in the package; presumably it
  # splits `location` into pieces of at most n elements -- TODO confirm.
  locationlst <- chunk(x = location, n = 10)
  # Fix: forward `key` so a caller-supplied key is actually used
  # (it was previously dropped by the per-chunk calls).
  df <- purrr::map_dfr(.x = locationlst,
                       .f = ~geocode_amap_base(location = .x, key = key))
  return(df)
}
|
library(devtools)
require(roxygen2)
rm(list = ls()) # clear workspace before building
getwd()
setwd("/Users/kprovost/Documents/Github/")
#dir.create('./subsppLabelR/')
setwd(paste(getwd(), '/subsppLabelR/', sep=''))
#setwd(paste(getwd(),"/Documents/Github/speciesPairNicheOverlap/",sep=""))
#create('./')
roxygenize('./') #Builds description file and documentation
## put your functions in the R folder
## to ignore folder
devtools::use_build_ignore("scripts")
roxygenize('./')
check(cran=TRUE)
library(subsppLabelR,verbose=T)
install_github('kaiyaprovost/subsppLabelR')
#####
## make your functions
## you can have one Rscript with many functions, groups of related functions,
## or one script for each function
## to make a function do these steps
## the "#'" is an roxygen comment
## param are the parameters
## export tells you to export it
## examples is how you use the function
## there are different @blah you can do
## this is how to load packages
#' @import raster
#' @import parallel
#' @import stats
NULL
#' Echo
#'
#' Return the input unchanged (an identity function used as a
#' package-building example).
#'
#' @param echo A word or sentence to echo
#'
#' @export
#' @examples
#'
#' echo('This is a test')
echo <- function(echo) {
  echo
}
| /howToBuildAPackage.R | no_license | kaiyaprovost/misc_scripts | R | false | false | 1,235 | r | library(devtools)
require(roxygen2)
rm(list = ls()) # clear workspace before building
getwd()
setwd("/Users/kprovost/Documents/Github/")
#dir.create('./subsppLabelR/')
setwd(paste(getwd(), '/subsppLabelR/', sep=''))
#setwd(paste(getwd(),"/Documents/Github/speciesPairNicheOverlap/",sep=""))
#create('./')
roxygenize('./') #Builds description file and documentation
## put your functions in the R folder
## to ignore folder
devtools::use_build_ignore("scripts")
roxygenize('./')
check(cran=TRUE)
library(subsppLabelR,verbose=T)
install_github('kaiyaprovost/subsppLabelR')
#####
## make your functions
## you can have one Rscript with many functions, groups of related functions,
## or one script for each function
## to make a function do these steps
## the "#'" is an roxygen comment
## param are the parameters
## export tells you to export it
## examples is how you use the function
## there are different @blah you can do
## this is how to load packages
#' @import raster
#' @import paralell
#' @import stats
NULL
#' Echo
#'
#' This function echos whatever you give it.
#'
#' @param echo A word or sentence to echo
#'
#' @export
#' @examples
#'
#' echo('This is a test')
echo = function(echo){
return(echo)
}
|
# Run a PCA on a samples-by-miRNA matrix, write variance/correlation summary
# tables, and plot every pairwise combination of the first `num` principal
# components, coloured by sample group.
#
# Args:
#   data:    numeric matrix/data frame; rows are samples, columns are miRNA
#            features.
#   factors: per-sample group labels ("v_s" viral symptomatic, "v_as" viral
#            asymptomatic, "bl_s"/"bl_as" baseline); one entry per row of
#            `data`.
#   num:     number of principal components to retain.
#
# Side effects: creates Results/PCA/ and writes tables, an EPS cumulative
# variance plot and one JPEG scatter plot per PC pair.
function(data, factors, num) {
  # Load package
  library(FactoMineR)
  # PCA analysis, samples are data points, miRNAs are features
  # In data, rows are samples, columns are miRNA features
  pca_df <- PCA(data, ncp = num, graph = FALSE)
  # Proportion of variance explained by the first `num` components
  prop_of_var <- pca_df$eig[1:num, ]
  dir.create("Results/PCA")
  write.table(prop_of_var, "Results/PCA/prop_of_vars_samples.txt", sep = "\t", quote = FALSE)
  # Plot cumulative proportion of variance (third column of the eig table)
  cum_var <- prop_of_var[, 3]
  postscript(file = "Results/PCA/cum_var.eps", width = 5, height = 5)
  plot(1:length(cum_var), cum_var, main = "Cumulative Percentage of Variance", xlab = "PCs", ylab = "Cum. % of Var.", ylim = c(0, 100))
  lines(1:length(cum_var), cum_var)
  dev.off()
  # Correlations between the original variables and the PCs
  corr <- pca_df$var$coord
  save(corr, file = "Results/PCA/saved_corr_samples.r")
  write.table(corr, "Results/PCA/correlation_samples.txt", sep = "\t", quote = FALSE)
  # Sample coordinates on the first `num` PCs, split by sample group
  pca_result <- pca_df$ind$coord
  pcs_all <- vector(mode = "list", length = num)
  pcs_viral_symp <- vector(mode = "list", length = num)
  pcs_viral_asymp <- vector(mode = "list", length = num)
  pcs_baseline <- vector(mode = "list", length = num)
  pcs_bacteria <- vector(mode = "list", length = num)
  for (i in 1:num) {
    pcs_all[[i]] <- pca_result[, i]
    # BUG FIX: the original indexed with the undefined global `sample_factors`
    # while the `factors` argument was silently unused; use the parameter.
    pcs_viral_symp[[i]] <- pca_result[factors == "v_s", i]
    pcs_viral_asymp[[i]] <- pca_result[factors == "v_as", i]
    all_baseline_bool <- factors == "bl_s" | factors == "bl_as"
    pcs_baseline[[i]] <- pca_result[all_baseline_bool, i]
    # NOTE(review): bacteria samples are assumed to occupy rows 43:52 --
    # confirm against the input layout (a dedicated factor level would be
    # safer than hard-coded indices).
    pcs_bacteria[[i]] <- pca_result[43:52, i]
  }
  # create plots folder
  dir.create("Results/PCA/plots")
  # Plot all combinations of PCs, store in files
  # red = viral symp, orange = viral asymp, blue = baseline, green = bacteria
  from_i <- 1
  to_i <- num - 1
  to_j <- num
  for (i in from_i:to_i) {
    from_j <- i + 1
    for (j in from_j:to_j) {
      plot_name <- paste(i, "vs", j, sep = "_")
      dir_name <- paste("Results/PCA/plots/",
                        plot_name, ".jpeg", sep = "")
      jpeg(dir_name, width = 15, height = 15, units = "cm", res = 300)
      # Axis limits span all samples so the group overlays share one frame
      plot(pcs_viral_symp[[i]], pcs_viral_symp[[j]], col = "red", pch = 16,
           xlab = paste("PC", i), ylab = paste("PC", j),
           xlim = c(min(pcs_all[[i]]), max(pcs_all[[i]])),
           ylim = c(min(pcs_all[[j]]), max(pcs_all[[j]])))
      points(pcs_viral_asymp[[i]], pcs_viral_asymp[[j]], col = "orange", pch = 16)
      points(pcs_baseline[[i]], pcs_baseline[[j]], col = "blue", pch = 16)
      points(pcs_bacteria[[i]], pcs_bacteria[[j]], col = "green", pch = 16)
      dev.off()
    }
  }
}
| /R_Code/PCA/analyze_PCs_samples.R | permissive | Halmoni100/miRNA_dataPlus | R | false | false | 2,533 | r | function(data, factors, num) {
# Load package
library(FactoMineR)
# PCA analysis, samples are data points, miRNAs are features
# In data, rows are samples, columns are miRNA features
pca_df = PCA(data, ncp=num, graph=FALSE)
# Find proportion of variance, etc.
prop_of_var <- pca_df$eig[1:num,]
dir.create("Results/PCA")
write.table(prop_of_var, "Results/PCA/prop_of_vars_samples.txt", sep="\t", quote=FALSE)
# Plot cumulative proportion of variance
cum_var <- prop_of_var[ , 3]
postscript(file="Results/PCA/cum_var.eps", width=5, height=5)
plot(1:length(cum_var), cum_var, main="Cumulative Percentage of Variance", xlab="PCs", ylab="Cum. % of Var.", ylim=c(0,100))
lines(1:length(cum_var), cum_var)
dev.off()
# Get correlations b/w variables and PCs
corr <- pca_df$var$coord
save(corr, file="Results/PCA/saved_corr_samples.r")
write.table(corr, "Results/PCA/correlation_samples.txt", sep="\t", quote=FALSE)
# Get first n PCs, store in lists
pca_result <- pca_df$ind$coord
pcs_all <- vector(mode="list", length=num)
pcs_viral_symp <- vector(mode="list", length=num)
pcs_viral_asymp <- vector(mode="list", length=num)
pcs_baseline <- vector(mode="list", length=num)
pcs_bacteria <- vector(mode="list", length=num)
for (i in 1:num) {
pcs_all[[i]] <- pca_result[,i]
pcs_viral_symp[[i]] <- pca_result[sample_factors == "v_s",i]
pcs_viral_asymp[[i]] <- pca_result[sample_factors == "v_as",i]
all_baseline_bool <- sample_factors == "bl_s" | sample_factors == "bl_as"
pcs_baseline[[i]] <- pca_result[all_baseline_bool,i]
pcs_bacteria[[i]] = pca_result[43:52,i]
}
# create plots folder
dir.create("Results/PCA/plots")
# Plot all combinations of PCs, store in files
# red = viral symp, orange = viral asymp, blue = baseline, green = bacteria
from_i <- 1
to_i <- num - 1
to_j <- num
for (i in from_i:to_i) {
from_j <- i + 1
for (j in from_j:to_j) {
plot_name <- paste(i,"vs",j, sep="_")
dir_name <- paste("Results/PCA/plots/",
plot_name, ".jpeg", sep="")
jpeg(dir_name, width=15, height=15, units="cm", res=300)
plot(pcs_viral_symp[[i]], pcs_viral_symp[[j]], col="red", pch=16,
xlab=paste("PC", i), ylab=paste("PC", j),
xlim=c(min(pcs_all[[i]]), max(pcs_all[[i]])),
ylim=c(min(pcs_all[[j]]), max(pcs_all[[j]])))
points(pcs_viral_asymp[[i]], pcs_viral_asymp[[j]], col="orange", pch=16)
points(pcs_baseline[[i]], pcs_baseline[[j]], col="blue", pch=16)
points(pcs_bacteria[[i]], pcs_bacteria[[j]], col="green", pch=16)
dev.off()
}
}
}
|
# Meta-transportability ("usid" algorithm): derive a transport formula for the
# causal effect of x on y in a target domain by pooling several source-domain
# selection diagrams through generalize().
#
# Args:
#   y, x : character vectors of response / intervention vertex names.
#   D    : list of selection diagrams (igraph objects); D[[1]] also encodes
#          the target domain via its "S"-labelled selection nodes.
#   expr : return the result as an expression string (TRUE) or as a
#          probability object (FALSE).
#   simp, steps, primes, stop_on_nonid : forwarded to generalize().
#
# Returns: the transport formula as a string or probability object (plus the
# derivation steps when steps = TRUE); "" / NULL when not identifiable.
meta.transport <- function(y, x, D, expr = TRUE, simp = TRUE, steps = FALSE, primes = FALSE, stop_on_nonid = TRUE) {
  v <- get.vertex.attribute(D[[1]], "name")
  # Selection ("S") nodes mark domain discrepancies in the first diagram.
  s <- v[which(vertex.attributes(D[[1]])$description == "S")]
  # Every vertex that is neither a response nor a selection node is treated
  # as intervenable in the source domains.
  interventions <- setdiff(v, union(y, s))
  # Causal diagram of the target domain: drop the selection nodes.
  D.causal <- induced.subgraph(D[[1]], v[!(v %in% s)])
  # Domain list: target diagram first, then the supplied source diagrams.
  D.all <- list()
  D.all[[1]] <- D.causal
  D.all[2:(length(D)+1)] <- D
  # Available experiments per domain: none in the target, all interventions
  # in each source.
  Z.all <- list()
  Z.all[[1]] <- character(0)
  Z.all[2:(length(D)+1)] <- rep(list(interventions), length(D))
  # generalize() is always asked for steps so res$steps is available below.
  res <- generalize(y = y, x = x, Z = Z.all, D = D.all, expr = FALSE, simp = simp, steps = TRUE, primes = primes, stop_on_nonid = stop_on_nonid)
  res.prob <- res$P
  attr(res.prob, "algorithm") <- "usid"
  if (res$id) {
    # Identifiable: optionally render the probability object as a string.
    if (expr) res.prob <- get.expression(res.prob)
    if (steps) return(list(P = res.prob, steps = res$steps, id = TRUE))
    return(res.prob)
  } else {
    # Not identifiable: mirror the caller-facing return conventions above.
    if (expr) return("")
    if (steps) return(list(P = res.prob, steps = res$steps, id = FALSE))
    return(NULL)
  }
}
| /R/meta.transport.R | no_license | COVID-19-Causal-Reasoning/causaleffect | R | false | false | 1,002 | r | meta.transport <- function(y, x, D, expr = TRUE, simp = TRUE, steps = FALSE, primes = FALSE, stop_on_nonid = TRUE) {
v <- get.vertex.attribute(D[[1]], "name")
s <- v[which(vertex.attributes(D[[1]])$description == "S")]
interventions <- setdiff(v, union(y, s))
D.causal <- induced.subgraph(D[[1]], v[!(v %in% s)])
D.all <- list()
D.all[[1]] <- D.causal
D.all[2:(length(D)+1)] <- D
Z.all <- list()
Z.all[[1]] <- character(0)
Z.all[2:(length(D)+1)] <- rep(list(interventions), length(D))
res <- generalize(y = y, x = x, Z = Z.all, D = D.all, expr = FALSE, simp = simp, steps = TRUE, primes = primes, stop_on_nonid = stop_on_nonid)
res.prob <- res$P
attr(res.prob, "algorithm") <- "usid"
if (res$id) {
if (expr) res.prob <- get.expression(res.prob)
if (steps) return(list(P = res.prob, steps = res$steps, id = TRUE))
return(res.prob)
} else {
if (expr) return("")
if (steps) return(list(P = res.prob, steps = res$steps, id = FALSE))
return(NULL)
}
}
|
# Run the package spell check (including vignettes) unless the code is
# executing under covr coverage collection; never fail the build on typos.
if (requireNamespace("spelling", quietly = TRUE) && !covr::in_covr()) {
  spelling::spell_check_test(
    vignettes = TRUE,
    error = FALSE,
    skip_on_cran = TRUE
  )
}
| /tests/spelling.R | permissive | franzbischoff/sandbox | R | false | false | 187 | r | if (requireNamespace("spelling", quietly = TRUE)) {
if (!covr::in_covr()) {
spelling::spell_check_test(
vignettes = TRUE, error = FALSE,
skip_on_cran = TRUE
)
}
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/dtime.R
\name{dcut}
\alias{dcut}
\alias{dcut.sub}
\title{Function to extract a vector or matrix from EMU-Trackdata at a single time
point or to create another EMU-trackdata object between two times.}
\usage{
dcut(trackdata, left.time, right.time, single = TRUE, average = TRUE,
prop = FALSE)
}
\arguments{
\item{trackdata}{An Emu trackdata object.}
\item{left.time}{Either: a numeric vector of the same length as there are
observations in trackdata. Or: a single value between 0 and 1. In the first
case, the left time boundary of trackdata[n,] is cut at left.time[n], in
the second case, and if prop=T, it is cut at that proportional time.}
\item{right.time}{Either: a numeric vector of the same length as there are
observations in trackdata. Or: a single value between 0 and 1. In the first
case, the right time boundary of trackdata[n,] is cut at right.time[n], in
the second case, and if prop=T, it is cut at that proportional time.}
\item{single}{If TRUE, one value is returned per segment. This applies when
the requested time falls between two track frames. When single=TRUE, the
preceding value is returned, unless average=TRUE (see below), in which case
the average value of the two frames is returned. when the right.time
argument is omitted}
\item{average}{A single element logical vector - see single above. Applies
only when the right.times argument is omitted and when single = TRUE}
\item{prop}{If TRUE left.time and right.time are interpreted as
proportions, if FALSE, they are interpreted as millisecond times}
}
\value{
A trackdata object if both 'left.time' and 'right.time' are
specified, otherwise a matrix if 'right.time' is unspecified and the
trackdata object has multiple columns of data or a vector if right.time' is
unspecified and the trackdata object has a single column of data.
}
\description{
A general purpose tool for extracting data from track objects either at a
particular time, or between two times. The times can be values in
milliseconds or proportional times between zero (the onset) and one (the
offset).
}
\details{
This function extracts data from each segment of a trackdata object.
If 'prop=FALSE' the time arguments ('left.time' and 'right.time') are
interpreted as millisecond times and each should be a vector with the same
length as the number of segments in 'trackdata'. If 'prop=TRUE' the time
arguments should be single values between zero (the onset of the segment)
and one (the offset).
If 'right.time' is omitted then a single data point corresponding to
'left.time' for each segment is returned.
}
\examples{
# the data values of the trackdata object at the temporal midpoint
# (midvals is matrix of F1 and F2 data)
dip.fdat[1:10]
midvals <- dcut(dip.fdat, 0.5, prop=TRUE)
midvals[1:10,]
# the data values of the trackdata object between
# 20% and 80% of each segment's duration
# (bet is a trackdata object of F1 and F2 values)
bet <- dcut(dip.fdat, 0.2, 0.8, prop=TRUE)
bet[1]
# the data values of the trackdata object at 30 ms after
# the start time of the trackdata object
# (time30 is a matrix of F1 and F2 data)
times <- dip.fdat$ftime[,1]+30
times[1:10]
time30 <- dcut(dip.fdat, times)
time30[1:10]
# the data values of the trackdata object
# between the start time and 30 ms after the start time
# (int is a trackdata object of F1 and F2 values extending
# from the start of the diphthongs up to 30 ms after the diphthongs)
int <- dcut(dip.fdat, dip.fdat$ftime[,1], times)
int[1]
}
\author{
Jonathan Harrington
}
\seealso{
\code{\link{emu.track}}, \code{\link{dplot}}, \code{\link{eplot}}
}
\keyword{datagen}
| /man/dcut.Rd | no_license | hywel/emuR | R | false | false | 3,766 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/dtime.R
\name{dcut}
\alias{dcut}
\alias{dcut.sub}
\title{Function to extract a vector or matrix from EMU-Trackdata at a single time
point of to create another EMU-trackdata object between two times.}
\usage{
dcut(trackdata, left.time, right.time, single = TRUE, average = TRUE,
prop = FALSE)
}
\arguments{
\item{trackdata}{An Emu trackdata object.}
\item{left.time}{Either: a numeric vector of the same length as there are
obsverations in trackdata. Or: a single value between 0 and 1. In the first
case, the left time boundary of trackdata[n,] is cut at left.time[n], in
the second case, and if prop=T, it is cut at that proportional time.}
\item{right.time}{Either: a numeric vector of the same length as there are
obsverations in trackdata. Or: a single value between 0 and 1. In the first
case, the right time boundary of trackdata[n,] is cut at right.time[n], in
the second case, and if prop=T, it is cut at that proportional time.}
\item{single}{If TRUE, one value is returned per segment. This applies when
the requested time falls between two track frames. When single=TRUE, the
preceding value is returned, unless average=TRUE (see below), in which case
the average value of the two frames is returned. when the right.time
argument is omitted}
\item{average}{A single element logical vector - see single above. Applies
only when the right.times argument is omitted and when single = TRUE}
\item{prop}{If TRUE left.time and right.time are interpreted as
proportions, if FALSE, they are interpreted as millisecond times}
}
\value{
A trackdata object if both 'left.time' and 'right.time' are
specified, otherwise a matrix if 'right.time' is unspecified and the
trackdata object has multiple columns of data or a vector if right.time' is
unspecified and the trackdata object has a single column of data.
}
\description{
A general purpose tool for extracting data from track objects either at a
particular time, or between two times. The times can be values in
milliseconds or proportional times between zero (the onset) and one (the
offset).
}
\details{
This function extracts data from each segment of a trackdata object.
If 'prop=FALSE' the time arguments ('left.time' and 'right.time') are
interpreted as millisecond times and each should be a vector with the same
length as the number of segments in 'trackdata'. If 'prop=TRUE' the time
arguments should be single values between zero (the onset of the segment)
and one (the offset).
If 'right.time' is omitted then a single data point correponding to
'left.time' for each segment is returned.
}
\examples{
# the data values of the trackdata object at the temporal midpoint
# (midvals is matrix of F1 and F2 data)
dip.fdat[1:10]
midvals <- dcut(dip.fdat, 0.5, prop=TRUE)
midvals[1:10,]
# the data values of the trackdata object between
# extending from 20
# (bet is a trackdata object of F1 and F2 values)
bet <- dcut(dip.fdat, 0.2, 0.8, prop=TRUE)
bet[1]
# the data values of the trackdata object at 30 ms after
# the start time of the trackdata object
# (time30 is a matrix of F1 and F2 data
times <- dip.fdat$ftime[,1]+30
times[1:10]
time30 <- dcut(dip.fdat, times)
time30[1:10]
# the data values of the trackdata object
# between the start time and 30 ms after the start time
# (int is a trackdata object of F1 and F2 values extending
# from the start of the diphthongs up to 30 ms after the diphthongs)
int <- dcut(dip.fdat, dip.fdat$ftime[,1], times)
int[1]
}
\author{
Jonathan Harrington
}
\seealso{
\code{\link{emu.track}}, \code{\link{dplot}}, \code{\link{eplot}}
}
\keyword{datagen}
|
# Box plot: cost by route, showing every point plus the mean and SD markers
p <- plot_ly(tabla, y = ~Costo, color = ~Ruta, type = "box", boxpoints = 'all', boxmean = "sd")
p = plot_ly(tabla, y = ~Costo, color = ~Ruta, type = "box",boxpoints = 'all',boxmean="sd") |
# Hartigan-Wong k-means adapted to 3D landmark shapes: cluster the n
# configurations in array3D (k landmarks x 3 coords x n cases) into numClust
# clusters, using the Procrustes distance (riemdist) for assignment and the
# Procrustes mean shape (shapes::procGPA) as each cluster centre.
#
# Args:
#   array3D  : k x 3 x n array of landmark configurations.
#   numClust : number of clusters.
#   algSteps : maximum Hartigan steps per random restart.
#   niter    : number of random restarts.
#   stopCr   : relative objective improvement below which a restart stops.
#   simul    : simulation mode; assumes two balanced groups of n/2 cases each
#              (see the allocation-rate block) and reports the allocation rate.
#   initLl   : TRUE to take initial centres from `initials`; FALSE to sample.
#   initials : list of initial centre indices, one vector per restart.
#   verbose  : print progress information.
#
# Returns: list(ic1 = optimal cluster labels, cases = optimal mean shapes,
# vopt = optimal objective value); when simul = TRUE also compTime (minutes
# per restart) and AllRate (allocation rate per restart).
HartiganShapes <- function(array3D,numClust,algSteps=10,niter=10,stopCr=0.0001,simul,initLl,initials,
                           verbose){
 #,computCost
 time_iter <- list()
 comp_time <- c()
 list_ic1 <- list()
 list_ic1_step <- list()
 vect_all_rate <- c()
 # NOTE(review): `ll` and `dist` are initialized here but never used below.
 ll <- 1 : numClust
 dist <- matrix(0, dim(array3D)[3], numClust)
 if(verbose){
  print(Sys.time())
 }
 time_ini <- Sys.time()
 #Initialize the objective function by a large enough value:
 vopt <- 1e+08
 #Random restarts:
 for(iter in 1 : niter){
  wss_step <- list()
  if(verbose){
   cat("New iteration")
   print(iter)
   cat("Optimal value with which this iteration starts:")
   print(vopt)
  }
  #STEP 1: For each point I, find its two closest centers, IC1(I) and IC2(I). Assign the point to IC1(I):
  meanshapes <- 0 ; mean_sh <- list()
  ic1 <- c() ; ic2 <- c() ; dt <- c() ; nc <- c() #number of points in each cluster.
  an1 <- c() ; an2 <- c() ; itran <- c() ; ncp <- c()
  indx <- c() ; d <- c() ; live <- c() ; wss <- c()
  n <- dim(array3D)[3]
  initials_hart <- list()
  if(initLl){
   initials_hart[[iter]] <- initials[[iter]]
  }else{
   initials_hart[[iter]] <- sample(1:n, numClust, replace = FALSE)
  }
  if(verbose){
   cat("Initial values of this iteration:")
   print(initials_hart[[iter]])
  }
  #Initial centres: the numClust configurations indexed by initials_hart.
  meanshapes <- array3D[,,initials_hart[[iter]]]
  #meanshapes_aux <- array3D[, , initials[[iter]]]
  #if(computCost){
   #time_ini_dist <- Sys.time()
   #dist_aux = riemdist(array3D[,,1], y = meanshapes[,,1])
   #time_end_dist <- Sys.time()
   #cat("Computational cost of the Procrustes distance:")
   #print(time_end_dist - time_ini_dist)
  #}
  for(i in 1 : n){
   ic1[i] = 1
   ic2[i] = 2
   for(il in 1 : 2){
    dt[il] = (riemdist(array3D[,,i], meanshapes[,,il]))^2
   }
   if(dt[2] < dt[1]){
    ic1[i] = 2
    ic2[i] = 1
    temp = dt[1]
    dt[1] = dt[2]
    dt[2] = temp
   }
   #In simulation mode only two clusters exist, so the remaining centres
   #are only scanned when simul is FALSE.
   if(simul == FALSE){
    for(l in 3 : numClust){
     db = (riemdist(array3D[,,i], meanshapes[,,l]))^2
     if(db < dt[2]){
      if(dt[1] <= db){
       dt[2] = db
       ic2[i] = l
      }else{
       dt[2] = dt[1]
       ic2[i] = ic1[i]
       dt[1] = db
       ic1[i] = l
      }
     }
    }
   }
  }
  #if(computCost){
   #time_ini_mean <- Sys.time()
   #meanshapes_aux[,,1] = procGPA(array3D[, , ic1 == 1], distances = TRUE, pcaoutput = TRUE)$mshape
   #time_end_mean <- Sys.time()
   #cat("Computational cost of the Procrustes mean:")
   #print(time_end_mean - time_ini_mean)
  #}
  #STEP 2: Update the cluster centres to be the averages of points contained within them.
  #Check to see if there is any empty cluster at this stage:
  for(l in 1 : numClust){
   #NOTE(review): table(ic1)[l] assumes every label 1..numClust occurs in
   #ic1; a missing label would yield NA here.
   nc[l] <- table(ic1)[l]
   #print("Loop numClust")
   #print(nc[l])
   if(nc[l] <= 3){ # nc[l] == 0
    #stop("At least one cluster is empty or has a very few elements after the initial assignment.
    # A better set of initial cluster centers is needed.")
    #break
    return(cat("At least one cluster is empty or has a very few elements after the initial assignment.
         A better set of initial cluster centers is needed. No solution provided."))
   }
  }
  #print("Loop checked successfully")
  for(l in 1 : numClust){
   aa = nc[l]
   #print("This is array3D")
   #print(array3D)
   #print("This is ic1")
   #print(ic1)
   #print("This is l")
   #print(l)
   x <- array3D[, , ic1 == l]
   #print("This is x")
   #print(dim(x))
   if (length(dim(x)) != 3) {
    #stop("Please ensure that array3D has 3 dimensions.")
    #break
    return(cat("Please ensure that array3D has 3 dimensions.")) # This is not actually needed
    # anymore because the previous return already stops the execution since there are some
    # very small clusters.
   }else{
    #Cluster centre = Procrustes mean shape of its members.
    meanshapes[,,l] = shapes::procGPA(x, distances = TRUE, pcaoutput = TRUE)$mshape
   }
   #Initialize AN1, AN2, ITRAN and NCP.
   #AN1(L) = NC(L) / (NC(L) - 1)
   #AN2(L) = NC(L) / (NC(L) + 1)
   #ITRAN(L) = 1 if cluster L is updated in the quick-transfer stage,
   #     = 0 otherwise
   #In the optimal-transfer stage, NCP(L) stores the step at which cluster L is last updated.
   #In the quick-transfer stage, NCP(L) stores the step at which cluster L is last updated plus M:
   an2[l] = aa / (aa + 1)
   if(1 < aa){
    an1[l] = aa / (aa - 1)
   }else{
    an1[l] = Inf
   }
   itran[l] = 1
   ncp[l] = -1
  }
  indx <- 0
  d[1:n] = 0
  live[1:numClust] = 0
  for(step in 1 : algSteps){
   #In this stage, there is only one pass through the data. Each point is re-allocated, if necessary, to the
   #cluster that will induce the maximum reduction in within-cluster sum of squares:
   lis <- optraShapes(array3D,n,meanshapes,numClust,ic1,ic2,nc,an1,an2,ncp,d,itran,live,indx)
   meanshapes <- lis[[1]] ; ic1 <- lis[[2]] ; ic2 <- lis[[3]] ; nc <- lis[[4]] ; an1 <- lis[[5]] ; an2 <- lis[[6]] ; ncp <- lis[[7]]
   d <- lis[[8]] ; itran <- lis[[9]] ; live <- lis[[10]] ; indx <- lis[[11]]
   #Each point is tested in turn to see if it should be re-allocated to the cluster to which it is most likely
   #to be transferred, IC2(I), from its present cluster, IC1(I). Loop through the data until no further change
   #is to take place:
   lis1 <- qtranShapes(array3D,n,meanshapes,ic1,ic2,nc,an1,an2,ncp,d,itran,indx)
   meanshapes <- lis1[[1]] ; ic1 <- lis1[[2]] ; ic2 <- lis1[[3]] ; nc <- lis1[[4]] ; an1 <- lis1[[5]] ; an2 <- lis1[[6]] ; ncp <- lis1[[7]]
   d <- lis1[[8]] ; itran <- lis1[[9]] ; indx <- lis1[[10]] ; icoun <- lis1[[11]]
   mean_sh[[step]] <- meanshapes
   #NCP has to be set to 0 before entering OPTRA:
   for( l in 1 : numClust ){
    ncp[l] = 0
   }
   #Compute the within-cluster sum of squares for each cluster:
   wss <- vector("list", numClust)
   for(num_cl in 1 : numClust){
    wss[[num_cl]] <- 0
    array3D_cl <- array(0, dim = c(n, 3, table(ic1)[num_cl])) #table(ic1)[num_cl] is the number of observations that
                                  #belong to each cluster.
    array3D_cl <- array3D[,,ic1 == num_cl]
    distances <- c()
    for(num_mujs_cl in 1:table(ic1)[num_cl]){
     distances[num_mujs_cl] <- riemdist(array3D_cl[,,num_mujs_cl], meanshapes[,,num_cl])^2
    }
    wss[[num_cl]] <- sum(distances) / n
   }
   #Total within-cluster sum of squares:
   wss_step[[step]] <- sum(unlist(wss))
   list_ic1_step[[step]] <- ic1
   if(verbose){
    paste(cat("Clustering of the Nstep", step, ":\n"))
    print(table(list_ic1_step[[step]]))
   }
   if(verbose){
    if(iter <= 10){
     paste(cat("Objective function of the Nstep", step))
     print(wss_step[[step]])
    }
   }
   #Stop this restart early once the relative improvement drops below stopCr.
   if(step > 1){
    aux <- wss_step[[step]]
    aux1 <- wss_step[[step-1]]
    if( ((aux1 - aux) / aux1) < stopCr ){
     break
    }
   }
  }#The algSteps loop ends here.
  #Calculus of the objective function (the total within-cluster sum of squares):
  wss1 <- vector("list", numClust)
  for(num_cl in 1 : numClust){
   wss1[[num_cl]] <- 0
   array3D_cl1 <- array(0, dim = c(n, 3, table(ic1)[num_cl]))
   array3D_cl1 <- array3D[,,ic1 == num_cl]
   distances1 <- c()
   for(num_mujs_cl in 1:table(ic1)[num_cl]){
    distances1[num_mujs_cl] <- riemdist(array3D_cl1[,,num_mujs_cl], meanshapes[,,num_cl])^2
   }
   wss1[[num_cl]] <- sum(distances1) / n
  }
  #Total within-cluster sum of squares:
  wss_step1 <- 0
  wss_step1 <- sum(unlist(wss1))
  #Change the optimal value and the optimal centers (copt) if a reduction in the objective function happens:
  if(wss_step1 > min(unlist(wss_step))){
   if(min(unlist(wss_step)) < vopt){
    vopt <- min(unlist(wss_step))
    if(verbose){
     #Improvements in the objective functions are printed:
     cat("optimal")
     print(vopt)
    }
    optim_wss <- which.min(unlist(wss_step))
    copt <- mean_sh[[optim_wss]] #optimal centers.
    ic1_opt <- list_ic1_step[[optim_wss]]
   }
  }else if(wss_step1 < vopt){
   vopt <- wss_step1
   if(verbose){
    #Improvements in the objective functions are printed:
    cat("optimal")
    print(vopt)
   }
   optim_wss <- which.min(unlist(wss_step))
   copt <- mean_sh[[optim_wss]] #optimal centers.
   ic1_opt <- list_ic1_step[[optim_wss]]
  }
  #Per-restart timing, in minutes.
  time_iter[[iter]] <- Sys.time()
  if(iter == 1){
   comp_time[1] <- difftime(time_iter[[iter]], time_ini, units = "mins")
   if(verbose){
    cat("Computational time of this iteration: \n")
    print(time_iter[[iter]] - time_ini)
   }
  }else{
   comp_time[iter] <- difftime(time_iter[[iter]], time_iter[[iter-1]], units = "mins")
   if(verbose){
    cat("Computational time of this iteration: \n")
    print(time_iter[[iter]] - time_iter[[iter - 1]])
   }
  }
  if(verbose){
   cat("Optimal clustering of this iteration: \n")
  }
  optim_wss <- which.min(unlist(wss_step))
  list_ic1[[iter]] <- list_ic1_step[[optim_wss]]
  if(verbose){
   print(table(list_ic1[[iter]]))
  }
  if(simul){
   #Allocation rate: assumes the first n/2 cases belong to group 1 and the
   #last n/2 to group 2 (two balanced simulated groups).
   as1 <- table(list_ic1[[iter]][1:(n/2)])
   as2 <- table(list_ic1[[iter]][seq(n/2 + 1,n)])
   if( max(as1) != n/2 & max(as2) != n/2 ){
    suma <- min(as1) + min(as2)
    all_rate <- 1 - suma / n
   }else if( (max(as1) == n/2 & max(as2) != n/2) || (max(as1) != n/2 & max(as2) == n/2) ){
    minim <- min(min(as1),min(as2))
    all_rate <- 1 - minim / n
   }else if( max(as1) == n/2 & max(as2) == n/2 ){
    all_rate <- 1
   }
   vect_all_rate[iter] <- all_rate
   if(verbose){
    cat("Optimal allocation rate in this iteration:")
    print(all_rate)
   }
  }
 }#The niter loop ends here.
 if(simul){
  dimnames(copt) <- NULL
  return(list(ic1=ic1_opt,cases=copt,vopt=vopt,compTime=comp_time,
        AllRate=vect_all_rate))
 }else{
  return(list(ic1=ic1_opt,cases=copt,vopt=vopt))
 }
}
| /R/HartiganShapes.R | no_license | cran/Anthropometry | R | false | false | 9,906 | r | HartiganShapes <- function(array3D,numClust,algSteps=10,niter=10,stopCr=0.0001,simul,initLl,initials,
verbose){
#,computCost
time_iter <- list()
comp_time <- c()
list_ic1 <- list()
list_ic1_step <- list()
vect_all_rate <- c()
ll <- 1 : numClust
dist <- matrix(0, dim(array3D)[3], numClust)
if(verbose){
print(Sys.time())
}
time_ini <- Sys.time()
#Initialize the objective function by a large enough value:
vopt <- 1e+08
#Ramdon restarts:
for(iter in 1 : niter){
wss_step <- list()
if(verbose){
cat("New iteration")
print(iter)
cat("Optimal value with which this iteration starts:")
print(vopt)
}
#STEP 1: For each point I, find its two closest centers, IC1(I) and IC2(I). Assign the point to IC1(I):
meanshapes <- 0 ; mean_sh <- list()
ic1 <- c() ; ic2 <- c() ; dt <- c() ; nc <- c() #number of points in each cluster.
an1 <- c() ; an2 <- c() ; itran <- c() ; ncp <- c()
indx <- c() ; d <- c() ; live <- c() ; wss <- c()
n <- dim(array3D)[3]
initials_hart <- list()
if(initLl){
initials_hart[[iter]] <- initials[[iter]]
}else{
initials_hart[[iter]] <- sample(1:n, numClust, replace = FALSE)
}
if(verbose){
cat("Initial values of this iteration:")
print(initials_hart[[iter]])
}
meanshapes <- array3D[,,initials_hart[[iter]]]
#meanshapes_aux <- array3D[, , initials[[iter]]]
#if(computCost){
#time_ini_dist <- Sys.time()
#dist_aux = riemdist(array3D[,,1], y = meanshapes[,,1])
#time_end_dist <- Sys.time()
#cat("Computational cost of the Procrustes distance:")
#print(time_end_dist - time_ini_dist)
#}
for(i in 1 : n){
ic1[i] = 1
ic2[i] = 2
for(il in 1 : 2){
dt[il] = (riemdist(array3D[,,i], meanshapes[,,il]))^2
}
if(dt[2] < dt[1]){
ic1[i] = 2
ic2[i] = 1
temp = dt[1]
dt[1] = dt[2]
dt[2] = temp
}
if(simul == FALSE){
for(l in 3 : numClust){
db = (riemdist(array3D[,,i], meanshapes[,,l]))^2
if(db < dt[2]){
if(dt[1] <= db){
dt[2] = db
ic2[i] = l
}else{
dt[2] = dt[1]
ic2[i] = ic1[i]
dt[1] = db
ic1[i] = l
}
}
}
}
}
#if(computCost){
#time_ini_mean <- Sys.time()
#meanshapes_aux[,,1] = procGPA(array3D[, , ic1 == 1], distances = TRUE, pcaoutput = TRUE)$mshape
#time_end_mean <- Sys.time()
#cat("Computational cost of the Procrustes mean:")
#print(time_end_mean - time_ini_mean)
#}
#STEP 2: Update the cluster centres to be the averages of points contained within them.
#Check to see if there is any empty cluster at this stage:
for(l in 1 : numClust){
nc[l] <- table(ic1)[l]
#print("Loop numClust")
#print(nc[l])
if(nc[l] <= 3){ # nc[l] == 0
#stop("At least one cluster is empty or has a very few elements after the initial assignment.
# A better set of initial cluster centers is needed.")
#break
return(cat("At least one cluster is empty or has a very few elements after the initial assignment.
A better set of initial cluster centers is needed. No solution provided."))
}
}
#print("Loop checked successfully")
for(l in 1 : numClust){
aa = nc[l]
#print("This is array3D")
#print(array3D)
#print("This is ic1")
#print(ic1)
#print("This is l")
#print(l)
x <- array3D[, , ic1 == l]
#print("This is x")
#print(dim(x))
if (length(dim(x)) != 3) {
#stop("Please ensure that array3D has 3 dimensions.")
#break
return(cat("Please ensure that array3D has 3 dimensions.")) # This is not actually needed
# anymore because the previous return already stops the execution since there are some
# very small clusters.
}else{
meanshapes[,,l] = shapes::procGPA(x, distances = TRUE, pcaoutput = TRUE)$mshape
}
#Initialize AN1, AN2, ITRAN and NCP.
#AN1(L) = NC(L) / (NC(L) - 1)
#AN2(L) = NC(L) / (NC(L) + 1)
#ITRAN(L) = 1 if cluster L is updated in the quick-transfer stage,
# = 0 otherwise
#In the optimal-transfer stage, NCP(L) stores the step at which cluster L is last updated.
#In the quick-transfer stage, NCP(L) stores the step at which cluster L is last updated plus M:
an2[l] = aa / (aa + 1)
if(1 < aa){
an1[l] = aa / (aa - 1)
}else{
an1[l] = Inf
}
itran[l] = 1
ncp[l] = -1
}
indx <- 0
d[1:n] = 0
live[1:numClust] = 0
for(step in 1 : algSteps){
#In this stage, there is only one pass through the data. Each point is re-allocated, if necessary, to the
#cluster that will induce the maximum reduction in within-cluster sum of squares:
lis <- optraShapes(array3D,n,meanshapes,numClust,ic1,ic2,nc,an1,an2,ncp,d,itran,live,indx)
meanshapes <- lis[[1]] ; ic1 <- lis[[2]] ; ic2 <- lis[[3]] ; nc <- lis[[4]] ; an1 <- lis[[5]] ; an2 <- lis[[6]] ; ncp <- lis[[7]]
d <- lis[[8]] ; itran <- lis[[9]] ; live <- lis[[10]] ; indx <- lis[[11]]
#Each point is tested in turn to see if it should be re-allocated to the cluster to which it is most likely
#to be transferred, IC2(I), from its present cluster, IC1(I). Loop through the data until no further change
#is to take place:
lis1 <- qtranShapes(array3D,n,meanshapes,ic1,ic2,nc,an1,an2,ncp,d,itran,indx)
meanshapes <- lis1[[1]] ; ic1 <- lis1[[2]] ; ic2 <- lis1[[3]] ; nc <- lis1[[4]] ; an1 <- lis1[[5]] ; an2 <- lis1[[6]] ; ncp <- lis1[[7]]
d <- lis1[[8]] ; itran <- lis1[[9]] ; indx <- lis1[[10]] ; icoun <- lis1[[11]]
mean_sh[[step]] <- meanshapes
#NCP has to be set to 0 before entering OPTRA:
for( l in 1 : numClust ){
ncp[l] = 0
}
#Compute the within-cluster sum of squares for each cluster:
wss <- vector("list", numClust)
for(num_cl in 1 : numClust){
wss[[num_cl]] <- 0
array3D_cl <- array(0, dim = c(n, 3, table(ic1)[num_cl])) #table(ic1)[num_cl] is the number of observations that
#belong to each cluster.
array3D_cl <- array3D[,,ic1 == num_cl]
distances <- c()
for(num_mujs_cl in 1:table(ic1)[num_cl]){
distances[num_mujs_cl] <- riemdist(array3D_cl[,,num_mujs_cl], meanshapes[,,num_cl])^2
}
wss[[num_cl]] <- sum(distances) / n
}
#Total within-cluster sum of squares:
wss_step[[step]] <- sum(unlist(wss))
list_ic1_step[[step]] <- ic1
if(verbose){
paste(cat("Clustering of the Nstep", step, ":\n"))
print(table(list_ic1_step[[step]]))
}
if(verbose){
if(iter <= 10){
paste(cat("Objective function of the Nstep", step))
print(wss_step[[step]])
}
}
if(step > 1){
aux <- wss_step[[step]]
aux1 <- wss_step[[step-1]]
if( ((aux1 - aux) / aux1) < stopCr ){
break
}
}
}#The algSteps loop ends here.
#Calculus of the objective function (the total within-cluster sum of squares):
wss1 <- vector("list", numClust)
for(num_cl in 1 : numClust){
wss1[[num_cl]] <- 0
array3D_cl1 <- array(0, dim = c(n, 3, table(ic1)[num_cl]))
array3D_cl1 <- array3D[,,ic1 == num_cl]
distances1 <- c()
for(num_mujs_cl in 1:table(ic1)[num_cl]){
distances1[num_mujs_cl] <- riemdist(array3D_cl1[,,num_mujs_cl], meanshapes[,,num_cl])^2
}
wss1[[num_cl]] <- sum(distances1) / n
}
#Total within-cluster sum of squares:
wss_step1 <- 0
wss_step1 <- sum(unlist(wss1))
#Change the optimal value and the optimal centers (copt) if a reduction in the objective function happens:
if(wss_step1 > min(unlist(wss_step))){
if(min(unlist(wss_step)) < vopt){
vopt <- min(unlist(wss_step))
if(verbose){
#Improvements in the objective functions are printed:
cat("optimal")
print(vopt)
}
optim_wss <- which.min(unlist(wss_step))
copt <- mean_sh[[optim_wss]] #optimal centers.
ic1_opt <- list_ic1_step[[optim_wss]]
}
}else if(wss_step1 < vopt){
vopt <- wss_step1
if(verbose){
#Improvements in the objective functions are printed:
cat("optimal")
print(vopt)
}
optim_wss <- which.min(unlist(wss_step))
copt <- mean_sh[[optim_wss]] #optimal centers.
ic1_opt <- list_ic1_step[[optim_wss]]
}
time_iter[[iter]] <- Sys.time()
if(iter == 1){
comp_time[1] <- difftime(time_iter[[iter]], time_ini, units = "mins")
if(verbose){
cat("Computational time of this iteration: \n")
print(time_iter[[iter]] - time_ini)
}
}else{
comp_time[iter] <- difftime(time_iter[[iter]], time_iter[[iter-1]], units = "mins")
if(verbose){
cat("Computational time of this iteration: \n")
print(time_iter[[iter]] - time_iter[[iter - 1]])
}
}
if(verbose){
cat("Optimal clustering of this iteration: \n")
}
optim_wss <- which.min(unlist(wss_step))
list_ic1[[iter]] <- list_ic1_step[[optim_wss]]
if(verbose){
print(table(list_ic1[[iter]]))
}
if(simul){
#Allocation rate:
as1 <- table(list_ic1[[iter]][1:(n/2)])
as2 <- table(list_ic1[[iter]][seq(n/2 + 1,n)])
if( max(as1) != n/2 & max(as2) != n/2 ){
suma <- min(as1) + min(as2)
all_rate <- 1 - suma / n
}else if( (max(as1) == n/2 & max(as2) != n/2) || (max(as1) != n/2 & max(as2) == n/2) ){
minim <- min(min(as1),min(as2))
all_rate <- 1 - minim / n
}else if( max(as1) == n/2 & max(as2) == n/2 ){
all_rate <- 1
}
vect_all_rate[iter] <- all_rate
if(verbose){
cat("Optimal allocation rate in this iteration:")
print(all_rate)
}
}
}#The niter loop ends here.
if(simul){
dimnames(copt) <- NULL
return(list(ic1=ic1_opt,cases=copt,vopt=vopt,compTime=comp_time,
AllRate=vect_all_rate))
}else{
return(list(ic1=ic1_opt,cases=copt,vopt=vopt))
}
}
|
#! Rscript targatt_competitor_ddos.R --
#### COMPETITOR TARGETED ATTACK: DDoS Attack ----
# Simulate num ddos attacks
# Simulate the number of targeted DDoS attacks launched against the firm.
#
# Args:
#   targdos_decision: 0/1 flag -- whether the competitor decided to launch
#     targeted DDoS attacks. When 0, no attacks occur.
#
# Returns:
#   Integer count of attacks: 0 when no attack is decided, otherwise a
#   rounded draw from Gamma(shape = 5, rate = 1) (mean ~5 attacks).
numDDoSAttacks <- function(targdos_decision) {
  # No attack decision: no attacks (and no RNG draw, matching the original).
  if (targdos_decision == 0) {
    return(0)
  }
  round(rgamma(1, shape = 5, rate = 1))
}
# DDoS duration (l) IN HOURS
# Draw the total duration (hours) of `num_attacks` individual DDoS attacks.
# Per attack: mean v ~ U(3.6, 4.8), rate v/mu ~ U(0.8, 1.2), then a rounded
# Gamma(v, v/mu) draw. seq_len() is used so a zero count performs no draws
# (1:0 would wrongly iterate twice).
totalAttackHours <- function(num_attacks) {
  total <- 0
  for (j in seq_len(num_attacks)) {
    average_duration <- runif(1, 3.6, 4.8)  # v
    dispersion <- runif(1, 0.8, 1.2)        # v/mu
    total <- total + round(rgamma(1, average_duration, dispersion))
  }
  total
}

# Total DDoS downtime IN HOURS across all targeted attacks this period.
#
# Args:
#   targdos_decision: 0/1 flag -- whether targeted DDoS attacks occur.
#   num_ddos_attacks: number of attacks to simulate (e.g. numDDoSAttacks()).
#   cloud: 0/1 flag -- whether cloud-based DDoS protection is in place; when
#     1, the hours of a second simulated batch of attacks
#     (count ~ round(Gamma(10, 1))) are subtracted as mitigated downtime.
#
# Returns:
#   Non-negative total downtime in hours; 0 when no attack is decided or
#   there are no attacks.
ddosDuration <- function(targdos_decision,
                         num_ddos_attacks,
                         cloud) {
  # Original used scalar `if` with `&`; `||`/`!=` here is the scalar-safe
  # equivalent and also covers decision values other than 0/1 (returns 0).
  if (targdos_decision != 1 || num_ddos_attacks <= 0) {
    return(0)
  }
  ddos_duration <- totalAttackHours(num_ddos_attacks)
  if (cloud == 1) {
    # Cloud mitigation absorbs the hours of its own simulated attack batch.
    num_attacks_cloud <- round(rgamma(1, 10, 1))
    ddos_duration <- ddos_duration - totalAttackHours(num_attacks_cloud)
    ddos_duration <- max(ddos_duration, 0)
  }
  ddos_duration
}
# Reduce an existing DDoS downtime by the hours absorbed by cloud-based
# protection.
#
# Args:
#   cloud: 0/1 flag -- whether cloud DDoS protection is in place.
#   ddos_duration: downtime in hours before mitigation.
#
# Returns:
#   Downtime floored at 0: unchanged when cloud == 0, otherwise reduced by
#   the total hours of a simulated batch of mitigated attacks
#   (count ~ round(Gamma(10, 1)), per-attack duration as in ddosDuration).
#
# Fixes vs. original: the rgamma draw for the cloud attack count is now made
# only when cloud == 1, so the function no longer advances the global RNG
# stream in the cloud == 0 branch; seq_len() avoids the 1:0 double-iteration
# when the rounded count is 0; the no-op self-assignment and discarded
# branch value are removed.
cloudReduction <- function(cloud, ddos_duration) {
  if (cloud == 1) {
    cloud_duration <- 0
    num_attacks_cloud <- round(rgamma(1, 10, 1))
    for (j in seq_len(num_attacks_cloud)) {
      average_duration <- runif(1, 3.6, 4.8)  # v
      dispersion <- runif(1, 0.8, 1.2)        # v/mu
      cloud_duration <- cloud_duration + round(rgamma(1, average_duration, dispersion))
    }
    ddos_duration <- ddos_duration - cloud_duration
  }
  # Mitigation can never make downtime negative.
  max(ddos_duration, 0)
}
#### COMPETITOR TARGETED ATTACK: DDoS Attack ----
# Simulate num ddos attacks
# Mirror copy of numDDoSAttacks (dataset duplicate of the same file).
# Number of targeted DDoS attacks: 0 when the attack decision flag is 0,
# otherwise a rounded Gamma(shape = 5, rate = 1) draw (mean ~5 attacks).
numDDoSAttacks <- function(targdos_decision){
  num_ddos_attacks <- 0
  # When no attack is decided, the bare {0} is evaluated and discarded and
  # the count keeps its initial value of 0.
  if (targdos_decision == 0) {0}
  else{
    num_ddos_attacks = round(rgamma(1,5,1))
  }
  num_ddos_attacks
}
# DDoS duration (l) IN HOURS
# Mirror copy of ddosDuration (dataset duplicate of the same file).
# Total DDoS downtime IN HOURS over num_ddos_attacks attacks. Each attack
# draws its mean (v ~ U(3.6, 4.8)) and rate (v/mu ~ U(0.8, 1.2)) and then a
# rounded Gamma duration. When cloud == 1, the hours of a second simulated
# batch of attacks (count ~ round(Gamma(10, 1))) are subtracted as cloud
# mitigation, flooring the result at 0.
ddosDuration <-function(targdos_decision,
                        num_ddos_attacks,
                        cloud){
  ddos_duration <-0
  if (targdos_decision == 0) {0}
  else if (targdos_decision == 1 & num_ddos_attacks>0) {
    # Accumulate the duration of each individual attack.
    for (j in 1:num_ddos_attacks){
      average_duration = runif(1,3.6,4.8) # v
      dispersion = runif(1,0.8,1.2) # v/mu
      individual_attack_duration <- round(rgamma(1,average_duration,dispersion))
      ddos_duration = ddos_duration + individual_attack_duration
    }
    if (cloud==1){
      # Hours absorbed by cloud-based DDoS protection.
      cloud_duration <- 0
      num_attacks_cloud = round(rgamma(1,10,1))
      for (j in 1:num_attacks_cloud){
        average_duration = runif(1,3.6,4.8) # v
        dispersion = runif(1,0.8,1.2) # v/mu
        individual_attack_duration <- round(rgamma(1,average_duration,dispersion))
        cloud_duration = cloud_duration + individual_attack_duration
      }
      ddos_duration = ddos_duration - cloud_duration
      # Mitigation cannot make downtime negative.
      if (ddos_duration<0){ddos_duration <- 0}
    }
  }
  ddos_duration
}
# Mirror copy of cloudReduction (dataset duplicate of the same file).
# Subtracts cloud-mitigated hours from ddos_duration when cloud == 1 and
# floors the result at 0; when cloud == 0 the duration is returned
# unchanged (still floored at 0).
# NOTE(review): num_attacks_cloud is drawn from the RNG even when
# cloud == 0, so this function advances the random stream regardless of the
# branch taken -- confirm whether that is intended.
cloudReduction <- function(cloud,ddos_duration){
  ddos_duration <- ddos_duration
  cloud_duration <- 0
  num_attacks_cloud = round(rgamma(1,10,1))
  # The bare {ddos_duration} value here is discarded; the function falls
  # through to the floor check and final return below.
  if (cloud == 0){ddos_duration}
  else{
    if (cloud==1){
      for (j in 1:num_attacks_cloud){
        average_duration = runif(1,3.6,4.8) # v
        dispersion = runif(1,0.8,1.2) # v/mu
        individual_attack_duration <- round(rgamma(1,average_duration,dispersion))
        cloud_duration = cloud_duration + individual_attack_duration
      }
      ddos_duration = ddos_duration - cloud_duration
    }
  }
  if (ddos_duration<0){
    ddos_duration = 0
  }
  ddos_duration
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fmtcatch_handle_indir.R
\name{make_param}
\alias{make_param}
\title{Check input directory name}
\usage{
make_param(datdir, year_ad, month, pref_name)
}
\arguments{
\item{datdir}{Path to the data directory, e.g. \code{"../data/"}.}
\item{year_ad}{Calendar year (not fiscal), e.g. \code{2018}.}
\item{month}{Month number, e.g. \code{10}.}
\item{pref_name}{Prefecture name, e.g. \code{"kagoshima"}.}
}
\value{
String of input directory
}
\description{
Check input directory name
}
\examples{
make_param("../data/", 2018, 10, "kagoshima")
}
| /_bk/fmtcatch/man/make_param.Rd | permissive | gitter-badger/gyokaikyor | R | false | true | 508 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fmtcatch_handle_indir.R
\name{make_param}
\alias{make_param}
\title{Check input directory name}
\usage{
make_param(datdir, year_ad, month, pref_name)
}
\arguments{
\item{datdir}{<- "../data/"}
\item{year_ad}{<- 2018 # Not fiscal}
\item{month}{<- 10}
\item{pref_name}{<- "kagoshima"}
}
\value{
String of input directory
}
\description{
Check input directory name
}
\examples{
check_dirname("../data/", 2018, 10, "kagoshima")
}
|
# Load swirl, install the quiz course from GitHub, and start a session.
library(swirl)
# Fetch the 'All_The_Quizzes' course from the johnsanterre GitHub account.
install_course_github('johnsanterre','All_The_Quizzes')
# Launch the interactive swirl prompt so the quizzes can be taken.
swirl()
# Mirror copy (dataset duplicate): install the GitHub quiz course and
# start an interactive swirl session.
install_course_github('johnsanterre','All_The_Quizzes')
swirl()
dropDownNavPage(tableName = tableName) | /inst/UI/newData/ui.R | no_license | mpio-be/DataEntry | R | false | false | 39 | r |
dropDownNavPage(tableName = tableName) |
### load libraries
library(tidyverse)
library(scales)
library(spatstat)
library(geosphere)
library(colorspace)
library(patchwork)
library(brms)
library(ggridges)
### load output
# modelling results -- can either download or create using 'miseq/scripts/geoextent_brm_host_modelfit.R' (run before proceeding, if needed)
# Ensure the workspace directory exists; recursive = TRUE creates all
# missing parents in one call (replaces three chained file.exists checks).
if (!dir.exists('miseq/output/workspace')) {
  dir.create('miseq/output/workspace', recursive = TRUE)
}
# Download the fitted-model workspace once and reuse the cached copy on
# subsequent runs.
ws_path <- 'miseq/output/workspace/geoextent_brm_host.RData'
if (!file.exists(ws_path)) {
  download.file('https://cloudstor.aarnet.edu.au/plus/s/c7kamHx0RNM6pCd/download',
                ws_path)
}
load(ws_path)
### analysis of extent
# Model comparison via approximate leave-one-out CV (loo); ELPD differences
# are small, so the simplest model (fit2.ext) is retained.
loo_compare(fit1.ext.loo, fit2.ext.loo, fit3.ext.loo, fit4.ext.loo)
loo_compare(fit1.ext.loo, fit2.ext.loo)
# Posterior summary of the retained model (95% credible intervals).
summary(fit2.ext, prob=0.95)
# Trace/density plots for the population-level ("fixed") effects.
plot(fit2.ext, pars = "^b_")
# One-sided posterior test: extent greater for host.assoc == 'yes'
hypothesis(fit2.ext, "host.assocyes > 0", class = "b")
# Spore-volume slope steeper for host.assoc == 'yes' than for 'no'
hypothesis(fit2.ext, "log10.sporeVolume.cent:host.assocyes + log10.sporeVolume.cent > log10.sporeVolume.cent", class = "b")
# negative slope for host.assoc == 'no'
hypothesis(fit2.ext, "log10.sporeVolume.cent < 0", class = "b")
# negative slope for host.assoc == 'yes' (baseline + interaction)
hypothesis(fit2.ext, "log10.sporeVolume.cent:host.assocyes + log10.sporeVolume.cent < 0", class = "b")
### analysis of area
# Same workflow as for extent; loo comparison again favours the simplest
# model (fit2.area).
loo_compare(fit1.area.loo, fit2.area.loo, fit3.area.loo, fit4.area.loo)
loo_compare(fit1.area.loo, fit2.area.loo)
# model summary
summary(fit2.area, prob=0.95)
# population-level effects
plot(fit2.area, pars = "^b_")
# area greater for host.assoc == 'yes'
hypothesis(fit2.area, "host.assocyes > 0", class = "b")
# slope greater for host.assoc == 'yes'
hypothesis(fit2.area, "log10.sporeVolume.cent:host.assocyes + log10.sporeVolume.cent > log10.sporeVolume.cent", class = "b")
# negative slope for host.assoc == 'no'
hypothesis(fit2.area, "log10.sporeVolume.cent < 0", class = "b")
# negative slope for host.assoc == 'yes'
hypothesis(fit2.area, "log10.sporeVolume.cent:host.assocyes + log10.sporeVolume.cent < 0", class = "b")
### plotting output

### confidence bands for slope

# Concatenate the post-warmup draws (iterations 1001:2000) of one parameter
# across the four MCMC chains of a brms fit into a single numeric vector.
posterior_draws <- function(fit, par) {
  unlist(lapply(1:4, function(chain) {
    attr(fit$fit, 'sim')$samples[[chain]][[par]][1001:2000]
  }), use.names = FALSE)
}

# Flag which draws fall strictly inside the central interval with coverage
# `level` (e.g. level = 0.95 -> between the 2.5% and 97.5% quantiles).
inside_interval <- function(x, level) {
  lower <- (1 - level) / 2
  x > quantile(x, prob = lower) & x < quantile(x, prob = 1 - lower)
}

# Posterior spore-volume slopes for one fitted model. The free-living slope
# ('no') is the baseline coefficient; the host-associated slope ('yes') adds
# the interaction term.
# NOTE(review): as in the original code, the conf.* flags for the 'yes' rows
# are computed on the interaction draws alone, not on the combined slope --
# confirm this is intended.
slope_bands <- function(fit, response) {
  slp.n <- posterior_draws(fit, 'b_log10.sporeVolume.cent')
  slp.y <- posterior_draws(fit, 'b_log10.sporeVolume.cent:host.assocyes')
  bind_rows(data.frame(host.assoc = 'no', response = response, slope = slp.n,
                       conf.50 = inside_interval(slp.n, 0.50),
                       conf.95 = inside_interval(slp.n, 0.95)),
            data.frame(host.assoc = 'yes', response = response, slope = slp.n + slp.y,
                       conf.50 = inside_interval(slp.y, 0.50),
                       conf.95 = inside_interval(slp.y, 0.95)))
}

# ext
slp.e <- slope_bands(fit2.ext, 'extent')
# area
slp.a <- slope_bands(fit2.area, 'area')

# join
slp <- bind_rows(slp.e, slp.a)
slp %>% group_by(host.assoc, response) %>% summarise(mean=mean(slope))
### plot

# Facet strip labels shared by the slope-density panel.
labs <- c(area = 'Range area', extent = 'Maximum distance')

# Panel 1: per-species geographic extent/area (logit of the proportion of
# the maximum distance/area) against spore volume, coloured by host
# association, with linear smooths per group.
p1 <- temp %>%
  select(Species, order, log10.sporeVolume, host.assoc, geo_extent_prop, geo_area_prop) %>%
  pivot_longer(cols = ends_with('prop'), names_to = 'response', values_to = 'value') %>%
  ggplot(aes(x = log10.sporeVolume, y = car::logit(value), colour = host.assoc)) +
  geom_point(alpha = 0.5, shape = 16) +
  stat_smooth(method = 'lm', alpha = 0.5) +
  labs(x = 'Spore volume (um^3, log_10)',
       y = 'Geographic extent (relative to maximum distance/area, logit)') +
  facet_grid(rows = vars(response), scales = 'free_y') +
  scale_colour_manual(name = '', values = c('black', 'orange'),
                      labels = c('free-living', 'host-associated')) +
  theme(legend.position = 'none',
        strip.background = element_blank(),
        strip.text.y = element_blank())

# Panel 2: posterior densities of the spore-volume slope for each response,
# with a reference line at zero.
p2 <- ggplot(slp, aes(x = slope, colour = host.assoc, fill = host.assoc)) +
  geom_density(alpha = 0.5) +
  geom_vline(xintercept = 0) +
  labs(x = 'Slope estimate', y = 'Density') +
  facet_grid(rows = vars(response), labeller = labeller(response = labs)) +
  scale_colour_manual(name = '', values = c('black', 'orange'),
                      labels = c('free-living', 'host-associated')) +
  scale_fill_manual(name = '', values = c('black', 'orange'),
                    labels = c('free-living', 'host-associated'))

# Combine the two panels side by side (patchwork).
p1 + p2
#
#
#
#
# ### group-level effects (with host.assoc)
#
# # ext
# grp.int <- grep('^r_host.assoc_order.+Intercept', names(attr(fit3.ext$fit, 'sim')$samples[[1]]))
# grp.slp <- grep('^r_host.assoc_order.+log10.sporeVolume.cent', names(attr(fit3.ext$fit, 'sim')$samples[[1]]))
#
# df.int <- bind_rows(as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(int.mean = mean(response),
# int.l50 = quantile(response, prob=0.25),
# int.u50 = quantile(response, prob=0.75),
# int.l95 = quantile(response, prob=0.025),
# int.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.Intercept.', '', order)) %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.slp <- bind_rows(as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[1]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[2]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[3]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[4]][grp.slp])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(slp.mean = mean(response),
# slp.l50 = quantile(response, prob=0.25),
# slp.u50 = quantile(response, prob=0.75),
# slp.l95 = quantile(response, prob=0.025),
# slp.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.log10.sporeVolume.cent.', '', order)) %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.ext <- full_join(df.int, df.slp) %>%
# mutate(response = 'distance')
# rm(df.int, df.slp)
# summary(df.ext)
#
# # area
# grp.int <- grep('^r_host.assoc_order.+Intercept', names(attr(fit3.area$fit, 'sim')$samples[[1]]))
# grp.slp <- grep('^r_host.assoc_order.+log10.sporeVolume.cent', names(attr(fit3.area$fit, 'sim')$samples[[1]]))
#
# df.int <- bind_rows(as.data.frame(attr(fit3.area$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(int.mean = mean(response),
# int.l50 = quantile(response, prob=0.25),
# int.u50 = quantile(response, prob=0.75),
# int.l95 = quantile(response, prob=0.025),
# int.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.Intercept.', '', order)) %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.slp <- bind_rows(as.data.frame(attr(fit3.area$fit, 'sim')$samples[[1]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[2]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[3]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[4]][grp.slp])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(slp.mean = mean(response),
# slp.l50 = quantile(response, prob=0.25),
# slp.u50 = quantile(response, prob=0.75),
# slp.l95 = quantile(response, prob=0.025),
# slp.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.log10.sporeVolume.cent.', '', order)) %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.area <- full_join(df.int, df.slp) %>%
# mutate(response = 'area')
# rm(df.int, df.slp)
# summary(df.area)
#
# # join tables with spore size estimates
# trt <- geo.extent %>%
# filter(!is.na(order)) %>%
# group_by(Species) %>%
# slice(1) %>%
# ungroup() %>%
# group_by(order) %>%
# summarise(log10.sporeVolume.mean = mean(log10.sporeVolume),
# log10.sporeVolume.sd = sd(log10.sporeVolume)) %>%
# select(phylum, order, log10.sporeVolume.mean, log10.sporeVolume.sd)
# df <- bind_rows(df.ext, df.area) %>%
# left_join(trt)
#
# # plot
# ggplot(df, aes(x=int.mean, y=slp.mean, col=order)) +
# geom_point() +
# geom_errorbar(aes(ymin = slp.l50, ymax = slp.u50), width=0) +
# geom_errorbarh(aes(xmin = int.l50, xmax = int.u50)) +
# stat_smooth(method='lm', aes(col=NULL), colour='black') +
# facet_grid(rows=vars(response), cols=vars(host.assoc), scales='free') +
# labs(x='Intercept\n<- reduced geographical extent on average --- greater geographical extent on average ->',
# y='Slope\n<- extent decreases with spore volume --- extent increases with spore volume ->')
#
# ggplot(df, aes(x=log10.sporeVolume.mean, y=slp.mean, col=order)) +
# geom_point() +
# geom_errorbar(aes(ymin = slp.l50, ymax = slp.u50), width=0) +
# geom_errorbarh(aes(xmin = log10.sporeVolume.mean - log10.sporeVolume.sd,
# xmax = log10.sporeVolume.mean + log10.sporeVolume.sd)) +
# stat_smooth(method='lm', aes(col=NULL), colour='black') +
# facet_grid(rows=vars(response), cols=vars(host.assoc), scales='free') +
# labs(x='Mean spore volume (log10-transformed)',
# y='Slope\n<- extent decreases with spore volume --- extent increases with spore volume ->')
#
#
#
# ### group-level effects (without host.assoc)
#
# # ext
# grp.int <- grep('^r_order.+Intercept', names(attr(fit1.ext$fit, 'sim')$samples[[1]]))
# grp.slp <- grep('^r_order.+log10.sporeVolume.cent', names(attr(fit1.ext$fit, 'sim')$samples[[1]]))
#
# df.int <- bind_rows(as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(int.mean = mean(response),
# int.l50 = quantile(response, prob=0.25),
# int.u50 = quantile(response, prob=0.75),
# int.l95 = quantile(response, prob=0.025),
# int.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_order.', '', order),
# order = gsub('.Intercept.', '', order))
#
# df.slp <- bind_rows(as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[1]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[2]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[3]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[4]][grp.slp])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(slp.mean = mean(response),
# slp.l50 = quantile(response, prob=0.25),
# slp.u50 = quantile(response, prob=0.75),
# slp.l95 = quantile(response, prob=0.025),
# slp.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_order.', '', order),
# order = gsub('.log10.sporeVolume.cent.', '', order))
#
# df.ext <- full_join(df.int, df.slp) %>%
# mutate(response = 'distance')
# rm(df.int, df.slp)
# summary(df.ext)
#
# # area
# grp.int <- grep('^r_order.+Intercept', names(attr(fit1.area$fit, 'sim')$samples[[1]]))
# grp.slp <- grep('^r_order.+log10.sporeVolume.cent', names(attr(fit1.area$fit, 'sim')$samples[[1]]))
#
# df.int <- bind_rows(as.data.frame(attr(fit1.area$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit1.area$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit1.area$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit1.area$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(int.mean = mean(response),
# int.l50 = quantile(response, prob=0.25),
# int.u50 = quantile(response, prob=0.75),
# int.l95 = quantile(response, prob=0.025),
# int.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_order.', '', order),
# order = gsub('.Intercept.', '', order))
#
# df.slp <- bind_rows(as.data.frame(attr(fit1.area$fit, 'sim')$samples[[1]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit1.area$fit, 'sim')$samples[[2]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit1.area$fit, 'sim')$samples[[3]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit1.area$fit, 'sim')$samples[[4]][grp.slp])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(slp.mean = mean(response),
# slp.l50 = quantile(response, prob=0.25),
# slp.u50 = quantile(response, prob=0.75),
# slp.l95 = quantile(response, prob=0.025),
# slp.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_order.', '', order),
# order = gsub('.log10.sporeVolume.cent.', '', order))
#
# df.area <- full_join(df.int, df.slp) %>%
# mutate(response = 'area')
# rm(df.int, df.slp)
# summary(df.area)
#
# # join tables with spore size estimates
# trt <- geo.extent %>%
# filter(!is.na(order)) %>%
# group_by(Species) %>%
# slice(1) %>%
# ungroup() %>%
# group_by(phylum, order) %>%
# summarise(log10.sporeVolume.mean = mean(log10.sporeVolume),
# log10.sporeVolume.sd = sd(log10.sporeVolume)) %>%
# select(phylum, order, log10.sporeVolume.mean, log10.sporeVolume.sd)
# df <- bind_rows(df.ext, df.area) %>%
# left_join(trt)
#
# # plot
# ggplot(df, aes(x=int.mean, y=slp.mean, col=order)) +
# geom_point() +
# geom_errorbar(aes(ymin = slp.l50, ymax = slp.u50), width=0) +
# geom_errorbarh(aes(xmin = int.l50, xmax = int.u50)) +
# stat_smooth(method='lm', aes(col=NULL), colour='black') +
# facet_grid(rows=vars(response), scales='free') +
# labs(x='Intercept\n<- reduced geographical extent on average --- greater geographical extent on average ->',
# y='Slope\n<- extent decreases with spore volume --- extent increases with spore volume ->')
#
# ggplot(df, aes(x=log10.sporeVolume.mean, y=slp.mean, col=order)) +
# geom_point() +
# geom_errorbar(aes(ymin = slp.l50, ymax = slp.u50), width=0) +
# geom_errorbarh(aes(xmin = log10.sporeVolume.mean - log10.sporeVolume.sd,
# xmax = log10.sporeVolume.mean + log10.sporeVolume.sd)) +
# stat_smooth(method='lm', aes(col=NULL), colour='black') +
# facet_grid(rows=vars(response), cols=vars(phylum), scales='free') +
# labs(x='Mean spore volume (log10-transformed)',
# y='Slope\n<- extent decreases with spore volume --- extent increases with spore volume ->')
#
#
#
# ### group-level effects (with host.assoc) -- densities
#
# filter(temp, !is.na(geo_extent_prop)) %>%
# group_by(order, host.assoc) %>%
# summarise(n_spp = n()) %>%
# pivot_wider(names_from=host.assoc, values_from=n_spp, values_fill=0) %>%
# as.data.frame()
#
#
# ## group = order
#
# # ext
# grp.int <- grep('^r_host.assoc_order.+Intercept', names(attr(fit3.ext$fit, 'sim')$samples[[1]]))
# grp.slp <- grep('^r_host.assoc_order.+log10.sporeVolume.cent', names(attr(fit3.ext$fit, 'sim')$samples[[1]]))
#
# df.int <- bind_rows(as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order.'), names_to='order', values_to='value') %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.Intercept.', '', order),
# estimate = 'intercept') %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.slp <- bind_rows(as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[1]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[2]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[3]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[4]][grp.slp])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order.'), names_to='order', values_to='value') %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.log10.sporeVolume.cent.', '', order),
# estimate = 'slope') %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.ext <- bind_rows(df.int, df.slp) %>%
# mutate(response = 'distance')
# rm(df.int, df.slp)
# summary(df.ext)
#
# # area
# grp.int <- grep('^r_host.assoc_order.+Intercept', names(attr(fit3.area$fit, 'sim')$samples[[1]]))
# grp.slp <- grep('^r_host.assoc_order.+log10.sporeVolume.cent', names(attr(fit3.area$fit, 'sim')$samples[[1]]))
#
# df.int <- bind_rows(as.data.frame(attr(fit3.area$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order.'), names_to='order', values_to='value') %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.Intercept.', '', order),
# estimate = 'intercept') %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.slp <- bind_rows(as.data.frame(attr(fit3.area$fit, 'sim')$samples[[1]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[2]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[3]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[4]][grp.slp])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order.'), names_to='order', values_to='value') %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.log10.sporeVolume.cent.', '', order),
# estimate = 'slope') %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.area <- bind_rows(df.int, df.slp) %>%
# mutate(response = 'area')
# rm(df.int, df.slp)
# summary(df.area)
#
# # join tables
# df <- bind_rows(df.ext, df.area)
#
# # summarise
# df %>%
# filter(estimate == 'slope') %>%
# group_by(host.assoc, order, response) %>%
# summarise(l95 = quantile(value, probs=0.025), u95 = quantile(value, probs=0.975)) %>%
# filter(u95 < 0 | l95 > 0)
#
# # plot
# ggplot(filter(df, estimate == 'intercept'), aes(x=value, y=order, fill=host.assoc)) +
# geom_density_ridges(alpha=0.5) +
# geom_vline(xintercept=0) +
# xlim(c(-0.5, 0.5)) +
# facet_grid(cols=vars(response))
# ggplot(filter(df, estimate == 'slope'), aes(x=value, y=order, fill=host.assoc)) +
# geom_density_ridges(alpha=0.5) +
# geom_vline(xintercept=0) +
# xlim(c(-0.15, 0.15)) +
# facet_grid(cols=vars(response))
#
#
# ## group = primer type
#
# # ext
# grp.int <- grep('^r_Primers.name.+Intercept', names(attr(fit3.ext$fit, 'sim')$samples[[1]]))
# df.ext <- bind_rows(as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_Primers.name.'), names_to='primers', values_to='value') %>%
# mutate(primers = gsub('r_Primers.name', '', primers),
# primers = gsub('.Intercept.', '', primers),
# response = 'distance')
# summary(df.ext)
#
# # area
# grp.int <- grep('^r_Primers.name.+Intercept', names(attr(fit3.area$fit, 'sim')$samples[[1]]))
# df.area <- bind_rows(as.data.frame(attr(fit3.area$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_Primers.name.'), names_to='primers', values_to='value') %>%
# mutate(primers = gsub('r_Primers.name', '', primers),
# primers = gsub('.Intercept.', '', primers),
# response = 'area')
# summary(df.area)
#
# # join tables
# df <- bind_rows(df.ext, df.area)
#
# # plot
# ggplot(df, aes(x=value, y=primers, fill=area)) +
# geom_density_ridges(alpha=0.5) +
# geom_vline(xintercept=0) +
# xlim(c(-2, 2))
| /miseq/scripts/geoextent_brm_host_output.R | no_license | aguilart/SporeSizeFungalKingdom | R | false | false | 25,615 | r | ### load libraries
library(tidyverse)
library(scales)
library(spatstat)
library(geosphere)
library(colorspace)
library(patchwork)
library(brms)
library(ggridges)
### load output
# modelling results -- can either download or create using 'miseq/scripts/geoextent_brm_host_modelfit.R' (run before proceeding, if needed)
if(!file.exists('miseq')) dir.create('miseq')
if(!file.exists('miseq/output')) dir.create('miseq/output')
if(!file.exists('miseq/output/workspace')) dir.create('miseq/output/workspace')
if(!file.exists('miseq/output/workspace/geoextent_brm_host.RData')) {
download.file('https://cloudstor.aarnet.edu.au/plus/s/c7kamHx0RNM6pCd/download',
'miseq/output/workspace/geoextent_brm_host.RData')
}
load('miseq/output/workspace/geoextent_brm_host.RData')
### analysis of extent
# compare models -- not much difference, go with simplest model
loo_compare(fit1.ext.loo, fit2.ext.loo, fit3.ext.loo, fit4.ext.loo)
loo_compare(fit1.ext.loo, fit2.ext.loo)
# model summary
summary(fit2.ext, prob=0.95)
# population-level effects
plot(fit2.ext, pars = "^b_")
# extent greater for host.assoc == 'yes'
hypothesis(fit2.ext, "host.assocyes > 0", class = "b")
# slope greater for host.assoc == 'yes'
hypothesis(fit2.ext, "log10.sporeVolume.cent:host.assocyes + log10.sporeVolume.cent > log10.sporeVolume.cent", class = "b")
# negative slope for host.assoc == 'no'
hypothesis(fit2.ext, "log10.sporeVolume.cent < 0", class = "b")
# negative slope for host.assoc == 'yes'
hypothesis(fit2.ext, "log10.sporeVolume.cent:host.assocyes + log10.sporeVolume.cent < 0", class = "b")
### analysis of area
# NOTE(review): mirrors the extent analysis above but for the 'fit*.area'
# models from the loaded workspace -- confirm objects exist before running
# compare models -- not much difference, go with simplest model
loo_compare(fit1.area.loo, fit2.area.loo, fit3.area.loo, fit4.area.loo)
loo_compare(fit1.area.loo, fit2.area.loo)
# model summary (95% credible intervals)
summary(fit2.area, prob=0.95)
# population-level effects (regex selects all fixed-effect parameters)
# NOTE(review): 'pars' is deprecated in newer brms releases in favour of
# 'variable' -- confirm against the installed brms version
plot(fit2.area, pars = "^b_")
# directional posterior-probability tests via brms::hypothesis()
# area greater for host.assoc == 'yes' (group-mean shift)
hypothesis(fit2.area, "host.assocyes > 0", class = "b")
# slope greater for host.assoc == 'yes' (main effect + interaction vs main effect)
hypothesis(fit2.area, "log10.sporeVolume.cent:host.assocyes + log10.sporeVolume.cent > log10.sporeVolume.cent", class = "b")
# negative slope for host.assoc == 'no' (main effect alone)
hypothesis(fit2.area, "log10.sporeVolume.cent < 0", class = "b")
# negative slope for host.assoc == 'yes' (main effect + interaction)
hypothesis(fit2.area, "log10.sporeVolume.cent:host.assocyes + log10.sporeVolume.cent < 0", class = "b")
### plotting output
### confidence bands for slope

# Extract the post-warmup posterior draws (iterations 1001-2000 of each of the
# 4 chains, as in the original hard-coded indexing) for one named coefficient
# of a brms fit.
# NOTE(review): assumes 4 chains x 2000 iterations with 1000 warmup -- confirm
# against the model-fitting script; as_draws_df(fit) would avoid the
# hard-coding if brms/posterior versions allow.
get_slope_draws <- function(fit, par) {
  chains <- attr(fit$fit, 'sim')$samples
  unlist(lapply(chains, function(ch) ch[[par]][1001:2000]), use.names = FALSE)
}

# Logical flags marking which draws fall inside the central 50% / 95%
# intervals of the vector x.
ci_flags <- function(x) {
  list(c50 = x > quantile(x, prob=0.25) & x < quantile(x, prob=0.75),
       c95 = x > quantile(x, prob=0.025) & x < quantile(x, prob=0.975))
}

# Per-draw slope table for one response ('extent' or 'area'): the free-living
# slope is the main effect; the host-associated slope adds the interaction.
# NOTE(review): for host.assoc == 'yes' the conf.50/conf.95 flags are computed
# on the interaction draws alone (not on the summed slope) -- this reproduces
# the original code exactly, but confirm it is intended.
slope_table <- function(fit, response) {
  slp.n <- get_slope_draws(fit, 'b_log10.sporeVolume.cent')
  slp.y <- get_slope_draws(fit, 'b_log10.sporeVolume.cent:host.assocyes')
  ci.n <- ci_flags(slp.n)
  ci.y <- ci_flags(slp.y)
  bind_rows(data.frame(host.assoc='no', response=response, slope=slp.n,
                       conf.50=ci.n$c50, conf.95=ci.n$c95),
            data.frame(host.assoc='yes', response=response, slope=slp.n+slp.y,
                       conf.50=ci.y$c50, conf.95=ci.y$c95))
}

# ext
slp.e <- slope_table(fit2.ext, 'extent')
# area
slp.a <- slope_table(fit2.area, 'area')
# join
slp <- bind_rows(slp.e, slp.a)
# posterior mean slope per group x response
slp %>% group_by(host.assoc, response) %>% summarise(mean=mean(slope))
### plot
# facet strip labels for the two response variables
# (renamed from 'labs' to avoid shadowing ggplot2::labs(); the original only
# worked because R skips non-function objects when resolving a call)
resp_labs <- c(area = 'Range area', extent = 'Maximum distance')

# extent and area by volume: raw species-level points with per-group lm fits
# NOTE(review): 'temp' is assumed to come from the loaded workspace -- confirm
p1 <- temp %>%
  select(Species, order, log10.sporeVolume, host.assoc, geo_extent_prop, geo_area_prop) %>%
  pivot_longer(cols=ends_with('prop'), names_to='response', values_to='value') %>%
  ggplot(aes(x=log10.sporeVolume, y=car::logit(value), colour=host.assoc)) +
  geom_point(alpha=0.5, shape=16) +
  stat_smooth(method='lm', alpha=0.5) +
  labs(x='Spore volume (um^3, log_10)', y='Geographic extent (relative to maximum distance/area, logit)') +
  facet_grid(rows=vars(response), scales='free_y') +
  scale_colour_manual(name='', values=c('black', 'orange'),
                      labels=c('free-living', 'host-associated')) +
  theme(legend.position='none',
        strip.background = element_blank(),
        strip.text.y = element_blank())

# posterior densities of the slope estimates built above (object 'slp')
p2 <- ggplot(slp, aes(x=slope, colour=host.assoc, fill=host.assoc)) +
  geom_density(alpha=0.5) +
  geom_vline(xintercept=0) +
  labs(x='Slope estimate', y='Density') +
  facet_grid(rows=vars(response), labeller=labeller(response=resp_labs)) +
  scale_colour_manual(name='', values=c('black', 'orange'),
                      labels=c('free-living', 'host-associated')) +
  scale_fill_manual(name='', values=c('black', 'orange'),
                    labels=c('free-living', 'host-associated'))

# side-by-side layout -- presumably via the patchwork package; confirm it is
# loaded elsewhere in the session
p1 + p2
#
#
#
#
# ### group-level effects (with host.assoc)
#
# # ext
# grp.int <- grep('^r_host.assoc_order.+Intercept', names(attr(fit3.ext$fit, 'sim')$samples[[1]]))
# grp.slp <- grep('^r_host.assoc_order.+log10.sporeVolume.cent', names(attr(fit3.ext$fit, 'sim')$samples[[1]]))
#
# df.int <- bind_rows(as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(int.mean = mean(response),
# int.l50 = quantile(response, prob=0.25),
# int.u50 = quantile(response, prob=0.75),
# int.l95 = quantile(response, prob=0.025),
# int.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.Intercept.', '', order)) %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.slp <- bind_rows(as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[1]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[2]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[3]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[4]][grp.slp])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(slp.mean = mean(response),
# slp.l50 = quantile(response, prob=0.25),
# slp.u50 = quantile(response, prob=0.75),
# slp.l95 = quantile(response, prob=0.025),
# slp.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.log10.sporeVolume.cent.', '', order)) %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.ext <- full_join(df.int, df.slp) %>%
# mutate(response = 'distance')
# rm(df.int, df.slp)
# summary(df.ext)
#
# # area
# grp.int <- grep('^r_host.assoc_order.+Intercept', names(attr(fit3.area$fit, 'sim')$samples[[1]]))
# grp.slp <- grep('^r_host.assoc_order.+log10.sporeVolume.cent', names(attr(fit3.area$fit, 'sim')$samples[[1]]))
#
# df.int <- bind_rows(as.data.frame(attr(fit3.area$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(int.mean = mean(response),
# int.l50 = quantile(response, prob=0.25),
# int.u50 = quantile(response, prob=0.75),
# int.l95 = quantile(response, prob=0.025),
# int.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.Intercept.', '', order)) %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.slp <- bind_rows(as.data.frame(attr(fit3.area$fit, 'sim')$samples[[1]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[2]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[3]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[4]][grp.slp])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(slp.mean = mean(response),
# slp.l50 = quantile(response, prob=0.25),
# slp.u50 = quantile(response, prob=0.75),
# slp.l95 = quantile(response, prob=0.025),
# slp.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.log10.sporeVolume.cent.', '', order)) %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.area <- full_join(df.int, df.slp) %>%
# mutate(response = 'area')
# rm(df.int, df.slp)
# summary(df.area)
#
# # join tables with spore size estimates
# trt <- geo.extent %>%
# filter(!is.na(order)) %>%
# group_by(Species) %>%
# slice(1) %>%
# ungroup() %>%
# group_by(order) %>%
# summarise(log10.sporeVolume.mean = mean(log10.sporeVolume),
# log10.sporeVolume.sd = sd(log10.sporeVolume)) %>%
# select(phylum, order, log10.sporeVolume.mean, log10.sporeVolume.sd)
# df <- bind_rows(df.ext, df.area) %>%
# left_join(trt)
#
# # plot
# ggplot(df, aes(x=int.mean, y=slp.mean, col=order)) +
# geom_point() +
# geom_errorbar(aes(ymin = slp.l50, ymax = slp.u50), width=0) +
# geom_errorbarh(aes(xmin = int.l50, xmax = int.u50)) +
# stat_smooth(method='lm', aes(col=NULL), colour='black') +
# facet_grid(rows=vars(response), cols=vars(host.assoc), scales='free') +
# labs(x='Intercept\n<- reduced geographical extent on average --- greater geographical extent on average ->',
# y='Slope\n<- extent decreases with spore volume --- extent increases with spore volume ->')
#
# ggplot(df, aes(x=log10.sporeVolume.mean, y=slp.mean, col=order)) +
# geom_point() +
# geom_errorbar(aes(ymin = slp.l50, ymax = slp.u50), width=0) +
# geom_errorbarh(aes(xmin = log10.sporeVolume.mean - log10.sporeVolume.sd,
# xmax = log10.sporeVolume.mean + log10.sporeVolume.sd)) +
# stat_smooth(method='lm', aes(col=NULL), colour='black') +
# facet_grid(rows=vars(response), cols=vars(host.assoc), scales='free') +
# labs(x='Mean spore volume (log10-transformed)',
# y='Slope\n<- extent decreases with spore volume --- extent increases with spore volume ->')
#
#
#
# ### group-level effects (without host.assoc)
#
# # ext
# grp.int <- grep('^r_order.+Intercept', names(attr(fit1.ext$fit, 'sim')$samples[[1]]))
# grp.slp <- grep('^r_order.+log10.sporeVolume.cent', names(attr(fit1.ext$fit, 'sim')$samples[[1]]))
#
# df.int <- bind_rows(as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(int.mean = mean(response),
# int.l50 = quantile(response, prob=0.25),
# int.u50 = quantile(response, prob=0.75),
# int.l95 = quantile(response, prob=0.025),
# int.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_order.', '', order),
# order = gsub('.Intercept.', '', order))
#
# df.slp <- bind_rows(as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[1]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[2]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[3]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit1.ext$fit, 'sim')$samples[[4]][grp.slp])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(slp.mean = mean(response),
# slp.l50 = quantile(response, prob=0.25),
# slp.u50 = quantile(response, prob=0.75),
# slp.l95 = quantile(response, prob=0.025),
# slp.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_order.', '', order),
# order = gsub('.log10.sporeVolume.cent.', '', order))
#
# df.ext <- full_join(df.int, df.slp) %>%
# mutate(response = 'distance')
# rm(df.int, df.slp)
# summary(df.ext)
#
# # area
# grp.int <- grep('^r_order.+Intercept', names(attr(fit1.area$fit, 'sim')$samples[[1]]))
# grp.slp <- grep('^r_order.+log10.sporeVolume.cent', names(attr(fit1.area$fit, 'sim')$samples[[1]]))
#
# df.int <- bind_rows(as.data.frame(attr(fit1.area$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit1.area$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit1.area$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit1.area$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(int.mean = mean(response),
# int.l50 = quantile(response, prob=0.25),
# int.u50 = quantile(response, prob=0.75),
# int.l95 = quantile(response, prob=0.025),
# int.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_order.', '', order),
# order = gsub('.Intercept.', '', order))
#
# df.slp <- bind_rows(as.data.frame(attr(fit1.area$fit, 'sim')$samples[[1]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit1.area$fit, 'sim')$samples[[2]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit1.area$fit, 'sim')$samples[[3]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit1.area$fit, 'sim')$samples[[4]][grp.slp])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_order'), names_to='order', values_to='response') %>%
# group_by(order) %>%
# summarise(slp.mean = mean(response),
# slp.l50 = quantile(response, prob=0.25),
# slp.u50 = quantile(response, prob=0.75),
# slp.l95 = quantile(response, prob=0.025),
# slp.u95 = quantile(response, prob=0.975)) %>%
# mutate(order = gsub('r_order.', '', order),
# order = gsub('.log10.sporeVolume.cent.', '', order))
#
# df.area <- full_join(df.int, df.slp) %>%
# mutate(response = 'area')
# rm(df.int, df.slp)
# summary(df.area)
#
# # join tables with spore size estimates
# trt <- geo.extent %>%
# filter(!is.na(order)) %>%
# group_by(Species) %>%
# slice(1) %>%
# ungroup() %>%
# group_by(phylum, order) %>%
# summarise(log10.sporeVolume.mean = mean(log10.sporeVolume),
# log10.sporeVolume.sd = sd(log10.sporeVolume)) %>%
# select(phylum, order, log10.sporeVolume.mean, log10.sporeVolume.sd)
# df <- bind_rows(df.ext, df.area) %>%
# left_join(trt)
#
# # plot
# ggplot(df, aes(x=int.mean, y=slp.mean, col=order)) +
# geom_point() +
# geom_errorbar(aes(ymin = slp.l50, ymax = slp.u50), width=0) +
# geom_errorbarh(aes(xmin = int.l50, xmax = int.u50)) +
# stat_smooth(method='lm', aes(col=NULL), colour='black') +
# facet_grid(rows=vars(response), scales='free') +
# labs(x='Intercept\n<- reduced geographical extent on average --- greater geographical extent on average ->',
# y='Slope\n<- extent decreases with spore volume --- extent increases with spore volume ->')
#
# ggplot(df, aes(x=log10.sporeVolume.mean, y=slp.mean, col=order)) +
# geom_point() +
# geom_errorbar(aes(ymin = slp.l50, ymax = slp.u50), width=0) +
# geom_errorbarh(aes(xmin = log10.sporeVolume.mean - log10.sporeVolume.sd,
# xmax = log10.sporeVolume.mean + log10.sporeVolume.sd)) +
# stat_smooth(method='lm', aes(col=NULL), colour='black') +
# facet_grid(rows=vars(response), cols=vars(phylum), scales='free') +
# labs(x='Mean spore volume (log10-transformed)',
# y='Slope\n<- extent decreases with spore volume --- extent increases with spore volume ->')
#
#
#
# ### group-level effects (with host.assoc) -- densities
#
# filter(temp, !is.na(geo_extent_prop)) %>%
# group_by(order, host.assoc) %>%
# summarise(n_spp = n()) %>%
# pivot_wider(names_from=host.assoc, values_from=n_spp, values_fill=0) %>%
# as.data.frame()
#
#
# ## group = order
#
# # ext
# grp.int <- grep('^r_host.assoc_order.+Intercept', names(attr(fit3.ext$fit, 'sim')$samples[[1]]))
# grp.slp <- grep('^r_host.assoc_order.+log10.sporeVolume.cent', names(attr(fit3.ext$fit, 'sim')$samples[[1]]))
#
# df.int <- bind_rows(as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order.'), names_to='order', values_to='value') %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.Intercept.', '', order),
# estimate = 'intercept') %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.slp <- bind_rows(as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[1]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[2]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[3]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[4]][grp.slp])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order.'), names_to='order', values_to='value') %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.log10.sporeVolume.cent.', '', order),
# estimate = 'slope') %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.ext <- bind_rows(df.int, df.slp) %>%
# mutate(response = 'distance')
# rm(df.int, df.slp)
# summary(df.ext)
#
# # area
# grp.int <- grep('^r_host.assoc_order.+Intercept', names(attr(fit3.area$fit, 'sim')$samples[[1]]))
# grp.slp <- grep('^r_host.assoc_order.+log10.sporeVolume.cent', names(attr(fit3.area$fit, 'sim')$samples[[1]]))
#
# df.int <- bind_rows(as.data.frame(attr(fit3.area$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order.'), names_to='order', values_to='value') %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.Intercept.', '', order),
# estimate = 'intercept') %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.slp <- bind_rows(as.data.frame(attr(fit3.area$fit, 'sim')$samples[[1]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[2]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[3]][grp.slp])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[4]][grp.slp])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_host.assoc_order.'), names_to='order', values_to='value') %>%
# mutate(order = gsub('r_host.assoc_order.', '', order),
# order = gsub('.log10.sporeVolume.cent.', '', order),
# estimate = 'slope') %>%
# separate(col=order, into=c('host.assoc', 'order'), sep='_')
#
# df.area <- bind_rows(df.int, df.slp) %>%
# mutate(response = 'area')
# rm(df.int, df.slp)
# summary(df.area)
#
# # join tables
# df <- bind_rows(df.ext, df.area)
#
# # summarise
# df %>%
# filter(estimate == 'slope') %>%
# group_by(host.assoc, order, response) %>%
# summarise(l95 = quantile(value, probs=0.025), u95 = quantile(value, probs=0.975)) %>%
# filter(u95 < 0 | l95 > 0)
#
# # plot
# ggplot(filter(df, estimate == 'intercept'), aes(x=value, y=order, fill=host.assoc)) +
# geom_density_ridges(alpha=0.5) +
# geom_vline(xintercept=0) +
# xlim(c(-0.5, 0.5)) +
# facet_grid(cols=vars(response))
# ggplot(filter(df, estimate == 'slope'), aes(x=value, y=order, fill=host.assoc)) +
# geom_density_ridges(alpha=0.5) +
# geom_vline(xintercept=0) +
# xlim(c(-0.15, 0.15)) +
# facet_grid(cols=vars(response))
#
#
# ## group = primer type
#
# # ext
# grp.int <- grep('^r_Primers.name.+Intercept', names(attr(fit3.ext$fit, 'sim')$samples[[1]]))
# df.ext <- bind_rows(as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.ext$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_Primers.name.'), names_to='primers', values_to='value') %>%
# mutate(primers = gsub('r_Primers.name', '', primers),
# primers = gsub('.Intercept.', '', primers),
# response = 'distance')
# summary(df.ext)
#
# # area
# grp.int <- grep('^r_Primers.name.+Intercept', names(attr(fit3.area$fit, 'sim')$samples[[1]]))
# df.area <- bind_rows(as.data.frame(attr(fit3.area$fit, 'sim')$samples[[1]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[2]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[3]][grp.int])[1001:2000, ],
# as.data.frame(attr(fit3.area$fit, 'sim')$samples[[4]][grp.int])[1001:2000, ]) %>%
# pivot_longer(cols=starts_with('r_Primers.name.'), names_to='primers', values_to='value') %>%
# mutate(primers = gsub('r_Primers.name', '', primers),
# primers = gsub('.Intercept.', '', primers),
# response = 'area')
# summary(df.area)
#
# # join tables
# df <- bind_rows(df.ext, df.area)
#
# # plot
# ggplot(df, aes(x=value, y=primers, fill=area)) +
# geom_density_ridges(alpha=0.5) +
# geom_vline(xintercept=0) +
# xlim(c(-2, 2))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pssmooth.R
\name{riskCurve}
\alias{riskCurve}
\title{Estimation of Conditional Clinical Endpoint Risk under Placebo and Treatment Given Biomarker Response to Treatment in a Baseline Surrogate Measure Three-Phase Sampling Design}
\usage{
riskCurve(
formula,
bsm,
tx,
data,
pstype = c("continuous", "ordered"),
bsmtype = c("continuous", "ordered"),
bwtype = c("fixed", "generalized_nn", "adaptive_nn"),
hinge = FALSE,
weights = NULL,
psGrid = NULL,
saveFile = NULL,
saveDir = NULL
)
}
\arguments{
\item{formula}{a formula object with the binary clinical endpoint on the left of the \code{~} operator. The first listed variable on the right must be the biomarker response
at \eqn{t0} and all variables that follow, if any, are discrete baseline covariates specified in all fitted models that condition on them. Interactions and transformations
of the baseline covariates are allowed. All terms in the formula must be evaluable in the data frame \code{data}.}
\item{bsm}{a character string specifying the variable name in \code{data} representing the baseline surrogate measure}
\item{tx}{a character string specifying the variable name in \code{data} representing the treatment group indicator}
\item{data}{a data frame with one row per randomized participant endpoint-free at \eqn{t_0} that contains at least the variables specified in \code{formula}, \code{bsm} and
\code{tx}. Values of \code{bsm} and the biomarker at \eqn{t_0} that are unavailable are represented as \code{NA}.}
\item{pstype}{a character string specifying whether the biomarker response shall be treated as a \code{continuous} (default) or \code{ordered} categorical variable in the
kernel density/probability estimation}
\item{bsmtype}{a character string specifying whether the baseline surrogate measure shall be treated as a \code{continuous} (default) or \code{ordered} categorical variable in the
kernel density/probability estimation}
\item{bwtype}{a character string specifying the bandwidth type for continuous variables in the kernel density estimation. The options are \code{fixed} (default) for fixed
bandwidths, \code{generalized_nn} for generalized nearest neighbors, and \code{adaptive_nn} for adaptive nearest neighbors. As noted in the documentation of the function
\code{npcdensbw} in the \code{np} package: "Adaptive nearest-neighbor bandwidths change with each sample realization in the set when estimating the density at the point \eqn{x}.
Generalized nearest-neighbor bandwidths change with the point at which the density is estimated, \eqn{x}. Fixed bandwidths are constant over the support of \eqn{x}."}
\item{hinge}{a logical value (\code{FALSE} by default) indicating whether a hinge model (Fong et al., 2017) shall be used for modeling the effect of \eqn{S(z)} on the
clinical endpoint risk. A hinge model specifies that variability in \eqn{S(z)} below the hinge point does not associate with the clinical endpoint risk.}
\item{weights}{either a numeric vector of weights or a character string specifying the variable name in \code{data} representing weights applied to observations
in the phase 2 subset in order to make inference about the target population of all randomized participants endpoint-free at \eqn{t_0}. The weights reflect that
the case:control ratio in the phase 2 subset is different from that in the target population and are passed on to GLMs in the estimation of the hinge point.
If \code{NULL} (default), weights for cases and controls are calculated separately in each study group.}
\item{psGrid}{a numeric vector of \eqn{S(1)} values at which the conditional clinical endpoint risk in each study group is estimated. If \code{NULL} (default),
a grid of values spanning the range of observed values of the biomarker will be used.}
\item{saveFile}{a character string specifying the name of an \code{.RData} file storing the output list. If \code{NULL} (default), the output list will only be returned.}
\item{saveDir}{a character string specifying a path for the output directory. If \code{NULL} (default), the output list will only be returned; otherwise, if
\code{saveFile} is specified, the output list will also be saved as an \code{.RData} file in the specified directory.}
}
\value{
If \code{saveFile} and \code{saveDir} are both specified, the output list (named \code{oList}) is saved as an \code{.RData} file; otherwise it is only returned, not saved.
The output object (of class \code{riskCurve}) is a list with the following components:
\itemize{
\item \code{psGrid}: a numeric vector of \eqn{S(1)} values at which the conditional clinical endpoint risk is estimated in the components \code{plaRiskCurve} and
\code{txRiskCurve}
\item \code{plaRiskCurve}: a numeric vector of estimates of \eqn{P\{Y(0)=1|S(1)=s_1\}} for \eqn{s_1} in \code{psGrid}
\item \code{txRiskCurve}: a numeric vector of estimates of \eqn{P\{Y(1)=1|S(1)=s_1\}} for \eqn{s_1} in \code{psGrid}
\item \code{fOptBandwidths}: a \code{conbandwidth} object returned by the call of the function \code{npcdensbw} containing the optimal bandwidths, selected by likelihood
cross-validation, in the kernel estimation of the conditional density of \eqn{S(1)} given the baseline surrogate measure and any other specified baseline covariates
\item \code{gOptBandwidths}: a \code{conbandwidth} object returned by the call of the function \code{npcdensbw} or \code{npudensbw} containing the optimal bandwidths,
selected by likelihood cross-validation, in the kernel estimation of the conditional density of \eqn{S(0)} given any specified baseline covariates or the marginal density
of \eqn{S(0)} if no baseline covariates are specified in \code{formula}
\item \code{cpointP}: if \code{hinge=TRUE}, the estimate of the hinge point in the placebo group
\item \code{cpointT}: if \code{hinge=TRUE}, the estimate of the hinge point in the treatment group
}
}
\description{
Estimates \eqn{P\{Y(z)=1|S(1)=s_1\}}, \eqn{z=0,1}, on a grid of \eqn{s_1} values following the estimation method of Juraska, Huang, and Gilbert (2020), where \eqn{Z} is the
treatment group indicator (\eqn{Z=1}, treatment; \eqn{Z=0}, placebo), \eqn{S(z)} is a continuous or ordered categorical univariate biomarker under assignment to \eqn{Z=z}
measured at fixed time \eqn{t_0} after randomization, and \eqn{Y} is a binary clinical endpoint (\eqn{Y=1}, disease; \eqn{Y=0}, no disease) measured after \eqn{t_0}. The
estimator employs the generalized product kernel density/probability estimation method of Hall, Racine, and Li (2004) implemented in the \code{np} package. The risks
\eqn{P\{Y(z)=1|S(z)=s_1,X=x\}}, \eqn{z=0,1}, where \eqn{X} is a vector of discrete baseline covariates, are estimated by fitting inverse probability-weighted logistic regression
models using the \code{osDesign} package.
}
\examples{
n <- 500
Z <- rep(0:1, each=n/2)
S <- MASS::mvrnorm(n, mu=c(2,2,3), Sigma=matrix(c(1,0.9,0.7,0.9,1,0.7,0.7,0.7,1), nrow=3))
p <- pnorm(drop(cbind(1,Z,(1-Z)*S[,2],Z*S[,3]) \%*\% c(-1.2,0.2,-0.02,-0.2)))
Y <- sapply(p, function(risk){ rbinom(1,1,risk) })
X <- rbinom(n,1,0.5)
# delete S(1) in placebo recipients
S[Z==0,3] <- NA
# delete S(0) in treatment recipients
S[Z==1,2] <- NA
# generate the indicator of being sampled into the phase 2 subset
phase2 <- rbinom(n,1,0.4)
# delete Sb, S(0) and S(1) in controls not included in the phase 2 subset
S[Y==0 & phase2==0,] <- c(NA,NA,NA)
# delete Sb in cases not included in the phase 2 subset
S[Y==1 & phase2==0,1] <- NA
data <- data.frame(X,Z,S[,1],ifelse(Z==0,S[,2],S[,3]),Y)
colnames(data) <- c("X","Z","Sb","S","Y")
qS <- quantile(data$S, probs=c(0.05,0.95), na.rm=TRUE)
grid <- seq(qS[1], qS[2], length.out=3)
out <- riskCurve(formula=Y ~ S + factor(X), bsm="Sb", tx="Z", data=data, psGrid=grid)
\donttest{
# alternatively, to save the .RData output file (no '<-' needed):
riskCurve(formula=Y ~ S + factor(X), bsm="Sb", tx="Z", data=data, saveFile="out.RData",
saveDir="./")
}
}
\references{
Fong, Y., Huang, Y., Gilbert, P. B., and Permar, S. R. (2017), chngpt: threshold regression model estimation and inference, \emph{BMC Bioinformatics}, 18.
Hall, P., Racine, J., and Li, Q. (2004), Cross-validation and the estimation of conditional probability densities, \emph{JASA} 99(468), 1015-1026.
Juraska, M., Huang, Y., and Gilbert, P. B. (2020), Inference on treatment effect modification by biomarker response in a three-phase sampling design, \emph{Biostatistics}, 21(3): 545-560, \url{https://doi.org/10.1093/biostatistics/kxy074}.
}
\seealso{
\code{\link{bootRiskCurve}}, \code{\link{summary.riskCurve}} and \code{\link{plotMCEPcurve}}
}
| /man/riskCurve.Rd | no_license | cran/pssmooth | R | false | true | 8,786 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pssmooth.R
\name{riskCurve}
\alias{riskCurve}
\title{Estimation of Conditional Clinical Endpoint Risk under Placebo and Treatment Given Biomarker Response to Treatment in a Baseline Surrogate Measure Three-Phase Sampling Design}
\usage{
riskCurve(
formula,
bsm,
tx,
data,
pstype = c("continuous", "ordered"),
bsmtype = c("continuous", "ordered"),
bwtype = c("fixed", "generalized_nn", "adaptive_nn"),
hinge = FALSE,
weights = NULL,
psGrid = NULL,
saveFile = NULL,
saveDir = NULL
)
}
\arguments{
\item{formula}{a formula object with the binary clinical endpoint on the left of the \code{~} operator. The first listed variable on the right must be the biomarker response
at \eqn{t0} and all variables that follow, if any, are discrete baseline covariates specified in all fitted models that condition on them. Interactions and transformations
of the baseline covariates are allowed. All terms in the formula must be evaluable in the data frame \code{data}.}
\item{bsm}{a character string specifying the variable name in \code{data} representing the baseline surrogate measure}
\item{tx}{a character string specifying the variable name in \code{data} representing the treatment group indicator}
\item{data}{a data frame with one row per randomized participant endpoint-free at \eqn{t_0} that contains at least the variables specified in \code{formula}, \code{bsm} and
\code{tx}. Values of \code{bsm} and the biomarker at \eqn{t_0} that are unavailable are represented as \code{NA}.}
\item{pstype}{a character string specifying whether the biomarker response shall be treated as a \code{continuous} (default) or \code{ordered} categorical variable in the
kernel density/probability estimation}
\item{bsmtype}{a character string specifying whether the baseline surrogate measure shall be treated as a \code{continuous} (default) or \code{ordered} categorical variable in the
kernel density/probability estimation}
\item{bwtype}{a character string specifying the bandwidth type for continuous variables in the kernel density estimation. The options are \code{fixed} (default) for fixed
bandwidths, \code{generalized_nn} for generalized nearest neighbors, and \code{adaptive_nn} for adaptive nearest neighbors. As noted in the documentation of the function
\code{npcdensbw} in the \code{np} package: "Adaptive nearest-neighbor bandwidths change with each sample realization in the set when estimating the density at the point \eqn{x}.
Generalized nearest-neighbor bandwidths change with the point at which the density is estimated, \eqn{x}. Fixed bandwidths are constant over the support of \eqn{x}."}
\item{hinge}{a logical value (\code{FALSE} by default) indicating whether a hinge model (Fong et al., 2017) shall be used for modeling the effect of \eqn{S(z)} on the
clinical endpoint risk. A hinge model specifies that variability in \eqn{S(z)} below the hinge point does not associate with the clinical endpoint risk.}
\item{weights}{either a numeric vector of weights or a character string specifying the variable name in \code{data} representing weights applied to observations
in the phase 2 subset in order to make inference about the target population of all randomized participants endpoint-free at \eqn{t_0}. The weights reflect that
the case:control ratio in the phase 2 subset is different from that in the target population and are passed on to GLMs in the estimation of the hinge point.
If \code{NULL} (default), weights for cases and controls are calculated separately in each study group.}
\item{psGrid}{a numeric vector of \eqn{S(1)} values at which the conditional clinical endpoint risk in each study group is estimated. If \code{NULL} (default),
a grid of values spanning the range of observed values of the biomarker will be used.}
\item{saveFile}{a character string specifying the name of an \code{.RData} file storing the output list. If \code{NULL} (default), the output list will only be returned.}
\item{saveDir}{a character string specifying a path for the output directory. If \code{NULL} (default), the output list will only be returned; otherwise, if
\code{saveFile} is specified, the output list will also be saved as an \code{.RData} file in the specified directory.}
}
\value{
If \code{saveFile} and \code{saveDir} are both specified, the output list (named \code{oList}) is saved as an \code{.RData} file; otherwise it is returned only.
The output object (of class \code{riskCurve}) is a list with the following components:
\itemize{
\item \code{psGrid}: a numeric vector of \eqn{S(1)} values at which the conditional clinical endpoint risk is estimated in the components \code{plaRiskCurve} and
\code{txRiskCurve}
\item \code{plaRiskCurve}: a numeric vector of estimates of \eqn{P\{Y(0)=1|S(1)=s_1\}} for \eqn{s_1} in \code{psGrid}
\item \code{txRiskCurve}: a numeric vector of estimates of \eqn{P\{Y(1)=1|S(1)=s_1\}} for \eqn{s_1} in \code{psGrid}
\item \code{fOptBandwidths}: a \code{conbandwidth} object returned by the call of the function \code{npcdensbw} containing the optimal bandwidths, selected by likelihood
cross-validation, in the kernel estimation of the conditional density of \eqn{S(1)} given the baseline surrogate measure and any other specified baseline covariates
\item \code{gOptBandwidths}: a \code{conbandwidth} object returned by the call of the function \code{npcdensbw} or \code{npudensbw} containing the optimal bandwidths,
selected by likelihood cross-validation, in the kernel estimation of the conditional density of \eqn{S(0)} given any specified baseline covariates or the marginal density
of \eqn{S(0)} if no baseline covariates are specified in \code{formula}
\item \code{cpointP}: if \code{hinge=TRUE}, the estimate of the hinge point in the placebo group
\item \code{cpointT}: if \code{hinge=TRUE}, the estimate of the hinge point in the treatment group
}
}
\description{
Estimates \eqn{P\{Y(z)=1|S(1)=s_1\}}, \eqn{z=0,1}, on a grid of \eqn{s_1} values following the estimation method of Juraska, Huang, and Gilbert (2018), where \eqn{Z} is the
treatment group indicator (\eqn{Z=1}, treatment; \eqn{Z=0}, placebo), \eqn{S(z)} is a continuous or ordered categorical univariate biomarker under assignment to \eqn{Z=z}
measured at fixed time \eqn{t_0} after randomization, and \eqn{Y} is a binary clinical endpoint (\eqn{Y=1}, disease; \eqn{Y=0}, no disease) measured after \eqn{t_0}. The
estimator employs the generalized product kernel density/probability estimation method of Hall, Racine, and Li (2004) implemented in the \code{np} package. The risks
\eqn{P\{Y(z)=1|S(z)=s_1,X=x\}}, \eqn{z=0,1}, where \eqn{X} is a vector of discrete baseline covariates, are estimated by fitting inverse probability-weighted logistic regression
models using the \code{osDesign} package.
}
\examples{
n <- 500
Z <- rep(0:1, each=n/2)
S <- MASS::mvrnorm(n, mu=c(2,2,3), Sigma=matrix(c(1,0.9,0.7,0.9,1,0.7,0.7,0.7,1), nrow=3))
p <- pnorm(drop(cbind(1,Z,(1-Z)*S[,2],Z*S[,3]) \%*\% c(-1.2,0.2,-0.02,-0.2)))
Y <- sapply(p, function(risk){ rbinom(1,1,risk) })
X <- rbinom(n,1,0.5)
# delete S(1) in placebo recipients
S[Z==0,3] <- NA
# delete S(0) in treatment recipients
S[Z==1,2] <- NA
# generate the indicator of being sampled into the phase 2 subset
phase2 <- rbinom(n,1,0.4)
# delete Sb, S(0) and S(1) in controls not included in the phase 2 subset
S[Y==0 & phase2==0,] <- c(NA,NA,NA)
# delete Sb in cases not included in the phase 2 subset
S[Y==1 & phase2==0,1] <- NA
data <- data.frame(X,Z,S[,1],ifelse(Z==0,S[,2],S[,3]),Y)
colnames(data) <- c("X","Z","Sb","S","Y")
qS <- quantile(data$S, probs=c(0.05,0.95), na.rm=TRUE)
grid <- seq(qS[1], qS[2], length.out=3)
out <- riskCurve(formula=Y ~ S + factor(X), bsm="Sb", tx="Z", data=data, psGrid=grid)
\donttest{
# alternatively, to save the .RData output file (no '<-' needed):
riskCurve(formula=Y ~ S + factor(X), bsm="Sb", tx="Z", data=data, saveFile="out.RData",
saveDir="./")
}
}
\references{
Fong, Y., Huang, Y., Gilbert, P. B., and Permar, S. R. (2017), chngpt: threshold regression model estimation and inference, \emph{BMC Bioinformatics}, 18.
Hall, P., Racine, J., and Li, Q. (2004), Cross-validation and the estimation of conditional probability densities, \emph{JASA} 99(468), 1015-1026.
Juraska, M., Huang, Y., and Gilbert, P. B. (2020), Inference on treatment effect modification by biomarker response in a three-phase sampling design, \emph{Biostatistics}, 21(3): 545-560, \url{https://doi.org/10.1093/biostatistics/kxy074}.
}
\seealso{
\code{\link{bootRiskCurve}}, \code{\link{summary.riskCurve}} and \code{\link{plotMCEPcurve}}
}
|
#' data parse functions
#'
#' Parses AWUDS excel files into R data frames.
#'
#' @param file_path chr, path to the excel file (including file extension)
#' @param citations logical, citations were included as part of the output for Export data
#'
#' @importFrom readxl read_excel
#' @importFrom readxl excel_sheets
#' @importFrom utils packageVersion
#' @rdname parser
#'
#' @export
#'
#' @examples
#' folderPath <- system.file("extdata/excel_test", package="wateRuse")
#' exportData <- parseExport(file.path(folderPath,"Export_2010_County.xlsx"),citations = TRUE)
#' TP <- exportData[["TP"]]
#' PO <- exportData[["PO"]]
#'
#' folderPath <- system.file("extdata", package="wateRuse")
#' exportData2010 <- parseExport(file.path(folderPath,"Import_2010_County-3_0805A.xlsx"),citations = TRUE)
#' LI <- exportData2010[["LI"]]
parseExport <- function(file_path, citations = FALSE){
  # Parse an AWUDS "Export" workbook into a named list of data frames.
  #
  # Args:
  #   file_path: path to the Excel file (including extension).
  #   citations: logical; TRUE if citations were included in the export,
  #     which adds header rows that must be skipped (how many depends on
  #     the installed readxl version).
  #
  # Returns a named list of data frames, one per data sheet. If the
  # workbook carries a "Dataset list" sheet, its complete rows are
  # attached to the list as the 'Datasets' attribute.
  sheet_names <- excel_sheets(file_path)
  # reference sheets are never data
  sheet_names <- sheet_names[!(sheet_names %in% c("Methods Reference List", "Method Codes"))]
  # a "Dataset list" sheet marks a user-specified export = don't parse the metadata sheet
  user <- "Dataset list" %in% sheet_names
  if (user) {
    sheets_to_parse <- sheet_names[-which(sheet_names == "Dataset list")]
  } else {
    sheets_to_parse <- sheet_names
  }
  parsed_data <- lapply(sheets_to_parse, function(sheet, path, citations){
    if (citations) {
      # readxl >= 1.0.0 counts skipped rows differently than older versions
      major_readxl <- packageVersion("readxl")
      if (major_readxl >= "1.0.0") {
        all_df <- read_excel(path, sheet, skip = 2)
      } else {
        all_df <- read_excel(path, sheet, skip = 1)
      }
    } else {
      all_df <- read_excel(path, sheet)
    }
    # Remove numbered footnotes (e.g. "1) ...") that appear at the bottom of
    # reports. BUG FIX: the previous pattern "[:digit:\\)]" is a malformed
    # character class that matches the literal characters ':', 'd', 'i', 'g',
    # 't' and ')' anywhere in the value, so ordinary data rows containing
    # those letters were dropped too. Anchor to a leading "<digits>)" instead.
    notes_pattern <- "^[[:digit:]]+\\)"
    which_rows_notes <- grep(notes_pattern, all_df[[1]])
    if (length(which_rows_notes) != 0) {
      df <- all_df[-which_rows_notes, ]
      # keep the removed note text accessible on the result
      metadata <- list(Notes = as.list(unname(all_df[which_rows_notes, 1])))
      attr(df, 'Notes') <- metadata
    } else {
      df <- all_df
    }
    df <- removeDuplicateColumns(df)
    df <- removeMethodColumns(df)
    df <- removeAllNARows(df)
    return(df)
  }, path = file_path, citations = citations)
  names(parsed_data) <- sheets_to_parse
  if (user) {
    metadata <- read_excel(file_path, sheet = which(sheet_names == "Dataset list"))
    attr(parsed_data, 'Datasets') <- na.omit(metadata)
  }
  return(parsed_data)
}
#' @export
#' @rdname parser
#'
#' @examples
#' path <- system.file("extdata", package="wateRuse")
#' enteredData <- parseEnteredElements(file.path(path,"Entered-Data_2005.xlsx"))
parseEnteredElements <- function(file_path){
  # Parse an AWUDS "Entered Data" workbook: the first 7 rows hold metadata
  # (a description block plus data-aging counts), the rest is the data
  # table. The metadata is attached to the returned data frame as the
  # attributes 'Descriptive' and 'Aging_counts'.
  all_data <- read_excel(path = file_path, sheet = 1)
  # format metadata from the top of the excel file
  # (cell positions are fixed by the AWUDS report layout; columns 15/16
  # hold the data-aging counts)
  population_info <- as.character(as.vector(all_data[2, 1:2]))
  metadata_description <- all_data[1:5, 1]
  metadata_description[2] <- paste(population_info, collapse = " ")
  metadata_aging_counts <- all_data[1:5, c(15, 16)]
  names(metadata_aging_counts) <- c('Data Aging', 'Counts')
  metadata <- list(Descriptive = metadata_description,
                   Aging_counts = metadata_aging_counts)
  # actual data starts after the 7-row metadata header
  df <- read_excel(path = file_path, sheet = 1, skip = 7)
  df <- df[, which(!is.na(names(df)))] # removing columns that have no header
  df <- removeAllNARows(df)
  # Rename columns that need an upstream category prefix; all other names
  # pass through unchanged. switch() with a fall-through default replaces
  # the original scalar ifelse()/is.null() construction, and vapply()
  # guarantees a character vector of the right length.
  names(df) <- vapply(names(df), function(orig_col_name) {
    switch(orig_col_name,
           `Once-Through Cooling` = 'Thermoelectric: Once-Through Cooling',
           `Closed-Loop Cooling` = 'Thermoelectric: Closed-Loop Cooling',
           Instream = 'Hydroelectric: Instream',
           Offstream = 'Hydroelectric: Offstream',
           orig_col_name)
  }, character(1), USE.NAMES = FALSE)
  attributes(df) <- append(attributes(df), metadata)
  return(df)
}
#' @export
#' @importFrom stats complete.cases
#' @rdname parser
#'
#' @examples
#' path <- system.file("extdata", package="wateRuse")
#' compareData <- parseCompareData(file.path(path, "CompareData.xlsx"))
parseCompareData <- function(file_path){
  # Parse an AWUDS "Compare" workbook into a named list of data frames,
  # one per sheet. Each sheet carries junk header rows: the first fully
  # populated row is taken as the real column names, and repeated header
  # rows scattered through the data are dropped.
  # (Removed an unused `skip` formal and a dead `metadata` local from the
  # per-sheet worker.)
  sheet_names <- excel_sheets(file_path)
  parsed_data <- lapply(sheet_names, function(sheet, path){
    all_df <- read_excel(path, sheet)
    # grab first occurrence of a completely filled row; these are the real column headers
    col_names_location <- which(complete.cases(all_df))[1]
    names(all_df) <- all_df[col_names_location, ]
    df <- removeAllNARows(all_df)
    # remove duplicated column-name rows that appear throughout the data.
    # NOTE(review): df[, 1] does not drop to a vector when read_excel
    # returns tibbles; confirm this comparison behaves as intended with
    # the installed readxl version.
    df <- df[which(df[, 1] != names(df)[1]), ]
    return(df)
  }, path = file_path)
  names(parsed_data) <- sheet_names
  return(parsed_data)
}
parseDumpFiles <- function(file_path){
  # Read a tab-delimited AWUDS dump file into a data frame. The first row
  # supplies column names; '#' is not treated as a comment character, and
  # quote handling is left as quote = NULL to match the raw dump format.
  dump_df <- read.table(file_path,
                        header = TRUE,
                        sep = '\t',
                        quote = NULL,
                        comment.char = "")
  dump_df
}
removeDuplicateColumns <- function(df){
  # Drop columns whose names duplicate an earlier column, keeping the
  # first occurrence. BUG FIX: subsetting now uses drop = FALSE so a base
  # data.frame is not silently collapsed to a vector when only one column
  # remains (the downstream helpers expect a data frame).
  duplicate_columns <- which(duplicated(names(df)))
  if(length(duplicate_columns) > 0){
    df <- df[, -duplicate_columns, drop = FALSE]
  }
  return(df)
}
removeMethodColumns <- function(df){
  # Drop method-code columns (names containing "-M") that accompany the
  # value columns in AWUDS exports. fixed = TRUE documents that "-M" is a
  # literal match, and drop = FALSE (BUG FIX) prevents a base data.frame
  # from collapsing to a vector when only one column remains.
  Mcolumns <- which(grepl("-M", names(df), fixed = TRUE))
  if(length(Mcolumns) > 0){
    df <- df[, -Mcolumns, drop = FALSE]
  }
  return(df)
}
removeAllNARows <- function(df){
  # Drop rows whose FIRST column is NA. (Despite the name, only the first
  # column is inspected; rows with NAs elsewhere are kept.)
  df[!is.na(df[[1]]), ]
}
| /R/parseDataFunctions.R | permissive | rwdudley-usgs/wateRuse | R | false | false | 5,317 | r | #' data parse functions
#'
#' Parses AWUDS excel files into R data frames.
#'
#' @param file_path chr, path to the excel file (including file extension)
#' @param citations logical, citations were included as part of the output for Export data
#'
#' @importFrom readxl read_excel
#' @importFrom readxl excel_sheets
#' @importFrom utils packageVersion
#' @rdname parser
#'
#' @export
#'
#' @examples
#' folderPath <- system.file("extdata/excel_test", package="wateRuse")
#' exportData <- parseExport(file.path(folderPath,"Export_2010_County.xlsx"),citation=TRUE)
#' TP <- exportData[["TP"]]
#' PO <- exportData[["PO"]]
#'
#' folderPath <- system.file("extdata", package="wateRuse")
#' exportData2010 <- parseExport(file.path(folderPath,"Import_2010_County-3_0805A.xlsx"),citation=TRUE)
#' LI <- exportData2010[["LI"]]
parseExport <- function(file_path, citations = FALSE){
  # Parse an AWUDS "Export" workbook into a named list of data frames.
  #
  # Args:
  #   file_path: path to the Excel file (including extension).
  #   citations: logical; TRUE if citations were included in the export,
  #     which adds header rows that must be skipped (how many depends on
  #     the installed readxl version).
  #
  # Returns a named list of data frames, one per data sheet. If the
  # workbook carries a "Dataset list" sheet, its complete rows are
  # attached to the list as the 'Datasets' attribute.
  sheet_names <- excel_sheets(file_path)
  # reference sheets are never data
  sheet_names <- sheet_names[!(sheet_names %in% c("Methods Reference List", "Method Codes"))]
  # a "Dataset list" sheet marks a user-specified export = don't parse the metadata sheet
  user <- "Dataset list" %in% sheet_names
  if (user) {
    sheets_to_parse <- sheet_names[-which(sheet_names == "Dataset list")]
  } else {
    sheets_to_parse <- sheet_names
  }
  parsed_data <- lapply(sheets_to_parse, function(sheet, path, citations){
    if (citations) {
      # readxl >= 1.0.0 counts skipped rows differently than older versions
      major_readxl <- packageVersion("readxl")
      if (major_readxl >= "1.0.0") {
        all_df <- read_excel(path, sheet, skip = 2)
      } else {
        all_df <- read_excel(path, sheet, skip = 1)
      }
    } else {
      all_df <- read_excel(path, sheet)
    }
    # Remove numbered footnotes (e.g. "1) ...") that appear at the bottom of
    # reports. BUG FIX: the previous pattern "[:digit:\\)]" is a malformed
    # character class that matches the literal characters ':', 'd', 'i', 'g',
    # 't' and ')' anywhere in the value, so ordinary data rows containing
    # those letters were dropped too. Anchor to a leading "<digits>)" instead.
    notes_pattern <- "^[[:digit:]]+\\)"
    which_rows_notes <- grep(notes_pattern, all_df[[1]])
    if (length(which_rows_notes) != 0) {
      df <- all_df[-which_rows_notes, ]
      # keep the removed note text accessible on the result
      metadata <- list(Notes = as.list(unname(all_df[which_rows_notes, 1])))
      attr(df, 'Notes') <- metadata
    } else {
      df <- all_df
    }
    df <- removeDuplicateColumns(df)
    df <- removeMethodColumns(df)
    df <- removeAllNARows(df)
    return(df)
  }, path = file_path, citations = citations)
  names(parsed_data) <- sheets_to_parse
  if (user) {
    metadata <- read_excel(file_path, sheet = which(sheet_names == "Dataset list"))
    attr(parsed_data, 'Datasets') <- na.omit(metadata)
  }
  return(parsed_data)
}
#' @export
#' @rdname parser
#'
#' @examples
#' path <- system.file("extdata", package="wateRuse")
#' enteredData <- parseEnteredElements(file.path(path,"Entered-Data_2005.xlsx"))
parseEnteredElements <- function(file_path){
  # Parse an AWUDS "Entered Data" workbook: the first 7 rows hold metadata
  # (a description block plus data-aging counts), the rest is the data
  # table. The metadata is attached to the returned data frame as the
  # attributes 'Descriptive' and 'Aging_counts'.
  all_data <- read_excel(path = file_path, sheet = 1)
  # format metadata from the top of the excel file
  # (cell positions are fixed by the AWUDS report layout; columns 15/16
  # hold the data-aging counts)
  population_info <- as.character(as.vector(all_data[2, 1:2]))
  metadata_description <- all_data[1:5, 1]
  metadata_description[2] <- paste(population_info, collapse = " ")
  metadata_aging_counts <- all_data[1:5, c(15, 16)]
  names(metadata_aging_counts) <- c('Data Aging', 'Counts')
  metadata <- list(Descriptive = metadata_description,
                   Aging_counts = metadata_aging_counts)
  # actual data starts after the 7-row metadata header
  df <- read_excel(path = file_path, sheet = 1, skip = 7)
  df <- df[, which(!is.na(names(df)))] # removing columns that have no header
  df <- removeAllNARows(df)
  # Rename columns that need an upstream category prefix; all other names
  # pass through unchanged. switch() with a fall-through default replaces
  # the original scalar ifelse()/is.null() construction, and vapply()
  # guarantees a character vector of the right length.
  names(df) <- vapply(names(df), function(orig_col_name) {
    switch(orig_col_name,
           `Once-Through Cooling` = 'Thermoelectric: Once-Through Cooling',
           `Closed-Loop Cooling` = 'Thermoelectric: Closed-Loop Cooling',
           Instream = 'Hydroelectric: Instream',
           Offstream = 'Hydroelectric: Offstream',
           orig_col_name)
  }, character(1), USE.NAMES = FALSE)
  attributes(df) <- append(attributes(df), metadata)
  return(df)
}
#' @export
#' @importFrom stats complete.cases
#' @rdname parser
#'
#' @examples
#' path <- system.file("extdata", package="wateRuse")
#' compareData <- parseCompareData(file.path(path, "CompareData.xlsx"))
parseCompareData <- function(file_path){
  # Parse an AWUDS "Compare" workbook into a named list of data frames,
  # one per sheet. Each sheet carries junk header rows: the first fully
  # populated row is taken as the real column names, and repeated header
  # rows scattered through the data are dropped.
  # (Removed an unused `skip` formal and a dead `metadata` local from the
  # per-sheet worker.)
  sheet_names <- excel_sheets(file_path)
  parsed_data <- lapply(sheet_names, function(sheet, path){
    all_df <- read_excel(path, sheet)
    # grab first occurrence of a completely filled row; these are the real column headers
    col_names_location <- which(complete.cases(all_df))[1]
    names(all_df) <- all_df[col_names_location, ]
    df <- removeAllNARows(all_df)
    # remove duplicated column-name rows that appear throughout the data.
    # NOTE(review): df[, 1] does not drop to a vector when read_excel
    # returns tibbles; confirm this comparison behaves as intended with
    # the installed readxl version.
    df <- df[which(df[, 1] != names(df)[1]), ]
    return(df)
  }, path = file_path)
  names(parsed_data) <- sheet_names
  return(parsed_data)
}
parseDumpFiles <- function(file_path){
  # Read a tab-delimited AWUDS dump file into a data frame. The first row
  # supplies column names; '#' is not treated as a comment character, and
  # quote handling is left as quote = NULL to match the raw dump format.
  dump_df <- read.table(file_path,
                        header = TRUE,
                        sep = '\t',
                        quote = NULL,
                        comment.char = "")
  dump_df
}
removeDuplicateColumns <- function(df){
  # Drop columns whose names duplicate an earlier column, keeping the
  # first occurrence. BUG FIX: subsetting now uses drop = FALSE so a base
  # data.frame is not silently collapsed to a vector when only one column
  # remains (the downstream helpers expect a data frame).
  duplicate_columns <- which(duplicated(names(df)))
  if(length(duplicate_columns) > 0){
    df <- df[, -duplicate_columns, drop = FALSE]
  }
  return(df)
}
removeMethodColumns <- function(df){
  # Drop method-code columns (names containing "-M") that accompany the
  # value columns in AWUDS exports. fixed = TRUE documents that "-M" is a
  # literal match, and drop = FALSE (BUG FIX) prevents a base data.frame
  # from collapsing to a vector when only one column remains.
  Mcolumns <- which(grepl("-M", names(df), fixed = TRUE))
  if(length(Mcolumns) > 0){
    df <- df[, -Mcolumns, drop = FALSE]
  }
  return(df)
}
removeAllNARows <- function(df){
  # Drop rows whose FIRST column is NA. (Despite the name, only the first
  # column is inspected; rows with NAs elsewhere are kept.)
  df[!is.na(df[[1]]), ]
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classifier_fns.R
\name{colNameReplace}
\alias{colNameReplace}
\title{Replace One Set of Column Names With Another}
\usage{
colNameReplace(array, name.before, name.after)
}
\arguments{
\item{array}{an array or data frame whose column names are to be modified}
\item{name.before}{character vector of existing column names to be replaced}
\item{name.after}{character vector of replacement names, matched positionally to \code{name.before}}
}
\value{
array
}
\description{
Replace One Set of Column Names With Another
}
| /man/colNameReplace.Rd | no_license | n8thangreen/STIecoPredict | R | false | true | 361 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classifier_fns.R
\name{colNameReplace}
\alias{colNameReplace}
\title{Replace One Set of Column Names With Another}
\usage{
colNameReplace(array, name.before, name.after)
}
\arguments{
\item{array}{an array or data frame whose column names are to be modified}
\item{name.before}{character vector of existing column names to be replaced}
\item{name.after}{character vector of replacement names, matched positionally to \code{name.before}}
}
\value{
array
}
\description{
Replace One Set of Column Names With Another
}
|
/20180529clusterEPCI.R | no_license | loduplo/flexOrange | R | false | false | 11,107 | r | ||
#' Abundance ratio
#'
#' Creates a vector containing the abundance ratio for each peptide
#' @param dat a data frame containing columns with median peptide abundances
#' @param group column string or number (numerator of ratio)
#' @param ctrl column string or number (denominator of ratio)
#' defaults to "ctrl_med"
#' @keywords ratio
#' @export
#' @examples
#' df$ratio <- abun_ratio(df, "group1_med")
abun_ratio <- function (dat, group, ctrl = "ctrl_med"){
  # Element-wise ratio of the abundance column `group` (numerator) to the
  # control column `ctrl` (denominator, defaults to "ctrl_med"). Both may
  # be given as column names or numeric positions.
  numerator <- dat[, group]
  denominator <- dat[, ctrl]
  numerator / denominator
}
| /R/abun_ratio.R | permissive | tsoleary/proteomixr | R | false | false | 492 | r | #' Abundance ratio
#'
#' Creates a vector containing the abundance ratio for each peptide
#' @param dat a data frame containing columns with median peptide abundances
#' @param group column string or number (numerator of ratio)
#' @param ctrl column string or number (denominator of ratio)
#' defaults to "ctrl_med"
#' @keywords ratio
#' @export
#' @examples
#' df$ratio <- abun_ratio(df, "group1_med")
abun_ratio <- function (dat, group, ctrl = "ctrl_med"){
  # Element-wise ratio of the abundance column `group` (numerator) to the
  # control column `ctrl` (denominator, defaults to "ctrl_med"). Both may
  # be given as column names or numeric positions.
  numerator <- dat[, group]
  denominator <- dat[, ctrl]
  numerator / denominator
}
|
# install.packages('keras')
# install.packages('purrr')
# install.packages('functional')
library(IsolationForest)
library(MASS)
library(caret)
library(fGarch)
library(fitdistrplus)
library(pracma)
library(BBmisc)
library(functional)
library(dplyr)
library(keras)
library(lubridate)
library(tensorflow)
Sys.sleep(5)
install_tensorflow(restart_session = FALSE)
setwd("/home/jonghyeon3/extension_AD/evaluations/data")
fn<-list.files(getwd())
#data load and preprocess
{
# Load the event log and keep only those "anomalous" cases whose activity
# sequence does not also occur among normal cases (otherwise they are
# indistinguishable from normal behavior and are removed).
input = data.frame(read.csv('medium-0.1-1.csv', header=T))
normal= input[which(input$anomaly_type =="normal"),]
anomaly= input[which(input$anomaly_type !="normal"),]
# per-case activity sequences, used to compare traces across the two groups
normal_seq = aggregate(normal$Activity, by=list(normal$Case), FUN=paste0)
anomaly_seq = aggregate(anomaly$Activity, by=list(anomaly$Case), FUN=paste0)
delete_case= anomaly_seq[which(is.element(anomaly_seq$x , normal_seq$x)),'Group.1']
input = input[which(!is.element(input$Case, delete_case)),]
# re-number events after filtering
input$Event = 1:nrow(input)
input$Event = as.factor(input$Event)
# 'start' = within-case cumulative count minus 1, then everything != 1 zeroed.
# NOTE(review): as written this flags the SECOND event of each case with 1
# (cumsum - 1 == 1), not the first; later code uses cumsum(start) as a case
# counter -- confirm this off-by-one is intended.
one= rep(1, nrow(input))
input[,'start'] = ave(one, by= input$Case, FUN= cumsum) -1
input[which(input$start !=1),'start'] =0
}
####
#functions
{
fun_itree = function(x){
  # Anomaly scores from an Isolation Forest (100 trees, minimum leaf size 5).
  # If every column of x is constant no forest can be grown, so every
  # observation gets a score of 0 instead.
  n_unique <- apply(x, 2, function(col) length(unique(col)))
  if (any(n_unique > 1)) {
    forest <- IsolationTrees(as.data.frame(x), ntree = 100, nmin = 5)
    scores <- AnomalyScore(x, forest)
    scores$outF
  } else {
    rep(0, nrow(x))
  }
}
# Build a dense embedding table for activity labels via a small Keras model.
# Returns a data frame with `embedding_size` weight columns plus an
# ActivityID column ("none" padding token first, then one row per level).
# NOTE(review): the model is built and compiled but never fit, so the
# returned weights are the randomly initialized embedding values -- confirm
# that omitting training is intentional.
fun_embedding = function(ActivityID, embedding_size){
model <- keras_model_sequential()
# input_dim reserves one extra slot for the "none" (padding) token
model %>% layer_embedding(input_dim = length(unique(ActivityID))+1, output_dim = embedding_size, input_length = 1, name="embedding") %>%
layer_flatten() %>%
layer_dense(units=40, activation = "relu") %>%
layer_dense(units=10, activation = "relu") %>%
layer_dense(units=1)
model %>% compile(loss = "mse", optimizer = "sgd", metric="accuracy")
# pull the raw embedding weight matrix back out of the model
layer <- get_layer(model, "embedding")
embeddings <- data.frame(layer$get_weights()[[1]])
embeddings$ActivityID <- c("none", levels(ActivityID) )
return(embeddings)
}
fun_onehot = function(data){
  # One-hot encode the ActivityID factor (column 2 of `data`).
  # The first factor level is encoded by hand as column ActivityID1
  # (1 where the activity is the first level, 0 otherwise); the remaining
  # levels come from model.matrix's treatment contrasts. The duplicated
  # computation of A across the two branches was factored out.
  A <- as.numeric(data[,2])
  A[which(A != 1)] <- 0
  if (length(levels(data$ActivityID)) > 1) {
    a <- model.matrix(~ActivityID, data = data)
    a <- cbind(ActivityID1 = A, a[,-1])
  } else {
    # single-level factor: model.matrix would be degenerate
    a <- cbind(ActivityID1 = A)
  }
  as.data.frame(a)
}
fun_batch_remove_TRUE = function(input, Min, start_index, Max, until, embedding_size_p, remove_threshold ){}
# Streaming anomaly scoring in batch mode, without case removal.
# Bootstraps on the first `start_index` events, then scores each subsequent
# event one at a time (up to `until` further events), keeping at most `Max`
# active cases in the window. Each event's trace prefix is encoded (deep
# embedding when embedding_size_p > 0, otherwise one-hot), compared against
# all prefixes of the same length with an isolation forest, and the event's
# score is written into the 'leverage' column; 'tn' flags scores above
# mean + sd, 'time' records per-event wall time.
# NOTE(review): if start_index == last_index after trimming, the "skip"
# branch falls through and the function returns NULL (the caller checks
# is.null(output)).
fun_batch_remove_FALSE = function(input, Min,start_index, Max, until, embedding_size_p ){
#prepare data
pre<-input
pre= pre[ with(pre, order(Case,timestamp)),]
# 'start' recomputed as in preprocessing.
# NOTE(review): this marks the SECOND event of each case with 1
# (cumsum - 1 == 1); cumsum(start) below counts cases with >= 2 events --
# confirm the off-by-one is intended.
one= rep(1, nrow(pre))
pre[,'start'] = ave(one, by= pre$Case, FUN= cumsum) -1
pre[which(pre$start !=1),'start'] =0
pre= pre[ with(pre, order(timestamp)),]
pre[,'Event'] = as.factor(1:nrow(pre))
pre[,'num_case'] = cumsum(pre$start)
# result columns: leverage = anomaly score (-1 = not scored), tn = outlier
# flag, time = per-event wall time; t1..t3 are written nowhere below
pre[,'leverage'] = rep(-1, nrow(pre))
pre[,'t1'] = rep(0, nrow(pre))
pre[,'t2'] = rep(0, nrow(pre))
pre[,'t3'] = rep(0, nrow(pre))
pre[,'tn']= rep(0, nrow(pre))
pre[,'time'] = rep(0, nrow(pre))
event_num = nrow(pre)
case_num= length(unique(pre$Case))
start_index = start_index
last_index = nrow(pre)
leverage_start <- Sys.time()
# initial window: everything up to start_index
pre2 = pre[1:start_index,]
cur_len = sum(pre2$start)
data<- pre2[,c("Case","Activity","order")]
names(data)[1:2] <- c("ID", "ActivityID")
#basic: Max should be larger than Min or equal
if(Max< (Min+1)){
Max=Min+1
}
# Max option: trim the oldest cases until at most Max cases remain
if(cur_len > Max ){
del_case = pre[which(pre$start==1),'Case'][1:(cur_len-Max)]
pre = pre[which(!is.element(pre$Case, del_case)),]
pre[,'num_case'] = cumsum(pre$start)
event_num = nrow(pre)
case_num= length(unique(pre$Case))
last_index = nrow(pre)
pre2 = pre2[which(!is.element(pre2$Case, del_case)),]
data<- pre2[,c("Case","Activity","order")]
names(data)[1:2] <- c("ID", "ActivityID")
cur_len = sum(pre2$start)
start_index = nrow(pre2)
last_index = nrow(pre)
}
if(start_index == last_index){
#skip
}else{
# --- score the last event of the initial window ---
if(embedding_size_p>0){
num_act= length(unique(data$ActivityID))
embedding_size = round(num_act*embedding_size_p)
# deep embedding encoding
embeddings = fun_embedding(as.factor(data$ActivityID), embedding_size)
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
n= length(unique(data[,1]))
m = max(table(data[,1]))
data$order = as.character(data$order)
data$ID = as.character(data$ID)
all3 = merge(data, embeddings, by='ActivityID', all.x=T)
all3= all3[ with(all3, order(ID, order)),]
all3 = all3[,c("ID","ActivityID",names(all3)[(ncol(all3)-embedding_size+1):ncol(all3)])]
num_event = nrow(all3)
max<- m*(embedding_size)
c=unique(pre2[,c("Case","anomaly_type")]) #CHANGE
label = as.character(c[,2])
# prefix encoding: row j = flattened embedding of case-prefix ending at event j
prefixL = as.numeric()
newdat2<- matrix(NA, nrow=num_event , ncol=max)
for(j in 1:num_event){
cut = all3[which(all3[1:j,1]== all3[j,1] ),-c(1:2)]
# NOTE(review): class(cut)=='numeric' is a fragile single-row check;
# inherits()/is.data.frame() would be safer
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3 = data.frame(cbind(Case=as.character(all3[,1]), label= as.character(pre2$anomaly_type), newdat2))
# compare only prefixes of the same length as the target event's prefix
x2= newdat3[which(prefixL == prefixL[start_index]),-(1:2)]
x2 = x2[,1:(prefixL[start_index]*embedding_size)]
}else{
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
# One-hot encoding
data1 <- fun_onehot(data)
newdat <- cbind(data[,1], data1)
newdat[,1] <- as.factor(newdat[,1])
n<- length(levels((newdat[,1]))) # the number of cases
m<-max(table((newdat[,1]))) # maximum trace length
num_act= ncol(newdat)-1
num_event = nrow(newdat)
max<- m*num_act
c=unique(pre2[,c("Case","anomaly_type")])
# prefix encoding
prefixL = as.numeric()
newdat2<- matrix(NA, nrow=num_event , ncol=max)
for(j in 1:num_event){
cut = newdat[which(newdat[1:j,1]== newdat[j,1] ),-1]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
act_save = names(newdat) #change 1
newdat3 = data.frame(cbind(Case=as.character(newdat[,1]), label= as.character(pre2$anomaly_type), newdat2))
x2= newdat3[which(prefixL == prefixL[start_index]),-(1:2)]
x2 = x2[,1:(prefixL[start_index]*num_act)]
}
#Calculate leverage
x= as.matrix(sapply(x2, as.numeric))
h_diag <- fun_itree(x)
# the target event's encoding is the last row of x here
pre[start_index, 'leverage'] = h_diag[length(h_diag)]
leverage_end <- Sys.time()
pre[start_index, 'time'] = (leverage_end-leverage_start)
pre[start_index, 'tn'] = (h_diag[length(h_diag)] > (mean(h_diag)+sd(h_diag)))
#Set escape option: until = 0 means "run to the end of the log"
if(until==0 | start_index+until>last_index){
until = last_index
}else{
until= start_index+until
}
#Start event stream: one event appended and scored per iteration.
# NOTE(review): rbind-ing pre2 row by row is O(n^2) over the stream.
for(i in (start_index+1):until){ # last_index
print(paste("Start to calculate leverage score of ", i ,"-th event (total ",event_num," events)" ,sep=''))
leverage_start <- Sys.time()
pre2 = rbind(pre2, pre[i,])
cur_len = sum(pre2$start)
data<- pre2[,c("Case","Activity",'order')]
names(data)[1:2] <- c("ID", "ActivityID")
# Max option: drop oldest cases, but never the case of the current event
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
if(cur_len > Max ){
del_case = pre2[which(pre2$start==1),'Case']
del_case = del_case[1:(cur_len-Max)]
del_case= del_case[which(!is.element(del_case, object_case))]
data = data[which(!is.element(data[,1], del_case)),]
pre3= pre2[which(!is.element(pre2[,1], del_case)),]
label = as.character(pre3[,c("anomaly_type")])
c=unique(pre3[,c("Case","anomaly_type")]) #revision2
}else{
label = as.character(pre2[,c("anomaly_type")])
pre3=pre2; c=unique(pre3[,c("Case","anomaly_type")]) #revision2
}
if(embedding_size_p>0){
num_act= length(unique(data$ActivityID))
embedding_size = round(num_act*embedding_size_p)
# embedding encoding (embeddings are rebuilt from scratch every event)
embeddings = fun_embedding( as.factor(data$ActivityID), embedding_size)
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
n= length(unique(data[,1]))
m = max(table(data[,1]))
data$order = as.character(data$order)
data$ID = as.character(data$ID)
all3 = merge(data, embeddings, by='ActivityID', all.x=T)
all3= all3[ with(all3, order(ID, order)),]
all3 = all3[,c("ID","ActivityID",names(all3)[(ncol(all3)-embedding_size+1):ncol(all3)])]
num_event = nrow(all3)
max<- m*(embedding_size)
c=unique(pre2[,c("Case","anomaly_type")]) #CHANGE
label = as.character(c[,2])
{ # update event
newdat2<- matrix(NA, nrow=num_event , ncol=max)
prefixL = as.numeric()
for(j in 1:num_event){
cut = all3[which(all3[1:j,1]== all3[j,1] ),-c(1:2)]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
}
# Max option
if(cur_len > Max ){
del_case = pre2[which(pre2$start==1),'Case'][1:(cur_len-Max)]
del_case= del_case[which(!is.element(del_case, object_case))]
pre2 = pre2[which(!is.element(all3[,1], del_case)),]
newdat2 = newdat2[which(!is.element(all3[,1], del_case)),]
label= label[which(!is.element(all3[,1], del_case))]
prefixL= prefixL[which(!is.element(all3[,1], del_case))]
all3 = all3[which(!is.element(all3[,1], del_case)),]
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3 <-data.frame(cbind(Case= as.character(all3[,1]), label= label, newdat2))
x2= newdat3[which(prefixL == prefixL[length(prefixL)]),-(1:2)]
x2 = x2[,1:(prefixL[length(prefixL)]*embedding_size)]
}else{
object_case = pre3$Case[nrow(pre3)] #revision2
object_event = pre3$Event[nrow(pre3)] #revision2
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
#revision2
# One-hot encoding (here per case: one encoded prefix row per case)
data1 <- fun_onehot(data)
newdat <- cbind(data[,1], data1)
newdat[,1] <- as.factor(newdat[,1])
n<- length(levels((newdat[,1]))) # the number of cases
m<-max(table((newdat[,1]))) # maximum trace length
num_act= ncol(newdat)-1
num_event = nrow(newdat)
max<- m*num_act
newdat2<- matrix(NA, nrow=n , ncol=max)
prefixL = as.numeric()
for(j in 1:n){
cut = newdat[which(newdat[,1]== c[j,1] ),-1]
save2 <- as.vector(t(cut))
prefixL[j] = sum(save2)
newdat2[j,1:length(save2)] <- save2
}
# CP = encoded prefix length of the current event's case; keep only cases
# with prefixes at least that long, truncated to CP columns
CP = prefixL[which(c[,1]== object_case)]
newdat2 = newdat2[which(prefixL >= CP),]
if( length(which(prefixL >= CP)) != 1 ){
newdat2 = newdat2[, 1:(CP*num_act)]
loc = which(c[which(prefixL >= CP),1] == object_case)
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3= cbind(c[which(prefixL >= CP),], newdat2)
act_save = names(newdat) #change 1
# newdat3 <-data.frame(cbind(Case= as.character(newdat[,1]), label= label, newdat2))
x2= newdat3[,-(1:2)]
}else{
# the current case is the only one with a prefix this long: nothing to compare
x2= NA
}
}
#revision2
# NOTE(review): when x2 is a data frame, is.na(x2) returns a matrix and
# `if` on a length > 1 condition errors in R >= 4.2 -- confirm on the
# targeted R version.
if(is.na(x2)){
pre[i, 'leverage'] = 0
}else{
#Calculate leverage
x= as.matrix(sapply(x2, as.numeric))
h_diag <- fun_itree(x)
# NOTE(review): `loc` is only assigned in the one-hot branch above; with
# embedding_size_p > 0 this indexes with a stale/undefined `loc`.
pre[i, 'leverage'] = h_diag[loc]
}
leverage_end <- Sys.time()
print(paste("Anomaly score of", i ,"-th event = ", round( h_diag[loc],5), " (CaseID=",object_case,")" ,sep=''))
pre[i, 'time'] = (leverage_end-leverage_start)
pre[i, 'tn'] = (h_diag[loc] > (mean(h_diag)+sd(h_diag)))
}
return(pre)
}
}
# NOTE(review): unimplemented stubs -- the non-batch dispatch targets of
# streaming_score (remove = TRUE and remove = FALSE with batch = FALSE)
# silently return NULL.
fun_remove_TRUE = function(input, Min,start_index, Max, until,embedding_size_p, remove_threshold ){}
fun_remove_FALSE = function(input, Min, start_index, Max, until, embedding_size_p){}
streaming_score = function(input, Min = 100, start_index = start_index, Max = 0, until=0, batch = TRUE ,embedding_size_p = 0, remove=TRUE, remove_threshold = 0.2){
  # Dispatch to one of four scoring workers chosen by `remove` (prune
  # stale cases by leverage threshold) and `batch` (windowed processing),
  # timing the run and printing the elapsed time before returning the
  # worker's result.
  # NOTE(review): the default start_index = start_index is self-referential;
  # calling this function without start_index raises a promise error.
  total_start <- Sys.time()
  worker_args <- list(input = input, Min = Min, start_index = start_index,
                      Max = Max, until = until,
                      embedding_size_p = embedding_size_p)
  if (remove) {
    worker_args$remove_threshold <- remove_threshold
    worker <- if (batch) fun_batch_remove_TRUE else fun_remove_TRUE
  } else {
    worker <- if (batch) fun_batch_remove_FALSE else fun_remove_FALSE
  }
  pre <- do.call(worker, worker_args)
  total_end <- Sys.time()
  print(total_end - total_start)
  return(pre)
}
}
#Result
{
# Score the stream in windows: start where the cumulative 'start' counter
# first reaches 101 cases, then launch a 300-event scoring run every 1000
# events (the final, possibly partial, window index is dropped).
start_index = which(cumsum(input$start) == 101)[1]
last_index = nrow(input)
part = seq(start_index, last_index, 1000 )
part = part[-length(part)]
output_total = data.frame()
for(i in part){
output = streaming_score(input, Min=100, start_index= i, Max=1000, until = 299, batch=TRUE, remove= FALSE, embedding_size_p=0) # onehot
# NOTE(review): is.null(output) == 0 is a roundabout !is.null(output)
if(is.null(output) == 0 ){
output = output[order(output$timestamp),]
# keep only events that actually received a leverage score (>= 0)
start = min(which(output$leverage >=0))
loc = which(output$leverage>=0)
output = output[loc,]
# rbind in a loop is O(n^2); acceptable for the handful of windows here
output_total = rbind(output_total, output)
}
}
setwd("/home/jonghyeon3/extension_AD/evaluations/bs2/result")
write.csv(output_total, "result_itree_medium.csv", row.names= FALSE)
}
# plot(see$leverage, ylim= c(0,1),
# col= ifelse(see$label==1 ,'red', 'black' ), cex= ifelse(see$label==1 ,1.0, 0.5), pch= ifelse(see$label==1 ,9, 1)
# , ylab= 'Anomaly score')
#
# plot(see2$leverage, ylim= c(0,1),
# col= ifelse(see2$label==1 ,'red', 'black' ), cex= ifelse(see2$label==1 ,1.0, 0.5), pch= ifelse(see2$label==1 ,9, 1)
# , ylab= 'Anomaly score')
| /bs2/Model2_medium.R | no_license | paai-lab/Online-Anomaly-Detection-Extension-2021 | R | false | false | 16,330 | r | # install.packages('keras')
# install.packages('purrr')
# install.packages('functional')
library(IsolationForest)
library(MASS)
library(caret)
library(fGarch)
library(fitdistrplus)
library(pracma)
library(BBmisc)
library(functional)
library(dplyr)
library(keras)
library(lubridate)
library(tensorflow)
Sys.sleep(5)
install_tensorflow(restart_session = FALSE)
setwd("/home/jonghyeon3/extension_AD/evaluations/data")
fn<-list.files(getwd())
#data load and preprocess
# Reads the labelled event log and removes ambiguous anomalies: any anomalous
# case whose full activity sequence also occurs among normal cases is dropped,
# so the remaining anomaly labels are unambiguous.
{
input = data.frame(read.csv('medium-0.1-1.csv', header=T)) # NOTE(review): prefer header=TRUE over the reassignable shorthand T
normal= input[which(input$anomaly_type =="normal"),]
anomaly= input[which(input$anomaly_type !="normal"),]
# Per-case activity sequences for normal and anomalous cases separately.
normal_seq = aggregate(normal$Activity, by=list(normal$Case), FUN=paste0)
anomaly_seq = aggregate(anomaly$Activity, by=list(anomaly$Case), FUN=paste0)
delete_case= anomaly_seq[which(is.element(anomaly_seq$x , normal_seq$x)),'Group.1']
input = input[which(!is.element(input$Case, delete_case)),]
# Re-number events and make the id a factor.
input$Event = 1:nrow(input)
input$Event = as.factor(input$Event)
# `start` flag: cumsum of ones per case minus 1 equals 1 on the SECOND event
# of each case; everything else is zeroed. Downstream code uses
# cumsum(input$start) as a case counter.
# NOTE(review): if the intent was to flag the FIRST event per case, the -1
# offset shifts the flag to the second event -- confirm this is intended.
one= rep(1, nrow(input))
input[,'start'] = ave(one, by= input$Case, FUN= cumsum) -1
input[which(input$start !=1),'start'] =0
}
####
#functions
{
# Anomaly scoring via an isolation forest.
# Returns the outlier score (outF) for every row of x. When no column of x
# varies (all columns constant), an isolation forest cannot be grown, so a
# zero score is returned for every row instead.
fun_itree = function(x){
  has_varying_col <- any(apply(x, 2, function(col) length(unique(col))) > 1)
  if (!has_varying_col) {
    return(rep(0, nrow(x)))
  }
  forest <- IsolationTrees(as.data.frame(x), ntree = 100, nmin = 5)
  scores <- AnomalyScore(x, forest)
  scores$outF
}
# Build a small keras network containing an activity-embedding layer and
# return the embedding weights as a data.frame: one row per input slot
# (number of unique activities + 1), labelled "none" plus the factor levels.
# NOTE(review): the model is compiled but never fit, so the returned weights
# are the layer's random initial values -- confirm that training was not
# intended here.
fun_embedding = function(ActivityID, embedding_size){
model <- keras_model_sequential()
model %>% layer_embedding(input_dim = length(unique(ActivityID))+1, output_dim = embedding_size, input_length = 1, name="embedding") %>%
layer_flatten() %>%
layer_dense(units=40, activation = "relu") %>%
layer_dense(units=10, activation = "relu") %>%
layer_dense(units=1)
# NOTE(review): keras's compile() argument is `metrics`; `metric=` appears to
# rely on partial argument matching -- verify against the installed version.
model %>% compile(loss = "mse", optimizer = "sgd", metric="accuracy")
# Extract the (untrained) embedding matrix and attach activity labels.
layer <- get_layer(model, "embedding")
embeddings <- data.frame(layer$get_weights()[[1]])
embeddings$ActivityID <- c("none", levels(ActivityID) )
return(embeddings)
}
# One-hot encode the ActivityID factor column (column 2) of `data`.
# Column "ActivityID1" flags rows whose activity is the first factor level;
# the remaining columns are the treatment-contrast dummies produced by
# model.matrix() for the other levels. When the activity factor has a single
# level only the "ActivityID1" column is produced (model.matrix would fail
# on a one-level factor).
fun_onehot = function(data){
  first_level_flag <- as.numeric(data[, 2])
  first_level_flag[which(first_level_flag != 1)] <- 0
  if (length(levels(data$ActivityID)) > 1) {
    dummies <- model.matrix(~ActivityID, data = data)
    encoded <- cbind(ActivityID1 = first_level_flag, dummies[, -1])
  } else {
    encoded <- cbind(ActivityID1 = first_level_flag)
  }
  as.data.frame(encoded)
}
fun_batch_remove_TRUE = function(input, Min, start_index, Max, until, embedding_size_p, remove_threshold ){}
fun_batch_remove_FALSE = function(input, Min,start_index, Max, until, embedding_size_p ){
#prepare data
pre<-input
pre= pre[ with(pre, order(Case,timestamp)),]
one= rep(1, nrow(pre))
pre[,'start'] = ave(one, by= pre$Case, FUN= cumsum) -1
pre[which(pre$start !=1),'start'] =0
pre= pre[ with(pre, order(timestamp)),]
pre[,'Event'] = as.factor(1:nrow(pre))
pre[,'num_case'] = cumsum(pre$start)
pre[,'leverage'] = rep(-1, nrow(pre))
pre[,'t1'] = rep(0, nrow(pre))
pre[,'t2'] = rep(0, nrow(pre))
pre[,'t3'] = rep(0, nrow(pre))
pre[,'tn']= rep(0, nrow(pre))
pre[,'time'] = rep(0, nrow(pre))
event_num = nrow(pre)
case_num= length(unique(pre$Case))
start_index = start_index
last_index = nrow(pre)
leverage_start <- Sys.time()
pre2 = pre[1:start_index,]
cur_len = sum(pre2$start)
data<- pre2[,c("Case","Activity","order")]
names(data)[1:2] <- c("ID", "ActivityID")
#basic: Max should be larger than Min or equal
if(Max< (Min+1)){
Max=Min+1
}
# Max option
if(cur_len > Max ){
del_case = pre[which(pre$start==1),'Case'][1:(cur_len-Max)]
pre = pre[which(!is.element(pre$Case, del_case)),]
pre[,'num_case'] = cumsum(pre$start)
event_num = nrow(pre)
case_num= length(unique(pre$Case))
last_index = nrow(pre)
pre2 = pre2[which(!is.element(pre2$Case, del_case)),]
data<- pre2[,c("Case","Activity","order")]
names(data)[1:2] <- c("ID", "ActivityID")
cur_len = sum(pre2$start)
start_index = nrow(pre2)
last_index = nrow(pre)
}
if(start_index == last_index){
#skip
}else{
if(embedding_size_p>0){
num_act= length(unique(data$ActivityID))
embedding_size = round(num_act*embedding_size_p)
# deep embedding encoding
embeddings = fun_embedding(as.factor(data$ActivityID), embedding_size)
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
n= length(unique(data[,1]))
m = max(table(data[,1]))
data$order = as.character(data$order)
data$ID = as.character(data$ID)
all3 = merge(data, embeddings, by='ActivityID', all.x=T)
all3= all3[ with(all3, order(ID, order)),]
all3 = all3[,c("ID","ActivityID",names(all3)[(ncol(all3)-embedding_size+1):ncol(all3)])]
num_event = nrow(all3)
max<- m*(embedding_size)
c=unique(pre2[,c("Case","anomaly_type")]) #CHANGE
label = as.character(c[,2])
# prefix encoding
prefixL = as.numeric()
newdat2<- matrix(NA, nrow=num_event , ncol=max)
for(j in 1:num_event){
cut = all3[which(all3[1:j,1]== all3[j,1] ),-c(1:2)]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3 = data.frame(cbind(Case=as.character(all3[,1]), label= as.character(pre2$anomaly_type), newdat2))
x2= newdat3[which(prefixL == prefixL[start_index]),-(1:2)]
x2 = x2[,1:(prefixL[start_index]*embedding_size)]
}else{
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
# One-hot encoding
data1 <- fun_onehot(data)
newdat <- cbind(data[,1], data1)
newdat[,1] <- as.factor(newdat[,1])
n<- length(levels((newdat[,1]))) # the number of cases
m<-max(table((newdat[,1]))) # maximum trace length
num_act= ncol(newdat)-1
num_event = nrow(newdat)
max<- m*num_act
c=unique(pre2[,c("Case","anomaly_type")])
# prefix encoding
prefixL = as.numeric()
newdat2<- matrix(NA, nrow=num_event , ncol=max)
for(j in 1:num_event){
cut = newdat[which(newdat[1:j,1]== newdat[j,1] ),-1]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
act_save = names(newdat) #change 1
newdat3 = data.frame(cbind(Case=as.character(newdat[,1]), label= as.character(pre2$anomaly_type), newdat2))
x2= newdat3[which(prefixL == prefixL[start_index]),-(1:2)]
x2 = x2[,1:(prefixL[start_index]*num_act)]
}
#Caculate leverage
x= as.matrix(sapply(x2, as.numeric))
h_diag <- fun_itree(x)
pre[start_index, 'leverage'] = h_diag[length(h_diag)]
leverage_end <- Sys.time()
pre[start_index, 'time'] = (leverage_end-leverage_start)
pre[start_index, 'tn'] = (h_diag[length(h_diag)] > (mean(h_diag)+sd(h_diag)))
#Set escape option
if(until==0 | start_index+until>last_index){
until = last_index
}else{
until= start_index+until
}
#Start event steam
for(i in (start_index+1):until){ # last_index
print(paste("Start to calculate leverage score of ", i ,"-th event (total ",event_num," events)" ,sep=''))
leverage_start <- Sys.time()
pre2 = rbind(pre2, pre[i,])
cur_len = sum(pre2$start)
data<- pre2[,c("Case","Activity",'order')]
names(data)[1:2] <- c("ID", "ActivityID")
# Max option
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
if(cur_len > Max ){
del_case = pre2[which(pre2$start==1),'Case']
del_case = del_case[1:(cur_len-Max)]
del_case= del_case[which(!is.element(del_case, object_case))]
data = data[which(!is.element(data[,1], del_case)),]
pre3= pre2[which(!is.element(pre2[,1], del_case)),]
label = as.character(pre3[,c("anomaly_type")])
c=unique(pre3[,c("Case","anomaly_type")]) #revision2
}else{
label = as.character(pre2[,c("anomaly_type")])
pre3=pre2; c=unique(pre3[,c("Case","anomaly_type")]) #revision2
}
if(embedding_size_p>0){
num_act= length(unique(data$ActivityID))
embedding_size = round(num_act*embedding_size_p)
# embedding encoding
embeddings = fun_embedding( as.factor(data$ActivityID), embedding_size)
object_case = pre2$Case[nrow(pre2)]
object_event = pre2$Event[nrow(pre2)]
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
n= length(unique(data[,1]))
m = max(table(data[,1]))
data$order = as.character(data$order)
data$ID = as.character(data$ID)
all3 = merge(data, embeddings, by='ActivityID', all.x=T)
all3= all3[ with(all3, order(ID, order)),]
all3 = all3[,c("ID","ActivityID",names(all3)[(ncol(all3)-embedding_size+1):ncol(all3)])]
num_event = nrow(all3)
max<- m*(embedding_size)
c=unique(pre2[,c("Case","anomaly_type")]) #CHANGE
label = as.character(c[,2])
{ # update event
newdat2<- matrix(NA, nrow=num_event , ncol=max)
prefixL = as.numeric()
for(j in 1:num_event){
cut = all3[which(all3[1:j,1]== all3[j,1] ),-c(1:2)]
if(class(cut)=='numeric'){
prefixL[j] = 1
}else{
prefixL[j] = nrow(cut)
}
save2 <- as.vector(t(cut))
newdat2[j,1:length(save2)] <- save2
}
}
# Max option
if(cur_len > Max ){
del_case = pre2[which(pre2$start==1),'Case'][1:(cur_len-Max)]
del_case= del_case[which(!is.element(del_case, object_case))]
pre2 = pre2[which(!is.element(all3[,1], del_case)),]
newdat2 = newdat2[which(!is.element(all3[,1], del_case)),]
label= label[which(!is.element(all3[,1], del_case))]
prefixL= prefixL[which(!is.element(all3[,1], del_case))]
all3 = all3[which(!is.element(all3[,1], del_case)),]
}
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3 <-data.frame(cbind(Case= as.character(all3[,1]), label= label, newdat2))
x2= newdat3[which(prefixL == prefixL[length(prefixL)]),-(1:2)]
x2 = x2[,1:(prefixL[length(prefixL)]*embedding_size)]
}else{
object_case = pre3$Case[nrow(pre3)] #revision2
object_event = pre3$Event[nrow(pre3)] #revision2
data$ID <- as.factor(data$ID)
data$ActivityID <- as.factor(data$ActivityID)
#revision2
# One-hot encoding
data1 <- fun_onehot(data)
newdat <- cbind(data[,1], data1)
newdat[,1] <- as.factor(newdat[,1])
n<- length(levels((newdat[,1]))) # the number of cases
m<-max(table((newdat[,1]))) # maximum trace length
num_act= ncol(newdat)-1
num_event = nrow(newdat)
max<- m*num_act
newdat2<- matrix(NA, nrow=n , ncol=max)
prefixL = as.numeric()
for(j in 1:n){
cut = newdat[which(newdat[,1]== c[j,1] ),-1]
save2 <- as.vector(t(cut))
prefixL[j] = sum(save2)
newdat2[j,1:length(save2)] <- save2
}
CP = prefixL[which(c[,1]== object_case)]
newdat2 = newdat2[which(prefixL >= CP),]
if( length(which(prefixL >= CP)) != 1 ){
newdat2 = newdat2[, 1:(CP*num_act)]
loc = which(c[which(prefixL >= CP),1] == object_case)
newdat2[which(is.na(newdat2))] <- 0 # zero-padding
newdat2_save= newdat2
newdat3= cbind(c[which(prefixL >= CP),], newdat2)
act_save = names(newdat) #change 1
# newdat3 <-data.frame(cbind(Case= as.character(newdat[,1]), label= label, newdat2))
x2= newdat3[,-(1:2)]
}else{
x2= NA
}
}
#revision2
if(is.na(x2)){
pre[i, 'leverage'] = 0
}else{
#Calculate leverage
x= as.matrix(sapply(x2, as.numeric))
h_diag <- fun_itree(x)
pre[i, 'leverage'] = h_diag[loc]
}
leverage_end <- Sys.time()
print(paste("Anomaly score of", i ,"-th event = ", round( h_diag[loc],5), " (CaseID=",object_case,")" ,sep=''))
pre[i, 'time'] = (leverage_end-leverage_start)
pre[i, 'tn'] = (h_diag[loc] > (mean(h_diag)+sd(h_diag)))
}
return(pre)
}
}
# Stubs: the non-batch variants are not implemented in this script; their
# empty bodies return NULL, which callers of streaming_score() filter out
# with their is.null(output) check.
fun_remove_TRUE = function(input, Min,start_index, Max, until,embedding_size_p, remove_threshold ){}
fun_remove_FALSE = function(input, Min, start_index, Max, until, embedding_size_p){}
# Dispatch one streaming anomaly-scoring run to the worker implementation
# selected by the `batch` and `remove` flags, print the wall-clock duration
# of the run, and return the scored event table produced by the worker.
# NOTE(review): the default `start_index = start_index` is self-referential
# and errors if the argument is omitted, so callers must always supply it.
streaming_score = function(input, Min = 100, start_index = start_index, Max = 0, until=0, batch = TRUE ,embedding_size_p = 0, remove=TRUE, remove_threshold = 0.2){
  total_start <- Sys.time()
  if (remove == TRUE && batch == TRUE) {
    pre = fun_batch_remove_TRUE(input = input, Min = Min, start_index = start_index, Max = Max,
                                until = until, embedding_size_p = embedding_size_p,
                                remove_threshold = remove_threshold)
  } else if (remove == TRUE) {
    pre = fun_remove_TRUE(input = input, Min = Min, start_index = start_index, Max = Max,
                          until = until, embedding_size_p = embedding_size_p,
                          remove_threshold = remove_threshold)
  } else if (batch == TRUE) {
    pre = fun_batch_remove_FALSE(input = input, Min = Min, start_index = start_index, Max = Max,
                                 until = until, embedding_size_p = embedding_size_p)
  } else {
    pre = fun_remove_FALSE(input = input, Min = Min, start_index = start_index, Max = Max,
                           until = until, embedding_size_p = embedding_size_p)
  }
  total_end <- Sys.time()
  print(total_end - total_start)
  return(pre)
}
}
#Result
# Roll the scorer over the event log in windows starting every 1000 events
# (each run scores until=299 further events), keep the rows that actually
# received a score, and write the combined table to CSV.
{
start_index = which(cumsum(input$start) == 101)[1] # first event where the cumulative case-start counter reaches 101
last_index = nrow(input)
part = seq(start_index, last_index, 1000 ) # window starting points, 1000 events apart
part = part[-length(part)] # drop the last (possibly short) window
output_total = data.frame()
for(i in part){
output = streaming_score(input, Min=100, start_index= i, Max=1000, until = 299, batch=TRUE, remove= FALSE, embedding_size_p=0) # embedding_size_p=0 selects one-hot encoding
if(is.null(output) == 0 ){ # NOTE(review): works because FALSE == 0, but !is.null(output) would be clearer
output = output[order(output$timestamp),]
start = min(which(output$leverage >=0)) # NOTE(review): `start` is computed but never used
loc = which(output$leverage>=0) # keep only events that actually received a score
output = output[loc,]
output_total = rbind(output_total, output) # NOTE(review): rbind in a loop grows quadratically; acceptable at this scale
}
}
setwd("/home/jonghyeon3/extension_AD/evaluations/bs2/result") # machine-specific output path
write.csv(output_total, "result_itree_medium.csv", row.names= FALSE)
}
# plot(see$leverage, ylim= c(0,1),
# col= ifelse(see$label==1 ,'red', 'black' ), cex= ifelse(see$label==1 ,1.0, 0.5), pch= ifelse(see$label==1 ,9, 1)
# , ylab= 'Anomaly score')
#
# plot(see2$leverage, ylim= c(0,1),
# col= ifelse(see2$label==1 ,'red', 'black' ), cex= ifelse(see2$label==1 ,1.0, 0.5), pch= ifelse(see2$label==1 ,9, 1)
# , ylab= 'Anomaly score')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Irises}
\alias{Irises}
\title{R. A. Fisher's famous data on Irises}
\format{
A data frame/tibble with 150 observations on five variables
\describe{
\item{sepal_length}{sepal length (in cm)}
\item{sepal_width}{sepal width (in cm)}
\item{petal_length}{petal length (in cm)}
\item{petal_width}{petal width (in cm)}
\item{species}{a factor with levels \code{setosa}, \code{versicolor}, and \code{virginica}}
}
}
\source{
Fisher, R. A. (1936) The use of multiple measurements in taxonomic problems.
\emph{Annals of Eugenics}, \strong{7}, Part II, 179-188.
}
\usage{
Irises
}
\description{
Data for Examples 1.15 and 5.19
}
\examples{
tapply(Irises$sepal_length, Irises$species, mean)
t.test(Irises$sepal_length[Irises$species == "setosa"], conf.level = 0.99)
hist(Irises$sepal_length[Irises$species == "setosa"],
main = "Sepal length for\n Iris Setosa",
xlab = "Length (in cm)")
boxplot(sepal_length ~ species, data = Irises)
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
}
\keyword{datasets}
| /man/Irises.Rd | no_license | alanarnholt/BSDA | R | false | true | 1,218 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Irises}
\alias{Irises}
\title{R. A. Fisher's famous data on Irises}
\format{
A data frame/tibble with 150 observations on five variables
\describe{
\item{sepal_length}{sepal length (in cm)}
\item{sepal_width}{sepal width (in cm)}
\item{petal_length}{petal length (in cm)}
\item{petal_width}{petal width (in cm)}
\item{species}{a factor with levels \code{setosa}, \code{versicolor}, and \code{virginica}}
}
}
\source{
Fisher, R. A. (1936) The use of multiple measurements in taxonomic problems.
\emph{Annals of Eugenics}, \strong{7}, Part II, 179-188.
}
\usage{
Irises
}
\description{
Data for Examples 1.15 and 5.19
}
\examples{
tapply(Irises$sepal_length, Irises$species, mean)
t.test(Irises$sepal_length[Irises$species == "setosa"], conf.level = 0.99)
hist(Irises$sepal_length[Irises$species == "setosa"],
main = "Sepal length for\n Iris Setosa",
xlab = "Length (in cm)")
boxplot(sepal_length ~ species, data = Irises)
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
}
\keyword{datasets}
|
# --- Ad-hoc driver: rolling ETS forecast evaluation on IRI category data ---
# NOTE(review): rm(list=ls()) wipes the caller's workspace, and the
# hard-coded Dropbox paths / setwd() calls make this script machine-specific.
rm(list=ls())
getwd()
#setwd()
source('D:/Dropbox/Dropbox/HEC/Code/exp1.1/.Rprofile')
library("forecast") ; library("xts") ; library("data.table") ; library("reshape2")
library("ggplot2")
# Project helpers: data adaptor plus the ETS fitting functions.
setwd(pth.dropbox.code) ; source("./DataAdaptor/00_data_adaptor_test.R")
setwd(pth.dropbox.code) ; source("./ModelFitting/ets/ets_functions.R")
#============== DATA LOADING =====================
# get the necessary data for a specific item (weekly and 445-calendar views)
spw = f_da.reg.cat.test(par.category="beer", par.periodicity="weekly")
spm = f_da.reg.cat.test(par.category="beer", par.periodicity="445")
# NOTE(review): the second assignment overwrites the first, so the
# weekly-based item list is never used.
items = spw[!is.na(IRI_KEY),as.character(unique(fc.item))]
items = spm[,as.character(unique(fc.item))]
#=============== TESTING =================
# Both smoke-test switches default to off; flip to TRUE to run them.
test.single = FALSE
test.multi = FALSE
if (test.single == TRUE) {
item.id=2
ssm = spm[fc.item == items[item.id]]
this.roll = f_run.item(ssm, h=3)
Err = this.roll$Err
}
if (test.multi == TRUE) {
multi.item.results =
lapply(1:2, #length(items)
function(i) { print(items[i])
ssm = spm[fc.item == items[i]]
this.roll = f_run.item(ssm, h = 3)
Err = this.roll$Err
Err[,fc.item := items[i]]
})
#saveRDS(object=rbindlist(multi.item.results),file="errors.rds")
}
# Summarise previously saved forecast errors: median relative absolute error
# per horizon k, with a hierarchy level derived from "/" counts in fc.item.
library(stringr)
setwd(pth.dropbox.data) ; Err2 = readRDS("errors.rds")
Err3 = data.table(dcast(data=Err2,formula=fc.item~k,fun.aggregate=median,value.var="rae"))
Err3[,lvl := str_count( fc.item, "/")+1 ]
Err3.melt = data.table(melt(Err3,variable.name = "k", id=c("fc.item","lvl")))
qplot(data = Err3.melt,y=value, x=factor(lvl), geom="boxplot") + geom_jitter()
qplot(data = Err3.melt[lvl>1],x=value, colour=factor(lvl), geom="density")
qplot(data = Err3.melt,x=value, geom="histogram") + facet_wrap(facets=~lvl, ncol=1)
#f_summary.plots(Err)
#Err
| /ModelFitting/ets/ets_testing.R | no_license | wellermatt/exp1.1 | R | false | false | 1,917 | r |
rm(list=ls())
getwd()
#setwd()
source('D:/Dropbox/Dropbox/HEC/Code/exp1.1/.Rprofile')
library("forecast") ; library("xts") ; library("data.table") ; library("reshape2")
library("ggplot2")
setwd(pth.dropbox.code) ; source("./DataAdaptor/00_data_adaptor_test.R")
setwd(pth.dropbox.code) ; source("./ModelFitting/ets/ets_functions.R")
#============== DATA LOADING =====================
# get the necessary data for a specific item
spw = f_da.reg.cat.test(par.category="beer", par.periodicity="weekly")
spm = f_da.reg.cat.test(par.category="beer", par.periodicity="445")
items = spw[!is.na(IRI_KEY),as.character(unique(fc.item))]
items = spm[,as.character(unique(fc.item))]
#=============== TESTING =================
test.single = FALSE
test.multi = FALSE
if (test.single == TRUE) {
item.id=2
ssm = spm[fc.item == items[item.id]]
this.roll = f_run.item(ssm, h=3)
Err = this.roll$Err
}
if (test.multi == TRUE) {
multi.item.results =
lapply(1:2, #length(items)
function(i) { print(items[i])
ssm = spm[fc.item == items[i]]
this.roll = f_run.item(ssm, h = 3)
Err = this.roll$Err
Err[,fc.item := items[i]]
})
#saveRDS(object=rbindlist(multi.item.results),file="errors.rds")
}
library(stringr)
setwd(pth.dropbox.data) ; Err2 = readRDS("errors.rds")
Err3 = data.table(dcast(data=Err2,formula=fc.item~k,fun.aggregate=median,value.var="rae"))
Err3[,lvl := str_count( fc.item, "/")+1 ]
Err3.melt = data.table(melt(Err3,variable.name = "k", id=c("fc.item","lvl")))
qplot(data = Err3.melt,y=value, x=factor(lvl), geom="boxplot") + geom_jitter()
qplot(data = Err3.melt[lvl>1],x=value, colour=factor(lvl), geom="density")
qplot(data = Err3.melt,x=value, geom="histogram") + facet_wrap(facets=~lvl, ncol=1)
#f_summary.plots(Err)
#Err
|
## these 2 functions together will cache the inverse of a matrixdown
## this function exposes 4 member functions: set, get, setInverse, getInverse
## this stores the inverse of a matrix in cache
# Wrap a matrix together with a cache slot for its inverse.
# Returns a list of four accessor closures:
#   set(y)        -- replace the stored matrix and drop any cached inverse
#   get()         -- return the stored matrix
#   setMatrix(v)  -- store a computed inverse in the cache
#   getMatrix()   -- return the cached inverse, or NULL if none is stored
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL    # the stored matrix changed, so the cache is stale
  }
  get <- function() x
  setMatrix <- function(solve) cached_inverse <<- solve
  getMatrix <- function() cached_inverse
  list(set = set,
       get = get,
       setMatrix = setMatrix,
       getMatrix = getMatrix)
}
## returns the inverse value from cache or adds the inverse to cache and displays results
## from forums steps to check
## a <- makeCacheMatrix(matrix(c(-1, -2, 1, 1), 2,2))
## cacheSolve(a) returns the inverse of the matrix first time throw
## cacheSolve(a) returns "getting cached data" and
# Return the inverse of the special "matrix" object built by makeCacheMatrix.
# A previously computed inverse is served straight from the cache (with a
# message); otherwise the inverse is computed with solve(), stored back into
# the cache, and returned. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getMatrix()
  if (!is.null(cached)) {
    message("Getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setMatrix(inverse)
  inverse
}
| /cachematrix.R | no_license | rlharmon/ProgrammingAssignment2 | R | false | false | 1,045 | r | ## these 2 functions together will cache the inverse of a matrixdown
## this function exposes 4 member functions: set, get, setInverse, getInverse
## this stores the inverse of a matrix in cache
## Constructor: wraps matrix `x` with getters/setters plus a cache slot (`m`)
## for its inverse, returning the four accessor closures in a named list.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL # cache for the inverse; NULL means "not yet computed"
set <- function(y){
x <<- y
m <<- NULL # a new matrix invalidates any cached inverse
}
get <- function() x
setMatrix <- function(solve) m <<- solve # store a computed inverse
getMatrix <- function() m # NULL until setMatrix() has been called
list(set = set
,get = get
,setMatrix = setMatrix
,getMatrix = getMatrix)
}
## returns the inverse value from cache or adds the inverse to cache and displays results
## from forums steps to check
## a <- makeCacheMatrix(matrix(c(-1, -2, 1, 1), 2,2))
## cacheSolve(a) returns the inverse of the matrix first time throw
## cacheSolve(a) returns "getting cached data" and
cacheSolve <- function(x, ...) {
t <- x$getMatrix()
if(!is.null(t)){
message("Getting cached data")
return(t)
}
data <- x$get()
t <- solve(data, ...)
x$setMatrix(t)
t
}
|
library(compositions)
### Name: pairwiseplot
### Title: Creates a paneled plot like pairs for two different datasets.
### Aliases: pairwisePlot pairwisePlot.default
### Keywords: hplot
### ** Examples
X <- rnorm(100)
Y <- rnorm.acomp(100,acomp(c(A=1,B=1,C=1)),0.1*diag(3))+acomp(t(outer(c(0.2,0.3,0.4),X,"^")))
pairs(cbind(ilr(Y),X),panel=function(x,y,...) {points(x,y,...);abline(lm(y~x))})
pairs(cbind(balance(Y,~A/B/C),X),
panel=function(x,y,...) {points(x,y,...);abline(lm(y~x))})
pairwisePlot(balance(Y,~A/B/C),X)
pairwisePlot(X,balance(Y,~A/B/C),
panel=function(x,y,...) {plot(x,y,...);abline(lm(y~x))})
pairwisePlot(X,balance01(Y,~A/B/C))
# A function to extract a portion representation of subcompsitions
# with two elements:
# For each two-sided formula A~B supplied in `...` (or `all`), return the
# per-row closed proportion A/(A+B) of the two-part subcomposition {A, B}.
# Columns of the result are labelled "A,B".
# Fix: the original fetched the numerator column twice under two names
# (`a` and `b` both looked up x[[2]]), so its a/(b+c) was really A/(A+B);
# the redundant lookup is removed with identical behaviour.
subComps <- function(X, ..., all = list(...)) {
  X <- oneOrDataset(X)
  nams <- sapply(all, function(x) paste(x[[2]], x[[3]], sep = ","))
  val <- sapply(all, function(x) {
    num <- X[, match(as.character(x[[2]]), colnames(X))]
    den <- X[, match(as.character(x[[3]]), colnames(X))]
    num / (num + den)
  })
  colnames(val) <- nams
  val
}
pairwisePlot(X,subComps(Y,A~B,A~C,B~C))
## using Hydrochemical data set as illustration of mixed possibilities
data(Hydrochem)
xc = acomp(Hydrochem[,c("Ca","Mg","Na","K")])
fk = Hydrochem$River
pH = -log10(Hydrochem$H)
covars = data.frame(pH, River=fk)
pairwisePlot(clr(xc), pH)
pairwisePlot(clr(xc), pH, col=fk)
pairwisePlot(pH, ilr(xc), add.line=TRUE)
pairwisePlot(covars, ilr(xc), add.line=TRUE, line.col="magenta")
pairwisePlot(clr(xc), covars, add.robust=TRUE)
| /data/genthat_extracted_code/compositions/examples/pairwisePlot.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,655 | r | library(compositions)
### Name: pairwiseplot
### Title: Creates a paneled plot like pairs for two different datasets.
### Aliases: pairwisePlot pairwisePlot.default
### Keywords: hplot
### ** Examples
X <- rnorm(100)
Y <- rnorm.acomp(100,acomp(c(A=1,B=1,C=1)),0.1*diag(3))+acomp(t(outer(c(0.2,0.3,0.4),X,"^")))
pairs(cbind(ilr(Y),X),panel=function(x,y,...) {points(x,y,...);abline(lm(y~x))})
pairs(cbind(balance(Y,~A/B/C),X),
panel=function(x,y,...) {points(x,y,...);abline(lm(y~x))})
pairwisePlot(balance(Y,~A/B/C),X)
pairwisePlot(X,balance(Y,~A/B/C),
panel=function(x,y,...) {plot(x,y,...);abline(lm(y~x))})
pairwisePlot(X,balance01(Y,~A/B/C))
# A function to extract a portion representation of subcompsitions
# with two elements:
subComps <- function(X,...,all=list(...)) {
X <- oneOrDataset(X)
nams <- sapply(all,function(x) paste(x[[2]],x[[3]],sep=","))
val <- sapply(all,function(x){
a = X[,match(as.character(x[[2]]),colnames(X)) ]
b = X[,match(as.character(x[[2]]),colnames(X)) ]
c = X[,match(as.character(x[[3]]),colnames(X)) ]
return(a/(b+c))
})
colnames(val)<-nams
val
}
pairwisePlot(X,subComps(Y,A~B,A~C,B~C))
## using Hydrochemical data set as illustration of mixed possibilities
data(Hydrochem)
xc = acomp(Hydrochem[,c("Ca","Mg","Na","K")])
fk = Hydrochem$River
pH = -log10(Hydrochem$H)
covars = data.frame(pH, River=fk)
pairwisePlot(clr(xc), pH)
pairwisePlot(clr(xc), pH, col=fk)
pairwisePlot(pH, ilr(xc), add.line=TRUE)
pairwisePlot(covars, ilr(xc), add.line=TRUE, line.col="magenta")
pairwisePlot(clr(xc), covars, add.robust=TRUE)
|
# Pressure from a Peng-Robinson-form cubic equation of state,
# P = RT/(v-b) - a/(v(v+b)+b(v-b)); presumably for CO2 given the file name.
# Units appear to be bar/L/mol/K given R = 0.083145 -- TODO confirm a and b.
vbar=0.05 # molar volume
T=280 # temperature (K); NOTE(review): `T` masks R's shorthand for TRUE
a=4.192 # EOS attraction parameter
b=0.02665 # EOS co-volume parameter
R=0.083145 # gas constant in L.bar/(mol.K)
P=R*T/(vbar-b)-a/(vbar*(vbar+b)+b*(vbar-b))
cat(P,"\n") | /co2.R | no_license | d42knight/chem160homework6 | R | false | false | 100 | r | vbar=0.05
T=280
a=4.192
b=0.02665
R=0.083145
P=R*T/(vbar-b)-a/(vbar*(vbar+b)+b*(vbar-b))
cat(P,"\n") |
# setup code block
# Install any missing packages once, then attach them all in the original
# attach order. Fixes in this version:
#  - the old `if (!require(pkg)) install.packages(pkg)` pattern never attached
#    a package immediately after installing it (require() had already failed);
#  - ggplot2 and tm were attached without any install check;
#  - stringr was attached twice.
pkgs <- c("stringr", "udpipe", "textrank", "lattice", "igraph", "ggraph",
          "ggplot2", "wordcloud", "tm", "RColorBrewer", "shiny",
          "shinydashboard")
missing_pkgs <- pkgs[!pkgs %in% rownames(installed.packages())]
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs)
}
# Attach every package; character.only is required when the name is a string.
invisible(lapply(pkgs, library, character.only = TRUE))
| /dependencies.r | no_license | ankitasaraf/Shiny_text_an | R | false | false | 786 | r | # setup code block
if (!require(stringr)){install.packages("stringr")}
if (!require(udpipe)){install.packages("udpipe")}
if (!require(textrank)){install.packages("textrank")}
if (!require(lattice)){install.packages("lattice")}
if (!require(igraph)){install.packages("igraph")}
if (!require(ggraph)){install.packages("ggraph")}
if (!require(wordcloud)){install.packages("wordcloud")}
if (!require(RColorBrewer)){install.packages("RColorBrewer")}
if (!require(shiny)){install.packages("shiny")}
if (!require(shinydashboard)){install.packages("shinydashboard")}
library(stringr)
library(udpipe)
library(textrank)
library(lattice)
library(igraph)
library(ggraph)
library(ggplot2)
library(wordcloud)
library(stringr)
library(tm)
library(RColorBrewer)
library(shiny)
library(shinydashboard)
|
#
# boxplot_monthly_compare_runs.R
#
#' Box-and-whisker plots of baseline and scenario simulated monthly averaged nutrient and plankton data with credible intervals derived from Monte Carlo StrathE2E runs.
#'
#' Creates a multi-panel plot comparing a range of simulated monthly averaged nutrient and plankton data from a baseline and a scenario mode run, with the distribution of credible values
#' generated by the e2e_run_mc() Monte Carlo function.
#'
#' For details of how the distribution of credible output values from StrathE2E are calculated see help(e2e_run_mc).
#'
#' The function plots a multi-panel page of box-and-whisker plots showing the medians and variability ranges (quartiles as box-and-whisker) of a range of monthly averaged nutrient and plankton data from the final year of a baseline run
#' (shown in black), alongside comparable box-and-whisker plots (shown in red) of the same measures derived from the final year of a scenario run. The credible intervals for each case need ot be generted by the Monte Carlo methodology (e2e_run_mc() function).
#'
#' Optionally the function can read an example data set for one of the two North Sea model variants supplied with the package.
#'
#' @param model1 R-list object defining the baseline model configuration compiled by the e2e_read() function.
#' @param ci.data1 Logical. If TRUE plot credible intervals around model results based on Monte Carlo simulation withteh e2e_run_mc() function, (default=FALSE).
#' @param use.saved1 Logical. If TRUE use data from a prior user-defined run held as csv files data in the current results folder, (default=FALSE).
#' @param use.example1 Logical. If TRUE use pre-computed example data from the internal North Sea model as the baseline rather than user-generated data, (default=FALSE).
#' @param results1 R-list object of baseline model output generated by the e2e_run(), (default=NULL).
#' @param model2 R-list object defining the baseline model configuration compiled by the e2e_read() function.
#' @param ci.data2 Logical. If TRUE plot credible intervals around model results based on Monte Carlo simulation withteh e2e_run_mc() function, (default=FALSE).
#' @param use.saved2 Logical. If TRUE use data from a prior user-defined run held as csv files data in the current results folder, (default=FALSE).
#' @param use.example2 Logical. If TRUE use pre-computed example data from the internal North Sea model as the scenario rather than user-generated data, (default=FALSE).
#' @param results2 R-list object of scenario model output generated by the e2e_run(), (default=NULL).
#'
#' @return Graphical display in a new graphics window.
#'
#' @noRd
#
# ---------------------------------------------------------------------
# | |
# | Authors: Mike Heath, Ian Thurlbeck |
# | Department of Mathematics and Statistics |
# | University of Strathclyde, Glasgow |
# | |
# | Date of this version: May 2020 |
# | |
# ---------------------------------------------------------------------
boxplot_monthly_compare_runs <- function(model1, ci.data1=FALSE, use.saved1=FALSE, use.example1=FALSE, results1=NULL,
model2, ci.data2=FALSE, use.saved2=FALSE, use.example2=FALSE, results2=NULL) {
start_par = par()$mfrow
on.exit(par(mfrow = start_par))
resultsdir1 <- elt(model1, "setup", "resultsdir")
model.ident1 <- elt(model1, "setup", "model.ident")
model.path1 <- elt(model1, "setup", "model.path")
model.name1 <- elt(model1, "setup", "model.name")
model.variant1 <- elt(model1, "setup", "model.variant")
resultsdir2 <- elt(model2, "setup", "resultsdir")
model.ident2 <- elt(model2, "setup", "model.ident")
model.path2 <- elt(model2, "setup", "model.path")
model.name2 <- elt(model2, "setup", "model.name")
model.variant2 <- elt(model2, "setup", "model.variant")
#Read the observed data file
#Format expected = 7 columns
#Month Variable median lower_centile upper_centile Units low_cent_value upp_cent_value Comments
#The variable names expected are:
#surface_nitrate
#deep_nitrate
#surface_ammonia
#deep_ammonia
#surface_chlorophyll
#omniv_zooplankton
#carniv_zooplankton
#larvae_susp_dep_benthos
#larvae_carn_scav_benthos
# obstargetdataset <- get.model.file(model.path, TARGET_DATA_DIR, file.pattern=MONTHLY_TARGET_DATA)
corefilename<-"CredInt_processed_monthly_mass"
monlab<-c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
if(ci.data1==TRUE){
if(use.example1==TRUE){
credintervaldata1 <- get.example.results(model.name1, model.variant1, corefilename, CREDINT_DIR)
}
if(use.example1==FALSE){
credpath1 <- makepath(resultsdir1, CREDINT_DIR)
credfile1 <- csvname(credpath1, corefilename, model.ident1)
if (! file.exists(credfile1)) {
message("Error: cannot find credible interval output file: ", credfile1)
stop("Please run the Monte Carlo function!\n")
}
message("Reading credible interval processed data from '", credfile1, "'")
credintervaldata1 <- readcsv(credfile1, row.names=1) # first column is row names
}
}
if(ci.data2==TRUE){
if(use.example2==TRUE){
credintervaldata2 <- get.example.results(model.name2, model.variant2, corefilename, CREDINT_DIR)
}
if(use.example2==FALSE){
credpath2 <- makepath(resultsdir2, CREDINT_DIR)
credfile2 <- csvname(credpath2, corefilename, model.ident2)
if (! file.exists(credfile2)) {
message("Error: cannot find credible interval output file: ", credfile2)
stop("Please run the Monte Carlo function!\n")
}
message("Reading credible interval processed data from '", credfile2, "'")
credintervaldata2 <- readcsv(credfile2, row.names=1) # first column is row names
}
}
if(ci.data1==FALSE){
if(use.saved1==TRUE){
datafile1 <- csvname(resultsdir1, "model_monthlyresults", model.ident1)
print(paste("Using baseline data held in a file ",datafile1," from a past model run"))
check.exists(datafile1)
modelmonthly1<-readcsv(datafile1)
}
if(use.saved1==FALSE){
modelmonthly1 <- elt(results1, "final.year.outputs", "monthly.averages")
}
}
if(ci.data2==FALSE){
if(use.saved2==TRUE){
datafile2 <- csvname(resultsdir2, "model_monthlyresults", model.ident2)
print(paste("Using scenario data held in a file ",datafile2," from a past model run"))
check.exists(datafile2)
modelmonthly2<-readcsv(datafile2)
}
if(use.saved2==FALSE){
modelmonthly2 <- elt(results2, "final.year.outputs", "monthly.averages")
}
}
# --------------------------------------------------------------------------
if(ci.data1==FALSE){
#convert modelmonthly into credintervaldata format
credintervaldata1 <- data.frame(rep(rep(NA,6*ncol(modelmonthly1))))
for(jj in 2:12){
credintervaldata1[,jj]<-rep(rep(NA,6*ncol(modelmonthly1)))
}
colnames(credintervaldata1)<-c("1","2","3","4","5","6","7","8","9","10","11","12")
for(jj in 1:ncol(modelmonthly1)){
nameset<-c(paste((names(modelmonthly1))[jj],"-maxlik",sep=""),
paste((names(modelmonthly1))[jj],"-0.005",sep=""),
paste((names(modelmonthly1))[jj],"-0.25",sep=""),
paste((names(modelmonthly1))[jj],"-0.5",sep=""),
paste((names(modelmonthly1))[jj],"-0.75",sep=""),
paste((names(modelmonthly1))[jj],"-0.995",sep="") )
if(jj==1) fullnameset<-nameset
if(jj>1) fullnameset<-c(fullnameset,nameset)
}
rownames(credintervaldata1)<-fullnameset
for(jj in 1:ncol(modelmonthly1)){
for(kk in 1:6){
credintervaldata1[(((jj-1)*6)+kk),1:12] <- as.numeric(modelmonthly1[,jj])
}
}
}
if(ci.data2==FALSE){
#convert modelmonthly into credintervaldata format
credintervaldata2 <- data.frame(rep(rep(NA,6*ncol(modelmonthly2))))
for(jj in 2:12){
credintervaldata2[,jj]<-rep(rep(NA,6*ncol(modelmonthly2)))
}
colnames(credintervaldata2)<-c("1","2","3","4","5","6","7","8","9","10","11","12")
for(jj in 1:ncol(modelmonthly2)){
nameset<-c(paste((names(modelmonthly2))[jj],"-maxlik",sep=""),
paste((names(modelmonthly2))[jj],"-0.005",sep=""),
paste((names(modelmonthly2))[jj],"-0.25",sep=""),
paste((names(modelmonthly2))[jj],"-0.5",sep=""),
paste((names(modelmonthly2))[jj],"-0.75",sep=""),
paste((names(modelmonthly2))[jj],"-0.995",sep="") )
if(jj==1) fullnameset<-nameset
if(jj>1) fullnameset<-c(fullnameset,nameset)
}
rownames(credintervaldata2)<-fullnameset
for(jj in 1:ncol(modelmonthly2)){
for(kk in 1:6){
credintervaldata2[(((jj-1)*6)+kk),1:12] <- as.numeric(modelmonthly2[,jj])
}
}
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# BASELINE MODEL DATA:
#Generate the list objects needed by the bxp plotting function
for(iii in 1:9){
credrows<- seq( ((iii-1)*(5+1))+2,((iii-1)*(5+1))+(5+1) )
modeldata2plot<-(credintervaldata1[credrows,1])
for(jj in 2:12) { modeldata2plot<-c(modeldata2plot,(credintervaldata1[credrows,jj]))}
array2plot<- array(dim=c(5,12),modeldata2plot)
bxpdata<-list(stats=array2plot,n=rep(100,12),conf=NULL,out=numeric(length=0),names=monlab)
# bxp(bxpdata,boxwex=0.3,at=seq(1,12)+0.35,xlim=c(0,13),ylim=c(0,max(modeldata2plot)*1.1))
if(iii==1) bxpdata1<-bxpdata
if(iii==2) bxpdata2<-bxpdata
if(iii==3) bxpdata3<-bxpdata
if(iii==4) bxpdata4<-bxpdata
if(iii==5) bxpdata5<-bxpdata
if(iii==6) bxpdata6<-bxpdata
if(iii==7) bxpdata7<-bxpdata
if(iii==8) bxpdata8<-bxpdata
if(iii==9) bxpdata9<-bxpdata
}
bxpdata10<-list(stats=(bxpdata9$stats+bxpdata8$stats),n=rep(100,12),conf=NULL,out=numeric(length=0),names=monlab)
#Combines the two types of benthic larvae
#Package all the bxpdata objects up into a list t pass into the plotting function
bxpdata.all.1<-list(bxpdata1=bxpdata1,
bxpdata2=bxpdata2,
bxpdata3=bxpdata3,
bxpdata4=bxpdata4,
bxpdata5=bxpdata5,
bxpdata6=bxpdata6,
bxpdata7=bxpdata7,
bxpdata8=bxpdata8,
bxpdata9=bxpdata9,
bxpdata10=bxpdata10)
# SCENARIO MODEL DATA:
#Generate the list objects needed by the bxp plotting function
for(iii in 1:9){
credrows<- seq( ((iii-1)*(5+1))+2,((iii-1)*(5+1))+(5+1) )
modeldata2plot<-(credintervaldata2[credrows,1])
for(jj in 2:12) { modeldata2plot<-c(modeldata2plot,(credintervaldata2[credrows,jj]))}
array2plot<- array(dim=c(5,12),modeldata2plot)
bxpdata<-list(stats=array2plot,n=rep(100,12),conf=NULL,out=numeric(length=0),names=monlab)
# bxp(bxpdata,boxwex=0.3,at=seq(1,12)+0.35,xlim=c(0,13),ylim=c(0,max(modeldata2plot)*1.1))
if(iii==1) bxpdata1<-bxpdata
if(iii==2) bxpdata2<-bxpdata
if(iii==3) bxpdata3<-bxpdata
if(iii==4) bxpdata4<-bxpdata
if(iii==5) bxpdata5<-bxpdata
if(iii==6) bxpdata6<-bxpdata
if(iii==7) bxpdata7<-bxpdata
if(iii==8) bxpdata8<-bxpdata
if(iii==9) bxpdata9<-bxpdata
}
bxpdata10<-list(stats=(bxpdata9$stats+bxpdata8$stats),n=rep(100,12),conf=NULL,out=numeric(length=0),names=monlab)
#Combines the two types of benthic larvae
#Package all the bxpdata objects up into a list t pass into the plotting function
bxpdata.all.2<-list(bxpdata1=bxpdata1,
bxpdata2=bxpdata2,
bxpdata3=bxpdata3,
bxpdata4=bxpdata4,
bxpdata5=bxpdata5,
bxpdata6=bxpdata6,
bxpdata7=bxpdata7,
bxpdata8=bxpdata8,
bxpdata9=bxpdata9,
bxpdata10=bxpdata10)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
plotdata_mco<-function(bxpdata.all.1, bxpdata.all.2, obspar, monlab){
	# Draw one panel of the baseline-vs-scenario comparison figure.
	# obspar (1..10) selects which variable to show: element "bxpdata<obspar>"
	# of each input list (baseline drawn black, scenario drawn red).
	selected <- paste0("bxpdata", obspar)
	bxpdata1 <- bxpdata.all.1[[selected]]   # baseline run
	bxpdata2 <- bxpdata.all.2[[selected]]   # scenario run
	modplot1 <- bxpdata1$stats
	modplot2 <- bxpdata2$stats
	# Common y-axis limit. Surface/deep nitrate panels (1,2) share one scale,
	# surface/deep ammonia panels (3,4) share another; all remaining panels
	# are scaled on their own data from both runs.
	if (obspar == 1 || obspar == 2) {
		ymax <- max(0,
			max(as.data.frame(bxpdata.all.1$bxpdata1$stats)),
			max(as.data.frame(bxpdata.all.1$bxpdata2$stats)),
			max(as.data.frame(bxpdata.all.2$bxpdata1$stats)),
			max(as.data.frame(bxpdata.all.2$bxpdata2$stats)), na.rm = TRUE)
	} else if (obspar == 3 || obspar == 4) {
		ymax <- max(0,
			max(as.data.frame(bxpdata.all.1$bxpdata3$stats)),
			max(as.data.frame(bxpdata.all.1$bxpdata4$stats)),
			max(as.data.frame(bxpdata.all.2$bxpdata3$stats)),
			max(as.data.frame(bxpdata.all.2$bxpdata4$stats)), na.rm = TRUE)
	} else {
		ymax <- max(0, max(as.data.frame(modplot1), na.rm = TRUE),
			max(as.data.frame(modplot2), na.rm = TRUE), na.rm = TRUE)
	}
	if (is.na(ymax) || ymax == 0) ymax <- 1   # guard against an empty/NA scale
	# Baseline boxes in black; month labels along the x-axis.
	bxp(bxpdata1, boxwex = 0.25, at = 1:12, yaxt = "n", ylim = c(0, ymax * 1.1),
		show.names = FALSE, las = 1, cex.axis = 1.1,
		boxcol = "black", whiskcol = "black", whisklty = "solid",
		medcol = "black", staplecol = "black")
	axis(labels = monlab, at = seq(1, 12), side = 1, las = 1, cex.axis = 1.1, padj = -0.55)
	# Y-axis annotation: panel name plus units (chlorophyll is mg.m^-3,
	# every other panel is mMN.m^-3).
	panel_names <- c("Surf.nitrate", "Deep nitrate", "Surf.ammonia", "Deep ammonia",
		"Chlorophyll", "Omniv.zoo", "Carniv.zoo", "Larv.s/d.benth.",
		"Larv.c/s.benth.", "Benthos larvae (all)")
	axis(side = 2, cex.lab = 1.0, las = 1)
	mtext(panel_names[obspar], cex = 0.8, side = 2, line = 4)
	if (obspar == 5) {
		mtext(bquote(mg.m^-3), cex = 0.6, side = 2, line = 2.7)
	} else {
		mtext(bquote(mMN.m^-3), cex = 0.6, side = 2, line = 2.7)
	}
	# Overlay the scenario boxes in red, offset to the right of the baseline.
	bxp(bxpdata2, add = TRUE, boxwex = 0.25, at = 1:12 + 0.35, yaxt = "n", xaxt = "n",
		boxcol = "red", whiskcol = "red", whisklty = "solid", medcol = "red", staplecol = "red")
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
par(mfrow=c(4,2))
par(mar=c(3,6,0.6,0.5))
plotdata_mco(bxpdata.all.1, bxpdata.all.2, 1, monlab)
plotdata_mco(bxpdata.all.1, bxpdata.all.2, 2, monlab)
plotdata_mco(bxpdata.all.1, bxpdata.all.2, 3, monlab)
plotdata_mco(bxpdata.all.1, bxpdata.all.2, 4, monlab)
plotdata_mco(bxpdata.all.1, bxpdata.all.2, 5, monlab)
plotdata_mco(bxpdata.all.1, bxpdata.all.2, 6, monlab)
plotdata_mco(bxpdata.all.1, bxpdata.all.2, 7, monlab)
#plotdata_mco(bxpdata.all.1, bxpdata.all.2, 8, monlab)
#plotdata_mco(bxpdata.all.1, bxpdata.all.2, 9, monlab)
plotdata_mco(bxpdata.all.1, bxpdata.all.2, 10, monlab)
legend(grconvertX(0.425, "ndc", "user"), grconvertY(0.045, "ndc", "user"),
c("baseline","scenario"), fill = c("black","red"), ncol=2, bty="n", xpd = NA)
}
| /R/boxplot_monthly_compare_runs.R | no_license | cran/StrathE2E2 | R | false | false | 16,629 | r | #
# boxplot_monthly_compare_runs.R
#
#' Box-and-whisker plots of baseline and scenario simulated monthly averaged nutrient and plankton data with credible intervals derived from Monte Carlo StrathE2E runs.
#'
#' Creates a multi-panel plot comparing a range of simulated monthly averaged nutrient and plankton data from a baseline and a scenario mode run, with the distribution of credible values
#' generated by the e2e_run_mc() Monte Carlo function.
#'
#' For details of how the distribution of credible output values from StrathE2E are calculated see help(e2e_run_mc).
#'
#' The function plots a multi-panel page of box-and-whisker plots showing the medians and variability ranges (quartiles as box-and-whisker) of a range of monthly averaged nutrient and plankton data from the final year of a baseline run
#' (shown in black), alongside comparable box-and-whisker plots (shown in red) of the same measures derived from the final year of a scenario run. The credible intervals for each case need ot be generted by the Monte Carlo methodology (e2e_run_mc() function).
#'
#' Optionally the function can read an example data set for one of the two North Sea model variants supplied with the package.
#'
#' @param model1 R-list object defining the baseline model configuration compiled by the e2e_read() function.
#' @param ci.data1 Logical. If TRUE plot credible intervals around model results based on Monte Carlo simulation with the e2e_run_mc() function, (default=FALSE).
#' @param use.saved1 Logical. If TRUE use data from a prior user-defined run held as csv files data in the current results folder, (default=FALSE).
#' @param use.example1 Logical. If TRUE use pre-computed example data from the internal North Sea model as the baseline rather than user-generated data, (default=FALSE).
#' @param results1 R-list object of baseline model output generated by the e2e_run(), (default=NULL).
#' @param model2 R-list object defining the baseline model configuration compiled by the e2e_read() function.
#' @param ci.data2 Logical. If TRUE plot credible intervals around model results based on Monte Carlo simulation withteh e2e_run_mc() function, (default=FALSE).
#' @param use.saved2 Logical. If TRUE use data from a prior user-defined run held as csv files data in the current results folder, (default=FALSE).
#' @param use.example2 Logical. If TRUE use pre-computed example data from the internal North Sea model as the scenario rather than user-generated data, (default=FALSE).
#' @param results2 R-list object of scenario model output generated by the e2e_run(), (default=NULL).
#'
#' @return Graphical display in a new graphics window.
#'
#' @noRd
#
# ---------------------------------------------------------------------
# | |
# | Authors: Mike Heath, Ian Thurlbeck |
# | Department of Mathematics and Statistics |
# | University of Strathclyde, Glasgow |
# | |
# | Date of this version: May 2020 |
# | |
# ---------------------------------------------------------------------
boxplot_monthly_compare_runs <- function(model1, ci.data1=FALSE, use.saved1=FALSE, use.example1=FALSE, results1=NULL,
                                         model2, ci.data2=FALSE, use.saved2=FALSE, use.example2=FALSE, results2=NULL) {
	# Build an 8-panel page of monthly box-and-whisker plots comparing a baseline
	# run (model1/results1, drawn black) against a scenario run (model2/results2,
	# drawn red). Each run's data come from one of three sources, selected by its
	# flags:
	#   ci.dataX=TRUE,  use.exampleX=TRUE  -> packaged example credible-interval data
	#   ci.dataX=TRUE,  use.exampleX=FALSE -> credible-interval csv written by the Monte Carlo run
	#   ci.dataX=FALSE, use.savedX=TRUE    -> "model_monthlyresults" csv saved by a past run
	#   ci.dataX=FALSE, use.savedX=FALSE   -> monthly.averages taken from the resultsX list
	# Remember the caller's plot layout and restore it on exit.
	start_par = par()$mfrow
	on.exit(par(mfrow = start_par))
	# Unpack results-folder and identifier settings for both model configurations.
	resultsdir1 <- elt(model1, "setup", "resultsdir")
	model.ident1 <- elt(model1, "setup", "model.ident")
	model.path1 <- elt(model1, "setup", "model.path")
	model.name1 <- elt(model1, "setup", "model.name")
	model.variant1 <- elt(model1, "setup", "model.variant")
	resultsdir2 <- elt(model2, "setup", "resultsdir")
	model.ident2 <- elt(model2, "setup", "model.ident")
	model.path2 <- elt(model2, "setup", "model.path")
	model.name2 <- elt(model2, "setup", "model.name")
	model.variant2 <- elt(model2, "setup", "model.variant")
	#Read the observed data file
	#Format expected = 7 columns
	#Month Variable median lower_centile upper_centile Units low_cent_value upp_cent_value Comments
	#The variable names expected are:
	#surface_nitrate
	#deep_nitrate
	#surface_ammonia
	#deep_ammonia
	#surface_chlorophyll
	#omniv_zooplankton
	#carniv_zooplankton
	#larvae_susp_dep_benthos
	#larvae_carn_scav_benthos
#	obstargetdataset <- get.model.file(model.path, TARGET_DATA_DIR, file.pattern=MONTHLY_TARGET_DATA)
	# Core file name shared by the credible-interval csv outputs used here.
	corefilename<-"CredInt_processed_monthly_mass"
	monlab<-c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
	# --- Load credible-interval data for the BASELINE run, if requested -------
	if(ci.data1==TRUE){
		if(use.example1==TRUE){
			credintervaldata1 <- get.example.results(model.name1, model.variant1, corefilename, CREDINT_DIR)
		}
		if(use.example1==FALSE){
			credpath1 <- makepath(resultsdir1, CREDINT_DIR)
			credfile1 <- csvname(credpath1, corefilename, model.ident1)
			if (! file.exists(credfile1)) {
				message("Error: cannot find credible interval output file: ", credfile1)
				stop("Please run the Monte Carlo function!\n")
			}
			message("Reading credible interval processed data from '", credfile1, "'")
			credintervaldata1 <- readcsv(credfile1, row.names=1)	# first column is row names
		}
	}
	# --- Load credible-interval data for the SCENARIO run, if requested -------
	if(ci.data2==TRUE){
		if(use.example2==TRUE){
			credintervaldata2 <- get.example.results(model.name2, model.variant2, corefilename, CREDINT_DIR)
		}
		if(use.example2==FALSE){
			credpath2 <- makepath(resultsdir2, CREDINT_DIR)
			credfile2 <- csvname(credpath2, corefilename, model.ident2)
			if (! file.exists(credfile2)) {
				message("Error: cannot find credible interval output file: ", credfile2)
				stop("Please run the Monte Carlo function!\n")
			}
			message("Reading credible interval processed data from '", credfile2, "'")
			credintervaldata2 <- readcsv(credfile2, row.names=1)	# first column is row names
		}
	}
	# --- Otherwise load single-trajectory monthly results for the baseline ----
	if(ci.data1==FALSE){
		if(use.saved1==TRUE){
			datafile1 <- csvname(resultsdir1, "model_monthlyresults", model.ident1)
			print(paste("Using baseline data held in a file ",datafile1," from a past model run"))
			check.exists(datafile1)
			modelmonthly1<-readcsv(datafile1)
		}
		if(use.saved1==FALSE){
			modelmonthly1 <- elt(results1, "final.year.outputs", "monthly.averages")
		}
	}
	# --- ... and for the scenario ---------------------------------------------
	if(ci.data2==FALSE){
		if(use.saved2==TRUE){
			datafile2 <- csvname(resultsdir2, "model_monthlyresults", model.ident2)
			print(paste("Using scenario data held in a file ",datafile2," from a past model run"))
			check.exists(datafile2)
			modelmonthly2<-readcsv(datafile2)
		}
		if(use.saved2==FALSE){
			modelmonthly2 <- elt(results2, "final.year.outputs", "monthly.averages")
		}
	}
	# --------------------------------------------------------------------------
	# When there is no Monte Carlo output, fabricate a credintervaldata table of
	# the same shape: 6 rows per variable (maxlik + 5 quantiles) x 12 months,
	# every row filled with the single model trajectory so all "quantiles"
	# collapse onto the same line.
	if(ci.data1==FALSE){
		#convert modelmonthly into credintervaldata format
		credintervaldata1 <- data.frame(rep(rep(NA,6*ncol(modelmonthly1))))
		for(jj in 2:12){
			credintervaldata1[,jj]<-rep(rep(NA,6*ncol(modelmonthly1)))
		}
		colnames(credintervaldata1)<-c("1","2","3","4","5","6","7","8","9","10","11","12")
		# Row names follow the credible-interval file convention:
		# "<variable>-maxlik" then "<variable>-<quantile>" for each variable.
		for(jj in 1:ncol(modelmonthly1)){
			nameset<-c(paste((names(modelmonthly1))[jj],"-maxlik",sep=""),
				paste((names(modelmonthly1))[jj],"-0.005",sep=""),
				paste((names(modelmonthly1))[jj],"-0.25",sep=""),
				paste((names(modelmonthly1))[jj],"-0.5",sep=""),
				paste((names(modelmonthly1))[jj],"-0.75",sep=""),
				paste((names(modelmonthly1))[jj],"-0.995",sep="") )
			if(jj==1) fullnameset<-nameset
			if(jj>1) fullnameset<-c(fullnameset,nameset)
		}
		rownames(credintervaldata1)<-fullnameset
		for(jj in 1:ncol(modelmonthly1)){
			for(kk in 1:6){
				credintervaldata1[(((jj-1)*6)+kk),1:12] <- as.numeric(modelmonthly1[,jj])
			}
		}
	}
	if(ci.data2==FALSE){
		#convert modelmonthly into credintervaldata format
		credintervaldata2 <- data.frame(rep(rep(NA,6*ncol(modelmonthly2))))
		for(jj in 2:12){
			credintervaldata2[,jj]<-rep(rep(NA,6*ncol(modelmonthly2)))
		}
		colnames(credintervaldata2)<-c("1","2","3","4","5","6","7","8","9","10","11","12")
		for(jj in 1:ncol(modelmonthly2)){
			nameset<-c(paste((names(modelmonthly2))[jj],"-maxlik",sep=""),
				paste((names(modelmonthly2))[jj],"-0.005",sep=""),
				paste((names(modelmonthly2))[jj],"-0.25",sep=""),
				paste((names(modelmonthly2))[jj],"-0.5",sep=""),
				paste((names(modelmonthly2))[jj],"-0.75",sep=""),
				paste((names(modelmonthly2))[jj],"-0.995",sep="") )
			if(jj==1) fullnameset<-nameset
			if(jj>1) fullnameset<-c(fullnameset,nameset)
		}
		rownames(credintervaldata2)<-fullnameset
		for(jj in 1:ncol(modelmonthly2)){
			for(kk in 1:6){
				credintervaldata2[(((jj-1)*6)+kk),1:12] <- as.numeric(modelmonthly2[,jj])
			}
		}
	}
	#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	# BASELINE MODEL DATA:
	#Generate the list objects needed by the bxp plotting function
	# For each of the 9 variables pull the 5 quantile rows (skipping the maxlik
	# row) of its 6-row group into a 5 x 12 stats matrix, as bxp() expects.
	for(iii in 1:9){
		credrows<- seq( ((iii-1)*(5+1))+2,((iii-1)*(5+1))+(5+1) )
		modeldata2plot<-(credintervaldata1[credrows,1])
		for(jj in 2:12) { modeldata2plot<-c(modeldata2plot,(credintervaldata1[credrows,jj]))}
		array2plot<- array(dim=c(5,12),modeldata2plot)
		bxpdata<-list(stats=array2plot,n=rep(100,12),conf=NULL,out=numeric(length=0),names=monlab)
	#	bxp(bxpdata,boxwex=0.3,at=seq(1,12)+0.35,xlim=c(0,13),ylim=c(0,max(modeldata2plot)*1.1))
		if(iii==1) bxpdata1<-bxpdata
		if(iii==2) bxpdata2<-bxpdata
		if(iii==3) bxpdata3<-bxpdata
		if(iii==4) bxpdata4<-bxpdata
		if(iii==5) bxpdata5<-bxpdata
		if(iii==6) bxpdata6<-bxpdata
		if(iii==7) bxpdata7<-bxpdata
		if(iii==8) bxpdata8<-bxpdata
		if(iii==9) bxpdata9<-bxpdata
	}
	# Tenth panel: sum of the stats of the two benthos larvae groups (8 and 9).
	bxpdata10<-list(stats=(bxpdata9$stats+bxpdata8$stats),n=rep(100,12),conf=NULL,out=numeric(length=0),names=monlab)
	#Combines the two types of benthic larvae
	#Package all the bxpdata objects up into a list to pass into the plotting function
	bxpdata.all.1<-list(bxpdata1=bxpdata1,
		bxpdata2=bxpdata2,
		bxpdata3=bxpdata3,
		bxpdata4=bxpdata4,
		bxpdata5=bxpdata5,
		bxpdata6=bxpdata6,
		bxpdata7=bxpdata7,
		bxpdata8=bxpdata8,
		bxpdata9=bxpdata9,
		bxpdata10=bxpdata10)
	# SCENARIO MODEL DATA:
	#Generate the list objects needed by the bxp plotting function
	# (same construction as above, applied to the scenario run's table)
	for(iii in 1:9){
		credrows<- seq( ((iii-1)*(5+1))+2,((iii-1)*(5+1))+(5+1) )
		modeldata2plot<-(credintervaldata2[credrows,1])
		for(jj in 2:12) { modeldata2plot<-c(modeldata2plot,(credintervaldata2[credrows,jj]))}
		array2plot<- array(dim=c(5,12),modeldata2plot)
		bxpdata<-list(stats=array2plot,n=rep(100,12),conf=NULL,out=numeric(length=0),names=monlab)
	#	bxp(bxpdata,boxwex=0.3,at=seq(1,12)+0.35,xlim=c(0,13),ylim=c(0,max(modeldata2plot)*1.1))
		if(iii==1) bxpdata1<-bxpdata
		if(iii==2) bxpdata2<-bxpdata
		if(iii==3) bxpdata3<-bxpdata
		if(iii==4) bxpdata4<-bxpdata
		if(iii==5) bxpdata5<-bxpdata
		if(iii==6) bxpdata6<-bxpdata
		if(iii==7) bxpdata7<-bxpdata
		if(iii==8) bxpdata8<-bxpdata
		if(iii==9) bxpdata9<-bxpdata
	}
	bxpdata10<-list(stats=(bxpdata9$stats+bxpdata8$stats),n=rep(100,12),conf=NULL,out=numeric(length=0),names=monlab)
	#Combines the two types of benthic larvae
	#Package all the bxpdata objects up into a list to pass into the plotting function
	bxpdata.all.2<-list(bxpdata1=bxpdata1,
		bxpdata2=bxpdata2,
		bxpdata3=bxpdata3,
		bxpdata4=bxpdata4,
		bxpdata5=bxpdata5,
		bxpdata6=bxpdata6,
		bxpdata7=bxpdata7,
		bxpdata8=bxpdata8,
		bxpdata9=bxpdata9,
		bxpdata10=bxpdata10)
	#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	# Helper: draw one panel. obspar (1..10) selects which bxpdataN element of
	# each list to plot; baseline is drawn black, scenario overlaid in red.
	plotdata_mco<-function(bxpdata.all.1, bxpdata.all.2, obspar, monlab){
		if(obspar==1){
			bxpdata1<-bxpdata.all.1$bxpdata1
			bxpdata2<-bxpdata.all.2$bxpdata1
		}
		if(obspar==2){
			bxpdata1<-bxpdata.all.1$bxpdata2
			bxpdata2<-bxpdata.all.2$bxpdata2
		}
		if(obspar==3){
			bxpdata1<-bxpdata.all.1$bxpdata3
			bxpdata2<-bxpdata.all.2$bxpdata3
		}
		if(obspar==4){
			bxpdata1<-bxpdata.all.1$bxpdata4
			bxpdata2<-bxpdata.all.2$bxpdata4
		}
		if(obspar==5){
			bxpdata1<-bxpdata.all.1$bxpdata5
			bxpdata2<-bxpdata.all.2$bxpdata5
		}
		if(obspar==6){
			bxpdata1<-bxpdata.all.1$bxpdata6
			bxpdata2<-bxpdata.all.2$bxpdata6
		}
		if(obspar==7){
			bxpdata1<-bxpdata.all.1$bxpdata7
			bxpdata2<-bxpdata.all.2$bxpdata7
		}
		if(obspar==8){
			bxpdata1<-bxpdata.all.1$bxpdata8
			bxpdata2<-bxpdata.all.2$bxpdata8
		}
		if(obspar==9){
			bxpdata1<-bxpdata.all.1$bxpdata9
			bxpdata2<-bxpdata.all.2$bxpdata9
		}
		if(obspar==10){
			bxpdata1<-bxpdata.all.1$bxpdata10
			bxpdata2<-bxpdata.all.2$bxpdata10
		}
		modplot1<-bxpdata1$stats
		modplot2<-bxpdata2$stats
		# Shared y-scale: nitrate panels (1,2) and ammonia panels (3,4) each use
		# a common limit across both panels and both runs.
		if(obspar==1 | obspar==2) ymax<- max(0, max(as.data.frame(bxpdata.all.1$bxpdata1$stats)), max(as.data.frame(bxpdata.all.1$bxpdata2$stats)), max(as.data.frame(bxpdata.all.2$bxpdata1$stats)), max(as.data.frame(bxpdata.all.2$bxpdata2$stats)),na.rm=TRUE )
		if(obspar==3 | obspar==4) ymax<- max(0, max(as.data.frame(bxpdata.all.1$bxpdata3$stats)), max(as.data.frame(bxpdata.all.1$bxpdata4$stats)), max(as.data.frame(bxpdata.all.2$bxpdata3$stats)), max(as.data.frame(bxpdata.all.2$bxpdata4$stats)),na.rm=TRUE )
		if(obspar>4) ymax<- max(0, max(as.data.frame(modplot1),na.rm=TRUE),max(as.data.frame(modplot2),na.rm=TRUE),na.rm=TRUE )
		if(ymax==0 | is.na(ymax)==TRUE) ymax<-1	# guard against an empty/NA scale
		bxp(bxpdata1,boxwex=0.25,at=1:12,yaxt="n",ylim=c(0,ymax*1.1),show.names=FALSE,las=1,cex.axis=1.1,
			boxcol="black",whiskcol="black",whisklty="solid",medcol="black",staplecol="black")
		axis(labels=monlab, at=seq(1,12),side=1,las=1,cex.axis=1.1,padj=-0.55)
		# One branch per panel writes the y-axis name and units.
		if(obspar==1){
			axis(side=2,cex.lab=1.0,las=1)
			mtext("Surf.nitrate",cex=0.8,side=2,line=4)
			mtext(bquote(mMN.m^-3),cex=0.6,side=2,line=2.7)
		}
		if(obspar==2){
			axis(side=2,cex.lab=1.0,las=1)
			mtext("Deep nitrate",cex=0.8,side=2,line=4)
			mtext(bquote(mMN.m^-3),cex=0.6,side=2,line=2.7)
		}
		if(obspar==3){
			axis(side=2,cex.lab=1.0,las=1)
			mtext("Surf.ammonia",cex=0.8,side=2,line=4)
			mtext(bquote(mMN.m^-3),cex=0.6,side=2,line=2.7)
		}
		if(obspar==4){
			axis(side=2,cex.lab=1.0,las=1)
			mtext("Deep ammonia",cex=0.8,side=2,line=4)
			mtext(bquote(mMN.m^-3),cex=0.6,side=2,line=2.7)
		}
		if(obspar==5){
			axis(side=2,cex.lab=1.0,las=1)
			mtext("Chlorophyll",cex=0.8,side=2,line=4)
			mtext(bquote(mg.m^-3),cex=0.6,side=2,line=2.7)
		}
		if(obspar==6){
			axis(side=2,cex.lab=1.0,las=1)
			mtext("Omniv.zoo",cex=0.8,side=2,line=4)
			mtext(bquote(mMN.m^-3),cex=0.6,side=2,line=2.7)
		}
		if(obspar==7){
			axis(side=2,cex.lab=1.0,las=1)
			mtext("Carniv.zoo",cex=0.8,side=2,line=4)
			mtext(bquote(mMN.m^-3),cex=0.6,side=2,line=2.7)
		}
		if(obspar==8){
			axis(side=2,cex.lab=1.0,las=1)
			mtext("Larv.s/d.benth.",cex=0.8,side=2,line=4)
			mtext(bquote(mMN.m^-3),cex=0.6,side=2,line=2.7)
		}
		if(obspar==9){
			axis(side=2,cex.lab=1.0,las=1)
			mtext("Larv.c/s.benth.",cex=0.8,side=2,line=4)
			mtext(bquote(mMN.m^-3),cex=0.6,side=2,line=2.7)
		}
		if(obspar==10){
			axis(side=2,cex.lab=1.0,las=1)
			mtext("Benthos larvae (all)",cex=0.8,side=2,line=4)
			mtext(bquote(mMN.m^-3),cex=0.6,side=2,line=2.7)
		}
		# Overlay the scenario boxes in red, offset right of the baseline.
		bxp(bxpdata2,add=TRUE,boxwex=0.25,at=1:12+0.35,yaxt="n",xaxt="n",
			boxcol="red",whiskcol="red",whisklty="solid",medcol="red",staplecol="red")
	}
	#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	# Lay out a 4 x 2 page; panels 8 and 9 (the separate larvae groups) are
	# commented out in favour of the combined larvae panel (10).
	par(mfrow=c(4,2))
	par(mar=c(3,6,0.6,0.5))
	plotdata_mco(bxpdata.all.1, bxpdata.all.2, 1, monlab)
	plotdata_mco(bxpdata.all.1, bxpdata.all.2, 2, monlab)
	plotdata_mco(bxpdata.all.1, bxpdata.all.2, 3, monlab)
	plotdata_mco(bxpdata.all.1, bxpdata.all.2, 4, monlab)
	plotdata_mco(bxpdata.all.1, bxpdata.all.2, 5, monlab)
	plotdata_mco(bxpdata.all.1, bxpdata.all.2, 6, monlab)
	plotdata_mco(bxpdata.all.1, bxpdata.all.2, 7, monlab)
	#plotdata_mco(bxpdata.all.1, bxpdata.all.2, 8, monlab)
	#plotdata_mco(bxpdata.all.1, bxpdata.all.2, 9, monlab)
	plotdata_mco(bxpdata.all.1, bxpdata.all.2, 10, monlab)
	# Single legend placed in normalised device coordinates below the panels.
	legend(grconvertX(0.425, "ndc", "user"), grconvertY(0.045, "ndc", "user"),
		c("baseline","scenario"), fill = c("black","red"), ncol=2, bty="n", xpd = NA)
}
|
## The following two functions together calculates the inversion of a matrix.
##The first function makeCacheMatrix initiates and stores the values calculated in the second cacheSolve function.
##
# makeCacheMatrix creates a list with four functions (get,set,getinverse,setinverse) and the two elements m and x.
#makeCacheMatrix takes x as an argument and requires it to be a matrix.
#Initially it sets inverse to NULL which allows us to check if we have a NULL value or a cached value in the next function.
#It then creates "setters" and "getters" to be used later and stores it all in a list as named objects.
# Wrap a matrix together with a cache slot for its inverse.
# Returns a list of four accessor closures: set/get for the matrix itself,
# setinverse/getinverse for the cached inverse (NULL until it is computed).
makeCacheMatrix <- function(x = matrix()) {
        cached <- NULL                       # cached inverse; NULL means "not yet computed"
        set <- function(y) {
                # Replace the stored matrix and drop any stale cached inverse.
                x <<- y
                cached <<- NULL
        }
        get <- function() x
        setinverse <- function(inv) cached <<- inv
        getinverse <- function() cached
        list(set = set,
             get = get,
             setinverse = setinverse,
             getinverse = getinverse)
}
## This function takes the output of makeCacheMatrix as its input. It retrieves the cached inverse and checks whether the inverse matrix has already been computed or is still NULL.
#If it is NULL, it computes the inverted matrix and saves it in the list created by makeCacheMatrix.
## cacheSolve: return the inverse of the matrix held in a cache object
## created by makeCacheMatrix(). `x` is the list of closures produced by
## makeCacheMatrix; `...` is forwarded to solve() (e.g. a tolerance).
cacheSolve <- function(x, ...) {
        inverse <- x$getinverse()
        if(!is.null(inverse)) {
                ## Cache hit: skip recomputation and return the stored inverse.
                ## (Fixed typo in the status message: "invertec" -> "inverted".)
                message("getting cached inverted matrix data")
                return(inverse)
        }
        ## Cache miss: compute the inverse once and store it for later calls.
        data <- x$get()
        inverse <- solve(data, ...)
        x$setinverse(inverse)
        inverse
        ## Return a matrix that is the inverse of 'x'
}
| /cachematrix.R | no_license | Kroysler/ProgrammingAssignment2 | R | false | false | 1,619 | r | ## The following two functions together calculates the inversion of a matrix.
##The first function makeCacheMatrix initiates and stores the values calculated in the second cacheSolve function.
##
# makeCacheMatrix creates a list with four functions (get,set,getinverse,setinverse) and the two elements m and x.
#makeCacheMatrix takes x as an argument and requires it to be a matrix.
#Initially it sets inverse to NULL which allows us to check if we have a NULL value or a cached value in the next function.
#It then creates "setters" and "getters" to be used later and stores it all in a list as named objects.
# Container pairing a matrix with a lazily-filled cache of its inverse.
# The returned list exposes set/get (the matrix) and setinverse/getinverse
# (the cached inverse, NULL until someone stores it).
makeCacheMatrix <- function(x = matrix()) {
        inv_cache <- NULL                    # NULL marks "inverse not computed yet"
        set <- function(y) {
                # Storing a new matrix invalidates the previously cached inverse.
                x <<- y
                inv_cache <<- NULL
        }
        get <- function() x
        setinverse <- function(inv) inv_cache <<- inv
        getinverse <- function() inv_cache
        list(set = set,
             get = get,
             setinverse = setinverse,
             getinverse = getinverse)
}
## This function takes the output of makeCacheMatrix as its input. It retrieves the cached inverse and checks whether the inverse matrix has already been computed or is still NULL.
#If it is NULL, it computes the inverted matrix and saves it in the list created by makeCacheMatrix.
## cacheSolve: return the inverse of the matrix held in a cache object
## created by makeCacheMatrix(). `x` is the list of closures produced by
## makeCacheMatrix; `...` is forwarded to solve() (e.g. a tolerance).
cacheSolve <- function(x, ...) {
        inverse <- x$getinverse()
        if(!is.null(inverse)) {
                ## Cache hit: skip recomputation and return the stored inverse.
                ## (Fixed typo in the status message: "invertec" -> "inverted".)
                message("getting cached inverted matrix data")
                return(inverse)
        }
        ## Cache miss: compute the inverse once and store it for later calls.
        data <- x$get()
        inverse <- solve(data, ...)
        x$setinverse(inverse)
        inverse
        ## Return a matrix that is the inverse of 'x'
}
|
#---------------------------------------------------------------
# IARP Version 1.3
# Added user identified indemnity
# 9th February 2015
# Nirav Khimashia
#---------------------------------------------------------------
# Attach the UI/plotting stack used by the app.
library(shiny)
#library(leaflet)
library(RColorBrewer)
library(scales)
library(lattice)
library(dplyr)
library(plotGoogleMaps)
# Helper functions (get_*_db builders, Check_UserInput, etc.) are defined in
# these sourced scripts, not in this file.
source('global.R')
source('global_check_user_input.R')
library(leafletR)
library(rMaps)
library(shinyIncubator)
# NOTE(review): presumably forces English month/number formatting regardless
# of the host locale — confirm this is intentional on non-Windows hosts.
Sys.setlocale(locale="English")
#...............................................................................
# Load databases
# Raw lookup tables are read from ./data and converted into the *.db objects
# below. The *.db objects are assigned with `<<-`, so they live in the global
# environment and are reachable from the shinyServer code further down.
DB_Message = paste('IARP Version 1.3'); print(DB_Message)
DB_Message = paste('Libraries loaded - ', Sys.time()); print(DB_Message)
CropSeasons <- readRDS('data/CropSeasons.Rds')
Product_type.db <<- get_Product_type_db(CropSeasons)
Districts <- readRDS('data/Districts.Rds')
States <- readRDS('data/States.Rds')
adminID.db <<- get_adminID_db(Districts,States)
Risk_Items <- readRDS('data/Risk_Items.Rds')
Crops <- readRDS('data/Crops.Rds')
Exposure.db <<- get_exposure_db(Risk_Items, Crops, adminID.db)
raw_WBCIS <- readRDS('data/Risk_Items_YearWise_LossCosts.Rds')
raw_historic <- readRDS('data/Risk_Items_YearWise_Historical_Yields.Rds')
raw_synthetic <- readRDS('data/Risk_Items_YearWise_Synthetic_Yields.Rds')
WBCIS_gy.db <<- get_gy_db(raw_WBCIS, Risk_Items, adminID.db)
Historic_gy.db <<- get_gy_db(raw_historic, Risk_Items, adminID.db)
Synthetic_gy.db <<- get_gy_db(raw_synthetic, Risk_Items, adminID.db)
DB_Message = paste('All Databases loaded and prepared - ', Sys.time()); print(DB_Message)
# Drop the raw inputs once the derived databases exist.
# NOTE(review): raw_WBCIS is not removed here while raw_historic/raw_synthetic
# are — confirm whether keeping it is intentional.
rm(CropSeasons, Districts, States, Risk_Items, Crops, raw_historic, raw_synthetic)
#...............................................................................
# Flag tracking whether anything is currently shown in the UI tables
# (0 = cleared; set to 1 once a user file has been loaded).
display.flag <<- 0
shinyServer(function(input, output, session) {
#------------------------------------------------------------------------
# Clean all varibales and screen
  # "Clear Display" button: wipe every piece of shared state and blank all
  # result tables so a fresh analysis can be started.
  # NOTE(review): all state is held in the global environment via `<<-`, so
  # this reset (and the app as a whole) is effectively single-session --
  # confirm the deployment is single-user.
  observe({
    input$ClearDisplay
    # Action buttons start at 0 -- skip the reset that would fire on startup.
    if (input$ClearDisplay == 0)
      return()
    isolate({
      # Validated user input and its UI display copy.
      display.flag <<- 0; display_array <<- NULL
      # MNAIS / WBCIS data-audit summaries (raw + formatted display copies).
      MNAISdata_audit_array <<- NULL; MNAISdata_audit_display_array <<- NULL
      WBCISdata_audit_array <<- NULL; WBCISdata_audit_display_array <<- NULL
      # Disaggregated exposure tables (internal + display versions).
      MNAIS_Dissaggregated_exposure.db <<- NULL; MNAIS_Display_Dissaggregated_exposure.db <<- NULL
      WBCIS_Dissaggregated_exposure.db <<- NULL; WBCIS_Display_Dissaggregated_exposure.db <<- NULL
      # Indemnity-loss results for the historic and synthetic yield scenarios.
      IND_LOSS_Historic_gy.db <<- NULL
      IND_LOSS_Synthetic_gy.db <<- NULL
      Display_IND_LOSS_Historic_gy.db <<- NULL
      Display_IND_LOSS_Synthetic_gy.db <<- NULL
      # Per-aggregation-level loss tables (levels 1-4).
      L1_loss_Historic_gy.final <<- NULL
      L2_loss_Historic_gy.final <<- NULL
      L3_loss_Historic_gy.final <<- NULL
      L4_loss_Historic_gy.final <<- NULL
      L1_loss_Synthetic_gy.final <<- NULL
      L2_loss_Synthetic_gy.final <<- NULL
      L3_loss_Synthetic_gy.final <<- NULL
      L4_loss_Synthetic_gy.final <<- NULL
      # State-level summary tables shown in the UI.
      Historic_summary_display_final <<- NULL
      Synthetic_summary_display_final <<- NULL
      # WBCIS loss results (full table + levels 1-4).
      WBCIS.final <<- NULL
      L1_WBCIS_loss.final <<- NULL
      L2_WBCIS_loss.final <<- NULL
      L3_WBCIS_loss.final <<- NULL
      L4_WBCIS_loss.final <<- NULL
      # Blank every data table currently rendered in the UI.
      output$UserInput <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$MNAISDataAudit <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$WBCISDataAudit <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$MNAISDisplayDissaggregated <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$WBCISDisplayDissaggregated <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$HistoricLosses <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$ModelledLosses <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$WBCISLosses <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      #output$Data_Audit_LOB_Pie <- renderPlot({NULL)})
      # Re-source the globals so helper functions/constants are restored.
      source('global.R')
    })
  })
#------------------------------------------------------------------------
# read in a file and store in display array
observe({
input$UserInput # Do take a dependency on file input
inFile <- input$UserInput
if (is.null(inFile))
return(NULL)
#-------------------------------------------------------------------------------------------------
# Busy Animation
# Create a Progress object
progress <- shiny::Progress$new()
progress$set(message = "Computing ....", value = 0)
on.exit(progress$close())
updateProgress <- function(value = NULL, detail = NULL)
{if (is.null(value)) {value <- progress$getValue(); value <- value + (progress$getMax() - value) / 7; Sys.sleep(1)}
progress$set(value = value, detail = detail)}
#-------------------------------------------------------------------------------------------------
# Get and check user input file
raw_input <<- read.csv(inFile$datapath, header = T, sep = ',', quote = input$quote) # No dependency on input$dataset
if (is.function(updateProgress)) {updateProgress(detail = 'Validating User Input ...........')}
Checked_raw_input <- Check_UserInput(raw_input, adminID.db, Exposure.db, Product_type.db, Check_UserInput_Name_Mismatch, Check_UserInput_Prepare_Exposure_db, Check_UserInput_modelled_adminlevel, Check_UserInput_TSI_check)
Message=paste('Validated User Input ...........', Sys.time()); print(Message)
if (is.function(updateProgress)) {updateProgress(detail = 'Validation Successful ...........')}
#-------------------------------------------------------------------------------------------------
# prepare data for UI Display
file_input <- data.frame(lapply(Checked_raw_input, as.character), stringsAsFactors=FALSE)
if(display.flag == 1) {display_array <<- rbind(file_input, display_array)}
if(display.flag == 0) {display_array <<- file_input; display.flag <<- 1}
if (is.function(updateProgress)) {updateProgress(detail = 'Prepare data for UI Display ...........')}
#-------------------------------------------------------------------------------------------------
# Output Data in to the UI
output$UserInput <- renderDataTable({return(display_array)}, options = list(orderClasses = TRUE))
#---------------------------------------------------------------------------------------------
# perform MNAIS data audit and display in to the UI
MNAIS_display_array <- display_array[,-10]
MNAISdata_audit_array <<- as.data.frame(Perform_Data_Audit(MNAIS_display_array))
x_flag = 0
if(!is.null(MNAISdata_audit_array))
{
if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS Data Audit computing ...........')}
State = rownames(MNAISdata_audit_array)
MNAISdata_audit_display_array <<- cbind(State, format(MNAISdata_audit_array, scientific=FALSE))
output$MNAISDataAudit <- renderDataTable({return(MNAISdata_audit_display_array)}, options = list(orderClasses = TRUE))
x_flag = 1
MNAISdata_audit_array <<- as.data.frame(MNAISdata_audit_array)
Message=paste('MNAIS Data Audit computed ....', Sys.time()); print(Message)
if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS Data Audit computed ...........')}
}
# perform WBCIS data audit and display in to the UI
WBCIS_display_array <- display_array[,-9]
WBCISdata_audit_array <<- Perform_Data_Audit(WBCIS_display_array)
y_flag = 0
if(!is.null(WBCISdata_audit_array))
{
if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS Data Audit computing ...........')}
State = rownames(WBCISdata_audit_array)
WBCISdata_audit_display_array <<- cbind(State, format(WBCISdata_audit_array, scientific=FALSE))
output$WBCISDataAudit <- renderDataTable({return(WBCISdata_audit_display_array)}, options = list(orderClasses = TRUE))
y_flag = 1
WBCISdata_audit_array <<- as.data.frame(WBCISdata_audit_array)
Message=paste('WBCIS Data Audit computed ....', Sys.time()); print(Message)
if (is.function(updateProgress)) {updateProgress(detail = 'WBCIS Data Audit computed ...........')}
}
if((x_flag == 1) && (y_flag == 0)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot_1(MNAISdata_audit_array, "MNAIS Line of Business")})}
if((x_flag == 0) && (y_flag == 1)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot_1(WBCISdata_audit_array, "WBCIS Line of Business")})}
if((x_flag == 1) && (y_flag == 1)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot(MNAISdata_audit_array,WBCISdata_audit_array)})}
if (is.function(updateProgress)) {updateProgress(detail = 'LOB Graphics computed ...........')}
#---------------------------------------------------------------------------------------------
# Pie Chart with Percentages
#output$Data_Audit_State_TSI <- renderPlot({State_TSI_Plot(MNAISdata_audit_array, WBCISdata_audit_array)})
})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Download User Input
output$Download_DisplayArray <- downloadHandler(
filename = function() { paste('Validated_User_input.csv', sep='') },
content = function(file) {write.csv(display_array, file)})
# Download Data Audit Summary
output$Download_MNAISDataAuditSummary <- downloadHandler(
filename = function() { paste('MNAIS_Data_Audit_Summary', '.csv', sep='') },
content = function(file) {write.csv(MNAISdata_audit_display_array, file)})
output$Download_WBCISDataAuditSummary <- downloadHandler(
filename = function() { paste('WBCIS_Data_Audit_Summary', '.csv', sep='') },
content = function(file) {write.csv(WBCISdata_audit_display_array, file)})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# read in all single entry inputs and store in display array
  # "Go" button: build a single exposure row from the individual input
  # widgets, validate it, prepend it to the display array, and refresh the
  # MNAIS/WBCIS data audits.
  observe({
    input$goButton
    if (input$goButton == 0)
      return()
    isolate({
      # NOTE(review): ContractID_input is captured but never used in
      # single_entry_input below -- confirm whether it should be included.
      ContractID_input <- input$Unique_Contract_Name
      State_input <- input$Unique_States_input
      District_input <- input$Unique_District_input
      Crop_input <- input$Unique_Crop_input
      Season_input <- input$Unique_Season_input
      TSI <- input$TSI
      EPI <- input$EPI
      PR <- input$PR
      #if(District_input == 'All'){District_input = NA}
      single_entry_input <<- isolate(cbind(State_input, District_input, Crop_input, Season_input, TSI, EPI, PR))
      # NOTE(review): Check_UserInput is called here with 4 arguments, while
      # the file-upload path passes 4 extra helper functions -- verify both
      # call signatures are supported by the validator.
      Checked_single_entry_input.tmp <- as.data.frame(Check_UserInput(single_entry_input,adminID.db,Exposure.db,Product_type.db))
      Checked_single_entry_input <- data.frame(lapply(Checked_single_entry_input.tmp, as.character), stringsAsFactors=FALSE)
      # Prepend to the shared display array (display.flag marks first entry).
      if(display.flag > 0) {display_array <<- rbind(Checked_single_entry_input, display_array)}
      if(display.flag == 0) {display_array <<- Checked_single_entry_input; display.flag <<- 1}
      #---------------------------------------------------------------------------------------------
      # MNAIS data audit.  NOTE(review): this path drops column 9 while the
      # file-upload path drops column 10 -- presumably single-entry rows have
      # one fewer column; confirm against the input layout.
      MNAIS_display_array <- display_array[,-9]
      MNAISdata_audit_array <<- Perform_Data_Audit(MNAIS_display_array)
      x_flag = 0
      if(!is.null(MNAISdata_audit_array))
      {
        State = rownames(MNAISdata_audit_array)
        MNAISdata_audit_display_array <<- cbind(State, format(MNAISdata_audit_array, scientific=FALSE))
        output$MNAISDataAudit <- renderDataTable({return(MNAISdata_audit_display_array)}, options = list(orderClasses = TRUE))
        x_flag = 1
      }
      # WBCIS data audit (drops column 8 -- see note above).
      WBCIS_display_array <- display_array[,-8]
      WBCISdata_audit_array <<- Perform_Data_Audit(WBCIS_display_array)
      y_flag = 0
      if(!is.null(WBCISdata_audit_array))
      {
        State = rownames(WBCISdata_audit_array)
        WBCISdata_audit_display_array <<- cbind(State, format(WBCISdata_audit_array, scientific=FALSE))
        output$WBCISDataAudit <- renderDataTable({return(WBCISdata_audit_display_array)}, options = list(orderClasses = TRUE))
        y_flag = 1
      }
      # Render the LOB pie chart(s) for whichever audits produced results.
      if((x_flag == 1) && (y_flag == 0)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot_1(MNAISdata_audit_array, "MNAIS Line of Business")})}
      if((x_flag == 0) && (y_flag == 1)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot_1(WBCISdata_audit_array, "WBCIS Line of Business")})}
      if((x_flag == 1) && (y_flag == 1)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot(MNAISdata_audit_array,WBCISdata_audit_array)})}
      #---------------------------------------------------------------------------------------------
    })
  })
#------------------------------------------------------------------------
#------------------------------------------------------------------------
  # Display "display array" when the go button is pressed
  # Refresh the main display after a "Go" press: re-render the user-input
  # table and recompute both data audits.
  # NOTE(review): this duplicates the audit logic of the single-entry
  # observer above; both observers fire on the same input$goButton event.
  observe({
    input$goButton
    if (input$goButton == 0)
      return()
    # display user input
    isolate({output$UserInput <- renderDataTable({return(display_array)}, options = list(orderClasses = TRUE))}) #isolate
    #---------------------------------------------------------------------------------------------
    # perform MNAIS data audit and display in to the UI (drops column 9).
    MNAIS_display_array <- display_array[,-9]
    MNAISdata_audit_array <<- Perform_Data_Audit(MNAIS_display_array)
    x_flag = 0
    if(!is.null(MNAISdata_audit_array))
    {
      State = rownames(MNAISdata_audit_array)
      MNAISdata_audit_display_array <<- cbind(State, format(MNAISdata_audit_array, scientific=FALSE))
      output$MNAISDataAudit <- renderDataTable({return(MNAISdata_audit_display_array)}, options = list(orderClasses = TRUE))
      x_flag = 1
    }
    # perform WBCIS data audit and display in to the UI (drops column 8).
    WBCIS_display_array <- display_array[,-8]
    WBCISdata_audit_array <<- Perform_Data_Audit(WBCIS_display_array)
    y_flag = 0
    if(!is.null(WBCISdata_audit_array))
    {
      State = rownames(WBCISdata_audit_array)
      WBCISdata_audit_display_array <<- cbind(State, format(WBCISdata_audit_array, scientific=FALSE))
      output$WBCISDataAudit <- renderDataTable({return(WBCISdata_audit_display_array)}, options = list(orderClasses = TRUE))
      y_flag = 1
    }
    # Render the LOB pie chart(s) for whichever audits produced results.
    if((x_flag == 1) && (y_flag == 0)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot_1(MNAISdata_audit_array, "MNAIS Line of Business")})}
    if((x_flag == 0) && (y_flag == 1)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot_1(WBCISdata_audit_array, "WBCIS Line of Business")})}
    if((x_flag == 1) && (y_flag == 1)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot(MNAISdata_audit_array,WBCISdata_audit_array)})}
    #---------------------------------------------------------------------------------------------
    # (Legacy single-audit display code, kept for reference.)
    # if(!is.null(data_audit_array))
    # {
    #   # display data audit
    #   State = rownames(data_audit_array)
    #   data_audit_display_array <- cbind(State, format(data_audit_array, scientific=FALSE))
    #   isolate({output$DataAudit <- renderDataTable({return(data_audit_display_array)}, options = list(orderClasses = TRUE))}) #isolate
    #
    #   # Pie Chart with Percentages & barchart for state vs TSI
    #   output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot(data_audit_array) })
    #   output$Data_Audit_State_TSI <- renderPlot({State_TSI_Plot(data_audit_array)})
    # }
  })
#------------------------------------------------------------------------
#------------------------------------------------------------------------
  # Run disaggregation when the "Dissaggregate" button is pressed
  # "Dissaggregate" button: convert validated exposure rows to internal IDs
  # and disaggregate state/district-level sums down to modelled risk items,
  # separately for MNAIS and WBCIS.  Results are stored in globals and
  # rendered into the UI tables.
  observe({
    input$Dissaggregate
    if (input$Dissaggregate == 0)
      return()
    #-------------------------------------------------------------------------------------------------
    # Busy animation: progress advanced in up to 8 geometric steps.
    progress <- shiny::Progress$new()
    progress$set(message = "Computing ....", value = 0)
    on.exit(progress$close())
    updateProgress <- function(value = NULL, detail = NULL)
    {if (is.null(value)) {value <- progress$getValue(); value <- value + (progress$getMax() - value) / 8; Sys.sleep(1)}
      progress$set(value = value, detail = detail)}
    # Split the display array into the MNAIS and WBCIS views; allow district
    # errors to pass through (they are deducted below).
    MNAIS_display_array <- display_array[,-10]; if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS array filtered ...........')}
    WBCIS_display_array <- display_array[,-9]; if (is.function(updateProgress)) {updateProgress(detail = 'WBCIS array filtered ...........')}
    #options(warn=-1)
    if(!is.null(MNAIS_display_array))
    {
      # Remove rows whose district failed validation before disaggregating.
      MNAIS_display_array <- deduct_district_error_tsi(MNAIS_display_array)
      if(nrow(MNAIS_display_array) > 0)
      {
        MNAIS_display_array = as.data.frame(Convert_Par_to_ID(MNAIS_display_array, adminID.db, Product_type.db))
        Message=paste('MNAIS Parameter to ID Conversion successful ....', Sys.time()); print(Message)
        #...............................................................................
        # ASSUMPTION USER INPUT DOES NOT CONTAIN ANY UNMODELLED DISTRICTS ANY MORE
        MNAIS_Exposure.db <- get_mutually_exclusive_exposure(MNAIS_display_array, Exposure.db) # get mutually exclusive modelled states
        MNAIS_Dissaggregated_exposure.db <- disaggregate_exposure(MNAIS_Exposure.db, MNAIS_display_array, Aggregate_user_exposure, district_state_level_disaggregation, district_level_disaggregation, state_level_disaggregation)
        Message=paste('MNAIS Dissaggregation successful ....', Sys.time()); print(Message)
        if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS Dissaggregation successful ...........')}
        MNAIS_Dissaggregated_exposure.db <<- as.data.frame(MNAIS_Dissaggregated_exposure.db)
        MNAIS_Display_Dissaggregated_exposure.db <<- Convert_ID_to_Par_Dissagregate(MNAIS_Dissaggregated_exposure.db, adminID.db, Product_type.db)
        MNAIS_Display_Dissaggregated_exposure.db = MNAIS_Display_Dissaggregated_exposure.db[,c(-6), drop=FALSE] #remove 'is modelled' tab
        # Round columns 5/6 to whole numbers for display.
        # NOTE(review): `numeric = TRUE` is not a format() argument -- it is
        # silently swallowed by `...` and has no effect; consider removing.
        MNAIS_Display_Dissaggregated_exposure.db[,5] <- format(round((as.numeric(as.character(MNAIS_Display_Dissaggregated_exposure.db[,5]))), 0), numeric = TRUE)
        MNAIS_Display_Dissaggregated_exposure.db[,6] <- format(round((as.numeric(as.character(MNAIS_Display_Dissaggregated_exposure.db[,6]))), 0), numeric = TRUE)
        MNAIS_Display_Dissaggregated_exposure.db[,5] = format(MNAIS_Display_Dissaggregated_exposure.db[,5], scientific = FALSE)
        MNAIS_Display_Dissaggregated_exposure.db[,6] = format(MNAIS_Display_Dissaggregated_exposure.db[,6], scientific = FALSE)
        MNAIS_Display_Dissaggregated_exposure.final <<- MNAIS_Display_Dissaggregated_exposure.db[,, drop = FALSE]
        isolate({output$MNAISDisplayDissaggregated <- renderDataTable({return(MNAIS_Display_Dissaggregated_exposure.final)}, options = list(orderClasses = TRUE))}) #isolate
        #...............................................................................
      }
    }
    if(!is.null(WBCIS_display_array))
    {
      # Same pipeline for WBCIS, with its own disaggregation routine.
      WBCIS_display_array <- deduct_district_error_tsi(WBCIS_display_array)
      if(nrow(WBCIS_display_array) > 0)
      {
        WBCIS_display_array = as.data.frame(Convert_Par_to_ID(WBCIS_display_array, adminID.db, Product_type.db))
        Message=paste('WBCIS Parameter to ID Conversion successful ....', Sys.time()); print(Message)
        # #...............................................................................
        # # ASSUMPTION USER INPUT DOES NOT CONTAIN ANY UNMODELLED DISTRICTS ANY MORE
        WBCIS_Exposure.db <- get_mutually_exclusive_exposure(WBCIS_display_array, Exposure.db)
        WBCIS_Dissaggregated_exposure.db <- disaggregate_exposure_WBCIS(WBCIS_Exposure.db, WBCIS_display_array,Aggregate_user_exposure, district_state_level_disaggregation, district_level_disaggregation, state_level_disaggregation)
        Message=paste('WBCIS Dissaggregation successful ....', Sys.time()); print(Message)
        if (is.function(updateProgress)) {updateProgress(detail = 'WBCIS Dissaggregation successful ...........')}
        WBCIS_Dissaggregated_exposure.db <<- as.data.frame(WBCIS_Dissaggregated_exposure.db, drop = FALSE)
        WBCIS_Display_Dissaggregated_exposure.db <<- Convert_ID_to_Par_Dissagregate(WBCIS_Dissaggregated_exposure.db, adminID.db, Product_type.db)
        WBCIS_Display_Dissaggregated_exposure.db = WBCIS_Display_Dissaggregated_exposure.db[,c(-6), drop = FALSE] #remove 'is modelled' tab
        # Same display rounding as the MNAIS branch (see note above about the
        # inert `numeric = TRUE` argument).
        WBCIS_Display_Dissaggregated_exposure.db[,5] <- format(round((as.numeric(as.character(WBCIS_Display_Dissaggregated_exposure.db[,5]))), 0), numeric = TRUE)
        WBCIS_Display_Dissaggregated_exposure.db[,6] <- format(round((as.numeric(as.character(WBCIS_Display_Dissaggregated_exposure.db[,6]))), 0), numeric = TRUE)
        WBCIS_Display_Dissaggregated_exposure.db[,5] = format(WBCIS_Display_Dissaggregated_exposure.db[,5], scientific = FALSE)
        WBCIS_Display_Dissaggregated_exposure.db[,6] = format(WBCIS_Display_Dissaggregated_exposure.db[,6], scientific = FALSE)
        WBCIS_Display_Dissaggregated_exposure.final <<- WBCIS_Display_Dissaggregated_exposure.db[,, drop = FALSE]
        isolate({output$WBCISDisplayDissaggregated <- renderDataTable({return(WBCIS_Display_Dissaggregated_exposure.final)}, options = list(orderClasses = TRUE))}) #isolate
        #...............................................................................
      } }
    #options(warn=0)
  })
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Download Dissaggregated Exposure
output$Download_MNAIS_Disaggregated_Exposure <- downloadHandler(
filename = function() { paste('MNAIS_Dissaggregated_exposure.csv', sep='') },
content = function(file) {write.csv(MNAIS_Display_Dissaggregated_exposure.final, file)})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Download Dissaggregated Exposure
output$Download_WBCIS_Disaggregated_Exposure <- downloadHandler(
filename = function() { paste('WBCIS_Dissaggregated_exposure.csv', sep='') },
content = function(file) {write.csv(WBCIS_Display_Dissaggregated_exposure.final, file)})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Compute Simulation
  # "MNAIS Simulation" button: attach guaranteed yields to the disaggregated
  # exposure, compute indemnity losses for the historic and synthetic yield
  # scenarios, aggregate them to levels 1-4, and render the summaries.
  observe({
    input$MNAIS_Simulation
    if(input$MNAIS_Simulation == 0)
      return()
    #-------------------------------------------------------------------------------------------------
    # Busy animation: progress advanced in up to 5 geometric steps.
    progress <- shiny::Progress$new()
    progress$set(message = "Computing ....", value = 0)
    on.exit(progress$close())
    updateProgress <- function(value = NULL, detail = NULL)
    {if (is.null(value)) {value <- progress$getValue(); value <- value + (progress$getMax() - value) / 5; Sys.sleep(1)}
      progress$set(value = value, detail = detail)}
    #...............................................................................
    # Attach guaranteed yields.  NOTE(review): `=` creates observer-local
    # copies of Historic_gy.db / Synthetic_gy.db, so the global databases are
    # re-derived fresh on every simulation run (not overwritten).
    UserInput.db <- MNAIS_Dissaggregated_exposure.db
    Historic_gy.db = Get_Guaranteed_gy(Historic_gy.db , UserInput.db, Exposure.db); if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS - Attached Guaranteed Yield to Historic exposure ...........')}
    Synthetic_gy.db = Get_Guaranteed_gy(Synthetic_gy.db, UserInput.db, Exposure.db); if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS - Attached Guaranteed Yield to Synthetic exposure ...........')}
    Message=paste('MNAIS - Attach Guaranteed Yield ....', Sys.time()); print(Message)
    #.................................................................................
    #...............................................................................
    # Compute indemnity losses and their display (parameter-named) versions.
    # gy.db = Historic_gy.db
    if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS - Indemnity loss computing ...............')}
    IND_LOSS_Historic_gy.db <<- Compute_Indemnity_loss(Historic_gy.db)
    IND_LOSS_Synthetic_gy.db <<- Compute_Indemnity_loss(Synthetic_gy.db)
    Display_IND_LOSS_Historic_gy.db <<- Convert_ID_to_Par_detailed_Losses(IND_LOSS_Historic_gy.db, adminID.db, Product_type.db)
    Display_IND_LOSS_Synthetic_gy.db <<- Convert_ID_to_Par_detailed_Losses(IND_LOSS_Synthetic_gy.db, adminID.db, Product_type.db)
    Message=paste('MNAIS - Indemnity loss computed ....', Sys.time()); print(Message)
    if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS - Indemnity loss computed ...............')}
    # Aggregate losses; column 1 of the result labels the aggregation level.
    LOSS_Historic_gy.db <- Compute_aggregate(IND_LOSS_Historic_gy.db, Product_type.db, adminID.db)
    LOSS_Synthetic_gy.db <- Compute_aggregate(IND_LOSS_Synthetic_gy.db, Product_type.db, adminID.db)
    Message=paste('MNAIS - Level Aggregation computed ....', Sys.time()); print(Message)
    if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS - Level Aggregation computed ...............')}
    # Split each aggregate into per-level tables for the download handlers.
    L1_loss_Historic_gy.final <<- unique(LOSS_Historic_gy.db[LOSS_Historic_gy.db[,1] == 'level1',])
    L2_loss_Historic_gy.final <<- unique(LOSS_Historic_gy.db[LOSS_Historic_gy.db[,1] == 'level2',])
    L3_loss_Historic_gy.final <<- unique(LOSS_Historic_gy.db[LOSS_Historic_gy.db[,1] == 'level3',])
    L4_loss_Historic_gy.final <<- unique(LOSS_Historic_gy.db[LOSS_Historic_gy.db[,1] == 'level4',])
    L1_loss_Synthetic_gy.final <<- unique(LOSS_Synthetic_gy.db[LOSS_Synthetic_gy.db[,1] == 'level1',])
    L2_loss_Synthetic_gy.final <<- unique(LOSS_Synthetic_gy.db[LOSS_Synthetic_gy.db[,1] == 'level2',])
    L3_loss_Synthetic_gy.final <<- unique(LOSS_Synthetic_gy.db[LOSS_Synthetic_gy.db[,1] == 'level3',])
    L4_loss_Synthetic_gy.final <<- unique(LOSS_Synthetic_gy.db[LOSS_Synthetic_gy.db[,1] == 'level4',])
    #...............................................................................
    #...............................................................................
    # Render the state-level loss summaries in the UI.
    Historic_summary_display <- Compute_display_aggregate(IND_LOSS_Historic_gy.db, Product_type.db, adminID.db)
    State = rownames(Historic_summary_display)
    Historic_summary_display_final <<- cbind(State, format(Historic_summary_display, scientific=FALSE))
    Synthetic_summary_display <- Compute_display_aggregate(IND_LOSS_Synthetic_gy.db, Product_type.db, adminID.db)
    State = rownames(Synthetic_summary_display)
    Synthetic_summary_display_final <<- cbind(State, format(Synthetic_summary_display, scientific=FALSE))
    isolate({output$HistoricLosses <- renderDataTable({return(Historic_summary_display_final)}, options = list(orderClasses = TRUE))}) #isolate
    isolate({output$ModelledLosses <- renderDataTable({return(Synthetic_summary_display_final)}, options = list(orderClasses = TRUE))}) #isolate
    #...............................................................................
  })
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Compute Simulation
  # "WBCIS Simulation" button: join the disaggregated WBCIS exposure with the
  # loss-cost database, compute indemnity losses, aggregate to levels 1-4 and
  # render the aggregate table.
  observe({
    input$WBCIS_Simulation
    if(input$WBCIS_Simulation == 0)
      return()
    #-------------------------------------------------------------------------------------------------
    # Busy animation: progress advanced in up to 2 geometric steps.
    progress <- shiny::Progress$new()
    progress$set(message = "Computing ....", value = 0)
    on.exit(progress$close())
    updateProgress <- function(value = NULL, detail = NULL)
    {if (is.null(value)) {value <- progress$getValue(); value <- value + (progress$getMax() - value) / 2; Sys.sleep(1)}
      progress$set(value = value, detail = detail)}
    #...............................................................................
    # Join exposure with loss costs on the admin/crop/season keys.
    # NOTE(review): `t` shadows base::t within this observer -- consider a
    # more descriptive name.
    UserInput.db <- as.data.frame(WBCIS_Display_Dissaggregated_exposure.db)
    WBCIS_GY <- Convert_ID_to_Par_WBCIS(WBCIS_gy.db, adminID.db, Product_type.db)
    t = merge(UserInput.db, WBCIS_GY, by = c('State_Name','District_name','Crop_Name','Season_Name'))
    # Drop columns 6-8 of the merge result; column 7 of the remainder is then
    # presumably the loss cost in percent (hence /100) -- TODO confirm.
    WBCIS.tmp = t[,c(-6,-7,-8)]
    WBCIS.tmp[,7] = WBCIS.tmp[,7] / 100
    # Indemnity loss = total sum insured * loss-cost fraction.
    TSI = as.numeric(as.character(WBCIS.tmp[,5]))
    LossP = as.numeric(as.character(WBCIS.tmp[,7]))
    Indemnity_Loss = TSI * LossP
    WBCIS.final <<- cbind(WBCIS.tmp, Indemnity_Loss)
    Message=paste('WBCIS - Loss Computed ....', Sys.time()); print(Message)
    if (is.function(updateProgress)) {updateProgress(detail = 'WBCIS - Loss Computed .................')}
    #...............................................................................
    # Aggregate and split into per-level tables for the download handlers.
    Aggregated_WBCIS_Losses <- Compute_aggregate_WBCIS(WBCIS.final, Product_type.db, adminID.db)
    Message=paste('WBCIS - Level Aggregation computed ....', Sys.time()); print(Message)
    if (is.function(updateProgress)) {updateProgress(detail = 'WBCIS - Level Aggregation computed .................')}
    L1_WBCIS_loss.final <<- unique(Aggregated_WBCIS_Losses[Aggregated_WBCIS_Losses[,1] == 'level1',])
    L2_WBCIS_loss.final <<- unique(Aggregated_WBCIS_Losses[Aggregated_WBCIS_Losses[,1] == 'level2',])
    L3_WBCIS_loss.final <<- unique(Aggregated_WBCIS_Losses[Aggregated_WBCIS_Losses[,1] == 'level3',])
    L4_WBCIS_loss.final <<- unique(Aggregated_WBCIS_Losses[Aggregated_WBCIS_Losses[,1] == 'level4',])
    isolate({output$WBCISLosses <- renderDataTable({return(Aggregated_WBCIS_Losses)}, options = list(orderClasses = TRUE))}) #isolate
    #------------------------------------------------------------------------
  })
#------------------------------------------------------------------------
# Download Historic Losses
output$Download_WBCIS_l1 <- downloadHandler(
filename = function() { paste('Level1_WBCIS_Losses.csv', sep='') },
content = function(file) {write.csv(L1_WBCIS_loss.final, file)})
output$Download_WBCIS_l2 <- downloadHandler(
filename = function() { paste('Level2_WBCIS_Losses.csv', sep='') },
content = function(file) {write.csv(L2_WBCIS_loss.final, file)})
output$Download_WBCIS_l3 <- downloadHandler(
filename = function() { paste('Level3_WBCIS_Losses.csv', sep='') },
content = function(file) {write.csv(L3_WBCIS_loss.final, file)})
output$Download_WBCIS_l4 <- downloadHandler(
filename = function() { paste('Level4_WBCIS_Losses.csv', sep='') },
content = function(file) {write.csv(L4_WBCIS_loss.final, file)})
output$Download_WBCIS_l5 <- downloadHandler(
filename = function() { paste('Level5_WBCIS_Losses.csv', sep='') },
content = function(file) {write.csv(WBCIS.final, file)})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Download Historic Losses
output$Download_historic_l1 <- downloadHandler(
filename = function() { paste('Level1_Historic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L1_loss_Historic_gy.final, file)})
output$Download_historic_l2 <- downloadHandler(
filename = function() { paste('Level2_Historic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L2_loss_Historic_gy.final, file)})
output$Download_historic_l3 <- downloadHandler(
filename = function() { paste('Level3_Historic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L3_loss_Historic_gy.final, file)})
output$Download_historic_l4 <- downloadHandler(
filename = function() { paste('Level4_Historic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L4_loss_Historic_gy.final, file)})
output$Download_historic_l5 <- downloadHandler(
filename = function() { paste('Level5_Historic_Losses', '.csv', sep='') },
content = function(file) {write.csv(Display_IND_LOSS_Historic_gy.db, file)})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Download Synthetic Losses
output$Download_synthetic_l1 <- downloadHandler(
filename = function() { paste('Level1_Synthetic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L1_loss_Synthetic_gy.final, file)})
output$Download_synthetic_l2 <- downloadHandler(
filename = function() { paste('Level2_Synthetic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L2_loss_Synthetic_gy.final, file)})
output$Download_synthetic_l3 <- downloadHandler(
filename = function() { paste('Level3_Synthetic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L3_loss_Synthetic_gy.final, file)})
output$Download_synthetic_l4 <- downloadHandler(
filename = function() { paste('Level4_Synthetic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L4_loss_Synthetic_gy.final, file)})
output$Download_synthetic_l5 <- downloadHandler(
filename = function() { paste('Level5_Synthetic_Losses', '.csv', sep='') },
content = function(file) {write.csv(Display_IND_LOSS_Synthetic_gy.db, file)})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Display Interactive Map
output$myChart <- renderMap({
map3 <- Leaflet$new()
map3$tileLayer(provider = "MapQuestOpen.OSM")
map3$set(width = 1600, height = 800)
map3$setView(c(20,78), zoom = 4)
map3
})
#------------------------------------------------------------------------
})#on.exit(rm(list=ls(all=TRUE))) | /server.R | no_license | asiariskcentre/test2 | R | false | false | 44,834 | r | #---------------------------------------------------------------
# IARP Version 1.3
# Added user identified indemnity
# 9th February 2015
# Nirav Khimashia
#---------------------------------------------------------------
library(shiny)
#library(leaflet)
library(RColorBrewer)
library(scales)
library(lattice)
library(dplyr)
library(plotGoogleMaps)
source('global.R')
source('global_check_user_input.R')
library(leafletR)
library(rMaps)
library(shinyIncubator)
Sys.setlocale(locale="English")
#...............................................................................
# Load reference databases once at app start-up.  Everything below is placed
# in the global environment (`<<-`) so the server observers can reach it.
DB_Message = paste('IARP Version 1.3'); print(DB_Message)
DB_Message = paste('Libraries loaded - ', Sys.time()); print(DB_Message)
# Product types (crop/season combinations).
CropSeasons <- readRDS('data/CropSeasons.Rds')
Product_type.db <<- get_Product_type_db(CropSeasons)
# Administrative hierarchy (states and districts).
Districts <- readRDS('data/Districts.Rds')
States <- readRDS('data/States.Rds')
adminID.db <<- get_adminID_db(Districts,States)
# Modelled exposure per risk item / crop / admin unit.
Risk_Items <- readRDS('data/Risk_Items.Rds')
Crops <- readRDS('data/Crops.Rds')
Exposure.db <<- get_exposure_db(Risk_Items, Crops, adminID.db)
# Year-wise loss costs and yields feeding the guaranteed-yield databases.
raw_WBCIS <- readRDS('data/Risk_Items_YearWise_LossCosts.Rds')
raw_historic <- readRDS('data/Risk_Items_YearWise_Historical_Yields.Rds')
raw_synthetic <- readRDS('data/Risk_Items_YearWise_Synthetic_Yields.Rds')
WBCIS_gy.db <<- get_gy_db(raw_WBCIS, Risk_Items, adminID.db)
Historic_gy.db <<- get_gy_db(raw_historic, Risk_Items, adminID.db)
Synthetic_gy.db <<- get_gy_db(raw_synthetic, Risk_Items, adminID.db)
DB_Message = paste('All Databases loaded and prepared - ', Sys.time()); print(DB_Message)
# Free raw inputs no longer needed.
# NOTE(review): raw_WBCIS is not removed here -- confirm whether intentional.
rm(CropSeasons, Districts, States, Risk_Items, Crops, raw_historic, raw_synthetic)
#...............................................................................
# display.flag tracks whether the shared display array already holds rows.
display.flag <<- 0
# Shiny server: input validation, data audit, disaggregation and loss
# simulation for the IARP tool.
shinyServer(function(input, output, session) {
#------------------------------------------------------------------------
  # Clean all variables and screen
  # "Clear Display" button: wipe every piece of shared state and blank all
  # result tables so a fresh analysis can be started.
  # NOTE(review): all state is held in the global environment via `<<-`, so
  # this reset (and the app as a whole) is effectively single-session --
  # confirm the deployment is single-user.
  observe({
    input$ClearDisplay
    # Action buttons start at 0 -- skip the reset that would fire on startup.
    if (input$ClearDisplay == 0)
      return()
    isolate({
      # Validated user input and its UI display copy.
      display.flag <<- 0; display_array <<- NULL
      # MNAIS / WBCIS data-audit summaries (raw + formatted display copies).
      MNAISdata_audit_array <<- NULL; MNAISdata_audit_display_array <<- NULL
      WBCISdata_audit_array <<- NULL; WBCISdata_audit_display_array <<- NULL
      # Disaggregated exposure tables (internal + display versions).
      MNAIS_Dissaggregated_exposure.db <<- NULL; MNAIS_Display_Dissaggregated_exposure.db <<- NULL
      WBCIS_Dissaggregated_exposure.db <<- NULL; WBCIS_Display_Dissaggregated_exposure.db <<- NULL
      # Indemnity-loss results for the historic and synthetic yield scenarios.
      IND_LOSS_Historic_gy.db <<- NULL
      IND_LOSS_Synthetic_gy.db <<- NULL
      Display_IND_LOSS_Historic_gy.db <<- NULL
      Display_IND_LOSS_Synthetic_gy.db <<- NULL
      # Per-aggregation-level loss tables (levels 1-4).
      L1_loss_Historic_gy.final <<- NULL
      L2_loss_Historic_gy.final <<- NULL
      L3_loss_Historic_gy.final <<- NULL
      L4_loss_Historic_gy.final <<- NULL
      L1_loss_Synthetic_gy.final <<- NULL
      L2_loss_Synthetic_gy.final <<- NULL
      L3_loss_Synthetic_gy.final <<- NULL
      L4_loss_Synthetic_gy.final <<- NULL
      # State-level summary tables shown in the UI.
      Historic_summary_display_final <<- NULL
      Synthetic_summary_display_final <<- NULL
      # WBCIS loss results (full table + levels 1-4).
      WBCIS.final <<- NULL
      L1_WBCIS_loss.final <<- NULL
      L2_WBCIS_loss.final <<- NULL
      L3_WBCIS_loss.final <<- NULL
      L4_WBCIS_loss.final <<- NULL
      # Blank every data table currently rendered in the UI.
      output$UserInput <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$MNAISDataAudit <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$WBCISDataAudit <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$MNAISDisplayDissaggregated <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$WBCISDisplayDissaggregated <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$HistoricLosses <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$ModelledLosses <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      output$WBCISLosses <- renderDataTable({return(NULL)}, options = list(orderClasses = TRUE))
      #output$Data_Audit_LOB_Pie <- renderPlot({NULL)})
      # Re-source the globals so helper functions/constants are restored.
      source('global.R')
    })
  })
#------------------------------------------------------------------------
# read in a file and store in display array
observe({
input$UserInput # Do take a dependency on file input
inFile <- input$UserInput
if (is.null(inFile))
return(NULL)
#-------------------------------------------------------------------------------------------------
# Busy Animation
# Create a Progress object
progress <- shiny::Progress$new()
progress$set(message = "Computing ....", value = 0)
on.exit(progress$close())
updateProgress <- function(value = NULL, detail = NULL)
{if (is.null(value)) {value <- progress$getValue(); value <- value + (progress$getMax() - value) / 7; Sys.sleep(1)}
progress$set(value = value, detail = detail)}
#-------------------------------------------------------------------------------------------------
# Get and check user input file
raw_input <<- read.csv(inFile$datapath, header = T, sep = ',', quote = input$quote) # No dependency on input$dataset
if (is.function(updateProgress)) {updateProgress(detail = 'Validating User Input ...........')}
Checked_raw_input <- Check_UserInput(raw_input, adminID.db, Exposure.db, Product_type.db, Check_UserInput_Name_Mismatch, Check_UserInput_Prepare_Exposure_db, Check_UserInput_modelled_adminlevel, Check_UserInput_TSI_check)
Message=paste('Validated User Input ...........', Sys.time()); print(Message)
if (is.function(updateProgress)) {updateProgress(detail = 'Validation Successful ...........')}
#-------------------------------------------------------------------------------------------------
# prepare data for UI Display
file_input <- data.frame(lapply(Checked_raw_input, as.character), stringsAsFactors=FALSE)
if(display.flag == 1) {display_array <<- rbind(file_input, display_array)}
if(display.flag == 0) {display_array <<- file_input; display.flag <<- 1}
if (is.function(updateProgress)) {updateProgress(detail = 'Prepare data for UI Display ...........')}
#-------------------------------------------------------------------------------------------------
# Output Data in to the UI
output$UserInput <- renderDataTable({return(display_array)}, options = list(orderClasses = TRUE))
#---------------------------------------------------------------------------------------------
# perform MNAIS data audit and display in to the UI
MNAIS_display_array <- display_array[,-10]
MNAISdata_audit_array <<- as.data.frame(Perform_Data_Audit(MNAIS_display_array))
x_flag = 0
if(!is.null(MNAISdata_audit_array))
{
if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS Data Audit computing ...........')}
State = rownames(MNAISdata_audit_array)
MNAISdata_audit_display_array <<- cbind(State, format(MNAISdata_audit_array, scientific=FALSE))
output$MNAISDataAudit <- renderDataTable({return(MNAISdata_audit_display_array)}, options = list(orderClasses = TRUE))
x_flag = 1
MNAISdata_audit_array <<- as.data.frame(MNAISdata_audit_array)
Message=paste('MNAIS Data Audit computed ....', Sys.time()); print(Message)
if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS Data Audit computed ...........')}
}
# perform WBCIS data audit and display in to the UI
WBCIS_display_array <- display_array[,-9]
WBCISdata_audit_array <<- Perform_Data_Audit(WBCIS_display_array)
y_flag = 0
if(!is.null(WBCISdata_audit_array))
{
if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS Data Audit computing ...........')}
State = rownames(WBCISdata_audit_array)
WBCISdata_audit_display_array <<- cbind(State, format(WBCISdata_audit_array, scientific=FALSE))
output$WBCISDataAudit <- renderDataTable({return(WBCISdata_audit_display_array)}, options = list(orderClasses = TRUE))
y_flag = 1
WBCISdata_audit_array <<- as.data.frame(WBCISdata_audit_array)
Message=paste('WBCIS Data Audit computed ....', Sys.time()); print(Message)
if (is.function(updateProgress)) {updateProgress(detail = 'WBCIS Data Audit computed ...........')}
}
if((x_flag == 1) && (y_flag == 0)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot_1(MNAISdata_audit_array, "MNAIS Line of Business")})}
if((x_flag == 0) && (y_flag == 1)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot_1(WBCISdata_audit_array, "WBCIS Line of Business")})}
if((x_flag == 1) && (y_flag == 1)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot(MNAISdata_audit_array,WBCISdata_audit_array)})}
if (is.function(updateProgress)) {updateProgress(detail = 'LOB Graphics computed ...........')}
#---------------------------------------------------------------------------------------------
# Pie Chart with Percentages
#output$Data_Audit_State_TSI <- renderPlot({State_TSI_Plot(MNAISdata_audit_array, WBCISdata_audit_array)})
})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Download User Input
output$Download_DisplayArray <- downloadHandler(
filename = function() { paste('Validated_User_input.csv', sep='') },
content = function(file) {write.csv(display_array, file)})
# Download Data Audit Summary
output$Download_MNAISDataAuditSummary <- downloadHandler(
filename = function() { paste('MNAIS_Data_Audit_Summary', '.csv', sep='') },
content = function(file) {write.csv(MNAISdata_audit_display_array, file)})
output$Download_WBCISDataAuditSummary <- downloadHandler(
filename = function() { paste('WBCIS_Data_Audit_Summary', '.csv', sep='') },
content = function(file) {write.csv(WBCISdata_audit_display_array, file)})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# read in all single entry inputs and store in display array
  # Single-entry path: when the "go" button fires, read the individual
  # widget values, validate them through Check_UserInput(), stack the row
  # onto the session-global display_array, and refresh both data audits.
  observe({
    input$goButton
    if (input$goButton == 0)
      return()
    isolate({
      # NOTE(review): ContractID_input is read here but never included in
      # single_entry_input below -- confirm whether the contract name is
      # meant to be carried through.
      ContractID_input <- input$Unique_Contract_Name
      State_input <- input$Unique_States_input
      District_input <- input$Unique_District_input
      Crop_input <- input$Unique_Crop_input
      Season_input <- input$Unique_Season_input
      TSI <- input$TSI
      EPI <- input$EPI
      PR <- input$PR
      #if(District_input == 'All'){District_input = NA}
      single_entry_input <<- isolate(cbind(State_input, District_input, Crop_input, Season_input, TSI, EPI, PR))
      # Validate, then coerce every column to character so the row can be
      # rbind-ed with rows coming from the file-upload path.
      Checked_single_entry_input.tmp <- as.data.frame(Check_UserInput(single_entry_input,adminID.db,Exposure.db,Product_type.db))
      Checked_single_entry_input <- data.frame(lapply(Checked_single_entry_input.tmp, as.character), stringsAsFactors=FALSE)
      if(display.flag > 0) {display_array <<- rbind(Checked_single_entry_input, display_array)}
      if(display.flag == 0) {display_array <<- Checked_single_entry_input; display.flag <<- 1}
      #---------------------------------------------------------------------------------------------
      # perform MNAIS data audit and display in to the UI
      # NOTE(review): this path drops column 9 for MNAIS and column 8 for
      # WBCIS, while the file-upload observer drops columns 10 and 9 --
      # confirm the intended column layout of display_array in each path.
      MNAIS_display_array <- display_array[,-9]
      MNAISdata_audit_array <<- Perform_Data_Audit(MNAIS_display_array)
      x_flag = 0
      if(!is.null(MNAISdata_audit_array))
      {
        State = rownames(MNAISdata_audit_array)
        MNAISdata_audit_display_array <<- cbind(State, format(MNAISdata_audit_array, scientific=FALSE))
        output$MNAISDataAudit <- renderDataTable({return(MNAISdata_audit_display_array)}, options = list(orderClasses = TRUE))
        x_flag = 1
      }
      # perform WBCIS data audit and display in to the UI
      WBCIS_display_array <- display_array[,-8]
      WBCISdata_audit_array <<- Perform_Data_Audit(WBCIS_display_array)
      y_flag = 0
      if(!is.null(WBCISdata_audit_array))
      {
        State = rownames(WBCISdata_audit_array)
        WBCISdata_audit_display_array <<- cbind(State, format(WBCISdata_audit_array, scientific=FALSE))
        output$WBCISDataAudit <- renderDataTable({return(WBCISdata_audit_display_array)}, options = list(orderClasses = TRUE))
        y_flag = 1
      }
      # Pie chart: single-LOB plot when only one audit succeeded, combined when both did.
      if((x_flag == 1) && (y_flag == 0)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot_1(MNAISdata_audit_array, "MNAIS Line of Business")})}
      if((x_flag == 0) && (y_flag == 1)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot_1(WBCISdata_audit_array, "WBCIS Line of Business")})}
      if((x_flag == 1) && (y_flag == 1)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot(MNAISdata_audit_array,WBCISdata_audit_array)})}
      #---------------------------------------------------------------------------------------------
    })
  })
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Display "display array" when go button is presses
  # Re-render the accumulated display_array and recompute both data audits
  # whenever the "go" button fires.
  # NOTE(review): this observer and the single-entry observer above both
  # trigger on input$goButton, so the audit work runs twice per click --
  # confirm whether the duplication is intentional.
  observe({
    input$goButton
    if (input$goButton == 0)
      return()
    # display user input
    isolate({output$UserInput <- renderDataTable({return(display_array)}, options = list(orderClasses = TRUE))}) #isolate
    #---------------------------------------------------------------------------------------------
    # perform MNAIS data audit and display in to the UI
    MNAIS_display_array <- display_array[,-9]
    MNAISdata_audit_array <<- Perform_Data_Audit(MNAIS_display_array)
    x_flag = 0
    if(!is.null(MNAISdata_audit_array))
    {
      State = rownames(MNAISdata_audit_array)
      MNAISdata_audit_display_array <<- cbind(State, format(MNAISdata_audit_array, scientific=FALSE))
      output$MNAISDataAudit <- renderDataTable({return(MNAISdata_audit_display_array)}, options = list(orderClasses = TRUE))
      x_flag = 1
    }
    # perform WBCIS data audit and display in to the UI
    WBCIS_display_array <- display_array[,-8]
    WBCISdata_audit_array <<- Perform_Data_Audit(WBCIS_display_array)
    y_flag = 0
    if(!is.null(WBCISdata_audit_array))
    {
      State = rownames(WBCISdata_audit_array)
      WBCISdata_audit_display_array <<- cbind(State, format(WBCISdata_audit_array, scientific=FALSE))
      output$WBCISDataAudit <- renderDataTable({return(WBCISdata_audit_display_array)}, options = list(orderClasses = TRUE))
      y_flag = 1
    }
    # Pie chart: single-LOB plot when only one audit succeeded, combined when both did.
    if((x_flag == 1) && (y_flag == 0)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot_1(MNAISdata_audit_array, "MNAIS Line of Business")})}
    if((x_flag == 0) && (y_flag == 1)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot_1(WBCISdata_audit_array, "WBCIS Line of Business")})}
    if((x_flag == 1) && (y_flag == 1)){output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot(MNAISdata_audit_array,WBCISdata_audit_array)})}
    #---------------------------------------------------------------------------------------------
    # Legacy single-audit code, kept for reference:
    # if(!is.null(data_audit_array))
    # {
    #   # display data audit
    #   State = rownames(data_audit_array)
    #   data_audit_display_array <- cbind(State, format(data_audit_array, scientific=FALSE))
    #   isolate({output$DataAudit <- renderDataTable({return(data_audit_display_array)}, options = list(orderClasses = TRUE))}) #isolate
    #
    #   # Pie Chart with Percentages & barchart for state vs TSI
    #   output$Data_Audit_LOB_Pie <- renderPlot({LOB_Pie_Plot(data_audit_array) })
    #   output$Data_Audit_State_TSI <- renderPlot({State_TSI_Plot(data_audit_array)})
    # }
  })
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Display "Dissaggregate" when go button is presses
  # Disaggregation handler: when the Dissaggregate button fires, convert the
  # validated user rows to internal IDs, disaggregate the aggregate exposure
  # down to modelled admin units for MNAIS and WBCIS separately, and render
  # the resulting tables.  Results are promoted to session globals (<<-) for
  # the simulation observers and the download handlers.
  observe({
    input$Dissaggregate
    if (input$Dissaggregate == 0)
      return()
    #-------------------------------------------------------------------------------------------------
    # Busy Animation
    # Create a Progress object; each call without a value advances the bar
    # 1/8 of the remaining distance.
    progress <- shiny::Progress$new()
    progress$set(message = "Computing ....", value = 0)
    on.exit(progress$close())
    updateProgress <- function(value = NULL, detail = NULL)
    {if (is.null(value)) {value <- progress$getValue(); value <- value + (progress$getMax() - value) / 8; Sys.sleep(1)}
      progress$set(value = value, detail = detail)}
    # allow for district errors to pass through
    # (column 10 dropped for MNAIS, column 9 for WBCIS)
    MNAIS_display_array <- display_array[,-10]; if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS array filtered ...........')}
    WBCIS_display_array <- display_array[,-9]; if (is.function(updateProgress)) {updateProgress(detail = 'WBCIS array filtered ...........')}
    #options(warn=-1)
    if(!is.null(MNAIS_display_array))
    {
      MNAIS_display_array <- deduct_district_error_tsi(MNAIS_display_array)
      if(nrow(MNAIS_display_array) > 0)
      {
        MNAIS_display_array = as.data.frame(Convert_Par_to_ID(MNAIS_display_array, adminID.db, Product_type.db))
        Message=paste('MNAIS Parameter to ID Conversion successful ....', Sys.time()); print(Message)
        #...............................................................................
        # ASSUMPTION USER INPUT DOES NOT CONTAIN ANY UNMODELLED DISTRICTS ANY MORE
        MNAIS_Exposure.db <- get_mutually_exclusive_exposure(MNAIS_display_array, Exposure.db) # get mutually exclusive modelled states
        MNAIS_Dissaggregated_exposure.db <- disaggregate_exposure(MNAIS_Exposure.db, MNAIS_display_array, Aggregate_user_exposure, district_state_level_disaggregation, district_level_disaggregation, state_level_disaggregation)
        Message=paste('MNAIS Dissaggregation successful ....', Sys.time()); print(Message)
        if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS Dissaggregation successful ...........')}
        # Promote the local result to a session global of the same name.
        MNAIS_Dissaggregated_exposure.db <<- as.data.frame(MNAIS_Dissaggregated_exposure.db)
        MNAIS_Display_Dissaggregated_exposure.db <<- Convert_ID_to_Par_Dissagregate(MNAIS_Dissaggregated_exposure.db, adminID.db, Product_type.db)
        MNAIS_Display_Dissaggregated_exposure.db = MNAIS_Display_Dissaggregated_exposure.db[,c(-6), drop=FALSE] #remove 'is modelled' tab
        # Round columns 5 and 6 and suppress scientific notation for display.
        # NOTE(review): 'numeric = TRUE' is not an argument of format(); it is
        # silently swallowed by format's '...' -- likely meant to be removed
        # or replaced (e.g. nsmall) -- confirm.
        MNAIS_Display_Dissaggregated_exposure.db[,5] <- format(round((as.numeric(as.character(MNAIS_Display_Dissaggregated_exposure.db[,5]))), 0), numeric = TRUE)
        MNAIS_Display_Dissaggregated_exposure.db[,6] <- format(round((as.numeric(as.character(MNAIS_Display_Dissaggregated_exposure.db[,6]))), 0), numeric = TRUE)
        MNAIS_Display_Dissaggregated_exposure.db[,5] = format(MNAIS_Display_Dissaggregated_exposure.db[,5], scientific = FALSE)
        MNAIS_Display_Dissaggregated_exposure.db[,6] = format(MNAIS_Display_Dissaggregated_exposure.db[,6], scientific = FALSE)
        MNAIS_Display_Dissaggregated_exposure.final <<- MNAIS_Display_Dissaggregated_exposure.db[,, drop = FALSE]
        isolate({output$MNAISDisplayDissaggregated <- renderDataTable({return(MNAIS_Display_Dissaggregated_exposure.final)}, options = list(orderClasses = TRUE))}) #isolate
        #...............................................................................
      }
    }
    if(!is.null(WBCIS_display_array))
    {
      WBCIS_display_array <- deduct_district_error_tsi(WBCIS_display_array)
      if(nrow(WBCIS_display_array) > 0)
      {
        WBCIS_display_array = as.data.frame(Convert_Par_to_ID(WBCIS_display_array, adminID.db, Product_type.db))
        Message=paste('WBCIS Parameter to ID Conversion successful ....', Sys.time()); print(Message)
        # #...............................................................................
        # # ASSUMPTION USER INPUT DOES NOT CONTAIN ANY UNMODELLED DISTRICTS ANY MORE
        WBCIS_Exposure.db <- get_mutually_exclusive_exposure(WBCIS_display_array, Exposure.db)
        WBCIS_Dissaggregated_exposure.db <- disaggregate_exposure_WBCIS(WBCIS_Exposure.db, WBCIS_display_array,Aggregate_user_exposure, district_state_level_disaggregation, district_level_disaggregation, state_level_disaggregation)
        Message=paste('WBCIS Dissaggregation successful ....', Sys.time()); print(Message)
        if (is.function(updateProgress)) {updateProgress(detail = 'WBCIS Dissaggregation successful ...........')}
        # Promote the local result to a session global of the same name.
        WBCIS_Dissaggregated_exposure.db <<- as.data.frame(WBCIS_Dissaggregated_exposure.db, drop = FALSE)
        WBCIS_Display_Dissaggregated_exposure.db <<- Convert_ID_to_Par_Dissagregate(WBCIS_Dissaggregated_exposure.db, adminID.db, Product_type.db)
        WBCIS_Display_Dissaggregated_exposure.db = WBCIS_Display_Dissaggregated_exposure.db[,c(-6), drop = FALSE] #remove 'is modelled' tab
        # Same rounding/formatting as the MNAIS branch above (same
        # NOTE(review) about the bogus 'numeric = TRUE' argument applies).
        WBCIS_Display_Dissaggregated_exposure.db[,5] <- format(round((as.numeric(as.character(WBCIS_Display_Dissaggregated_exposure.db[,5]))), 0), numeric = TRUE)
        WBCIS_Display_Dissaggregated_exposure.db[,6] <- format(round((as.numeric(as.character(WBCIS_Display_Dissaggregated_exposure.db[,6]))), 0), numeric = TRUE)
        WBCIS_Display_Dissaggregated_exposure.db[,5] = format(WBCIS_Display_Dissaggregated_exposure.db[,5], scientific = FALSE)
        WBCIS_Display_Dissaggregated_exposure.db[,6] = format(WBCIS_Display_Dissaggregated_exposure.db[,6], scientific = FALSE)
        WBCIS_Display_Dissaggregated_exposure.final <<- WBCIS_Display_Dissaggregated_exposure.db[,, drop = FALSE]
        isolate({output$WBCISDisplayDissaggregated <- renderDataTable({return(WBCIS_Display_Dissaggregated_exposure.final)}, options = list(orderClasses = TRUE))}) #isolate
        #...............................................................................
      } }
    #options(warn=0)
  })
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Download Dissaggregated Exposure
output$Download_MNAIS_Disaggregated_Exposure <- downloadHandler(
filename = function() { paste('MNAIS_Dissaggregated_exposure.csv', sep='') },
content = function(file) {write.csv(MNAIS_Display_Dissaggregated_exposure.final, file)})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Download Dissaggregated Exposure
output$Download_WBCIS_Disaggregated_Exposure <- downloadHandler(
filename = function() { paste('WBCIS_Dissaggregated_exposure.csv', sep='') },
content = function(file) {write.csv(WBCIS_Display_Dissaggregated_exposure.final, file)})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Compute Simulation
  # MNAIS simulation: attach guaranteed yields to the historic and synthetic
  # yield databases, compute indemnity losses, aggregate them to levels 1-4
  # (stored as session globals for the download handlers), and render the
  # historic / modelled summary tables.
  observe({
    input$MNAIS_Simulation
    if(input$MNAIS_Simulation == 0)
      return()
    #-------------------------------------------------------------------------------------------------
    # Busy Animation
    # Create a Progress object; each call without a value advances the bar
    # 1/5 of the remaining distance.
    progress <- shiny::Progress$new()
    progress$set(message = "Computing ....", value = 0)
    on.exit(progress$close())
    updateProgress <- function(value = NULL, detail = NULL)
    {if (is.null(value)) {value <- progress$getValue(); value <- value + (progress$getMax() - value) / 5; Sys.sleep(1)}
      progress$set(value = value, detail = detail)}
    #...............................................................................
    # Attach Guaranteed Yield (requires the Dissaggregate step to have run,
    # since it reads the MNAIS_Dissaggregated_exposure.db global).
    UserInput.db <- MNAIS_Dissaggregated_exposure.db
    Historic_gy.db = Get_Guaranteed_gy(Historic_gy.db , UserInput.db, Exposure.db); if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS - Attached Guaranteed Yield to Historic exposure ...........')}
    Synthetic_gy.db = Get_Guaranteed_gy(Synthetic_gy.db, UserInput.db, Exposure.db); if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS - Attached Guaranteed Yield to Synthetic exposure ...........')}
    Message=paste('MNAIS - Attach Guaranteed Yield ....', Sys.time()); print(Message)
    #.................................................................................
    #...............................................................................
    # Compute Indemnity Loss
    # gy.db = Historic_gy.db
    if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS - Indemnity loss computing ...............')}
    IND_LOSS_Historic_gy.db <<- Compute_Indemnity_loss(Historic_gy.db)
    IND_LOSS_Synthetic_gy.db <<- Compute_Indemnity_loss(Synthetic_gy.db)
    Display_IND_LOSS_Historic_gy.db <<- Convert_ID_to_Par_detailed_Losses(IND_LOSS_Historic_gy.db, adminID.db, Product_type.db)
    Display_IND_LOSS_Synthetic_gy.db <<- Convert_ID_to_Par_detailed_Losses(IND_LOSS_Synthetic_gy.db, adminID.db, Product_type.db)
    Message=paste('MNAIS - Indemnity loss computed ....', Sys.time()); print(Message)
    if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS - Indemnity loss computed ...............')}
    LOSS_Historic_gy.db <- Compute_aggregate(IND_LOSS_Historic_gy.db, Product_type.db, adminID.db)
    LOSS_Synthetic_gy.db <- Compute_aggregate(IND_LOSS_Synthetic_gy.db, Product_type.db, adminID.db)
    Message=paste('MNAIS - Level Aggregation computed ....', Sys.time()); print(Message)
    if (is.function(updateProgress)) {updateProgress(detail = 'MNAIS - Level Aggregation computed ...............')}
    # Split the aggregate tables into one global per aggregation level
    # (column 1 carries the 'level1'..'level4' tag) for separate download.
    L1_loss_Historic_gy.final <<- unique(LOSS_Historic_gy.db[LOSS_Historic_gy.db[,1] == 'level1',])
    L2_loss_Historic_gy.final <<- unique(LOSS_Historic_gy.db[LOSS_Historic_gy.db[,1] == 'level2',])
    L3_loss_Historic_gy.final <<- unique(LOSS_Historic_gy.db[LOSS_Historic_gy.db[,1] == 'level3',])
    L4_loss_Historic_gy.final <<- unique(LOSS_Historic_gy.db[LOSS_Historic_gy.db[,1] == 'level4',])
    L1_loss_Synthetic_gy.final <<- unique(LOSS_Synthetic_gy.db[LOSS_Synthetic_gy.db[,1] == 'level1',])
    L2_loss_Synthetic_gy.final <<- unique(LOSS_Synthetic_gy.db[LOSS_Synthetic_gy.db[,1] == 'level2',])
    L3_loss_Synthetic_gy.final <<- unique(LOSS_Synthetic_gy.db[LOSS_Synthetic_gy.db[,1] == 'level3',])
    L4_loss_Synthetic_gy.final <<- unique(LOSS_Synthetic_gy.db[LOSS_Synthetic_gy.db[,1] == 'level4',])
    #...............................................................................
    #...............................................................................
    # Display Losses: per-state summaries with row names lifted into a
    # 'State' column and scientific notation suppressed.
    Historic_summary_display <- Compute_display_aggregate(IND_LOSS_Historic_gy.db, Product_type.db, adminID.db)
    State = rownames(Historic_summary_display)
    Historic_summary_display_final <<- cbind(State, format(Historic_summary_display, scientific=FALSE))
    Synthetic_summary_display <- Compute_display_aggregate(IND_LOSS_Synthetic_gy.db, Product_type.db, adminID.db)
    State = rownames(Synthetic_summary_display)
    Synthetic_summary_display_final <<- cbind(State, format(Synthetic_summary_display, scientific=FALSE))
    isolate({output$HistoricLosses <- renderDataTable({return(Historic_summary_display_final)}, options = list(orderClasses = TRUE))}) #isolate
    isolate({output$ModelledLosses <- renderDataTable({return(Synthetic_summary_display_final)}, options = list(orderClasses = TRUE))}) #isolate
    #...............................................................................
  })
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Compute Simulation
  # WBCIS simulation: merge the disaggregated WBCIS exposure with the WBCIS
  # guaranteed-yield / loss-percentage table, compute indemnity losses
  # (TSI x loss fraction), aggregate them to levels 1-4 (stored as session
  # globals for the download handlers), and render the result.
  observe({
    input$WBCIS_Simulation
    if(input$WBCIS_Simulation == 0)
      return()
    #-------------------------------------------------------------------------------------------------
    # Busy Animation
    # Create a Progress object; each call without a value advances the bar
    # 1/2 of the remaining distance.
    progress <- shiny::Progress$new()
    progress$set(message = "Computing ....", value = 0)
    on.exit(progress$close())
    updateProgress <- function(value = NULL, detail = NULL)
    {if (is.null(value)) {value <- progress$getValue(); value <- value + (progress$getMax() - value) / 2; Sys.sleep(1)}
      progress$set(value = value, detail = detail)}
    #...............................................................................
    # Calculate WBCIS Loss (requires the Dissaggregate step to have run).
    UserInput.db <- as.data.frame(WBCIS_Display_Dissaggregated_exposure.db)
    WBCIS_GY <- Convert_ID_to_Par_WBCIS(WBCIS_gy.db, adminID.db, Product_type.db)
    # NOTE(review): 't' shadows base::t within this observer -- consider a
    # more descriptive name.
    t = merge(UserInput.db, WBCIS_GY, by = c('State_Name','District_name','Crop_Name','Season_Name'))
    WBCIS.tmp = t[,c(-6,-7,-8)]
    WBCIS.tmp[,7] = WBCIS.tmp[,7] / 100   # column 7: loss percentage -> fraction
    TSI = as.numeric(as.character(WBCIS.tmp[,5]))
    LossP = as.numeric(as.character(WBCIS.tmp[,7]))
    Indemnity_Loss = TSI * LossP
    WBCIS.final <<- cbind(WBCIS.tmp, Indemnity_Loss)
    Message=paste('WBCIS - Loss Computed ....', Sys.time()); print(Message)
    if (is.function(updateProgress)) {updateProgress(detail = 'WBCIS - Loss Computed .................')}
    #...............................................................................
    Aggregated_WBCIS_Losses <- Compute_aggregate_WBCIS(WBCIS.final, Product_type.db, adminID.db)
    Message=paste('WBCIS - Level Aggregation computed ....', Sys.time()); print(Message)
    if (is.function(updateProgress)) {updateProgress(detail = 'WBCIS - Level Aggregation computed .................')}
    # Split into one global per aggregation level (column 1 carries the tag).
    L1_WBCIS_loss.final <<- unique(Aggregated_WBCIS_Losses[Aggregated_WBCIS_Losses[,1] == 'level1',])
    L2_WBCIS_loss.final <<- unique(Aggregated_WBCIS_Losses[Aggregated_WBCIS_Losses[,1] == 'level2',])
    L3_WBCIS_loss.final <<- unique(Aggregated_WBCIS_Losses[Aggregated_WBCIS_Losses[,1] == 'level3',])
    L4_WBCIS_loss.final <<- unique(Aggregated_WBCIS_Losses[Aggregated_WBCIS_Losses[,1] == 'level4',])
    isolate({output$WBCISLosses <- renderDataTable({return(Aggregated_WBCIS_Losses)}, options = list(orderClasses = TRUE))}) #isolate
    #------------------------------------------------------------------------
  })
#------------------------------------------------------------------------
# Download Historic Losses
output$Download_WBCIS_l1 <- downloadHandler(
filename = function() { paste('Level1_WBCIS_Losses.csv', sep='') },
content = function(file) {write.csv(L1_WBCIS_loss.final, file)})
output$Download_WBCIS_l2 <- downloadHandler(
filename = function() { paste('Level2_WBCIS_Losses.csv', sep='') },
content = function(file) {write.csv(L2_WBCIS_loss.final, file)})
output$Download_WBCIS_l3 <- downloadHandler(
filename = function() { paste('Level3_WBCIS_Losses.csv', sep='') },
content = function(file) {write.csv(L3_WBCIS_loss.final, file)})
output$Download_WBCIS_l4 <- downloadHandler(
filename = function() { paste('Level4_WBCIS_Losses.csv', sep='') },
content = function(file) {write.csv(L4_WBCIS_loss.final, file)})
output$Download_WBCIS_l5 <- downloadHandler(
filename = function() { paste('Level5_WBCIS_Losses.csv', sep='') },
content = function(file) {write.csv(WBCIS.final, file)})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Download Historic Losses
output$Download_historic_l1 <- downloadHandler(
filename = function() { paste('Level1_Historic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L1_loss_Historic_gy.final, file)})
output$Download_historic_l2 <- downloadHandler(
filename = function() { paste('Level2_Historic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L2_loss_Historic_gy.final, file)})
output$Download_historic_l3 <- downloadHandler(
filename = function() { paste('Level3_Historic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L3_loss_Historic_gy.final, file)})
output$Download_historic_l4 <- downloadHandler(
filename = function() { paste('Level4_Historic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L4_loss_Historic_gy.final, file)})
output$Download_historic_l5 <- downloadHandler(
filename = function() { paste('Level5_Historic_Losses', '.csv', sep='') },
content = function(file) {write.csv(Display_IND_LOSS_Historic_gy.db, file)})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Download Synthetic Losses
output$Download_synthetic_l1 <- downloadHandler(
filename = function() { paste('Level1_Synthetic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L1_loss_Synthetic_gy.final, file)})
output$Download_synthetic_l2 <- downloadHandler(
filename = function() { paste('Level2_Synthetic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L2_loss_Synthetic_gy.final, file)})
output$Download_synthetic_l3 <- downloadHandler(
filename = function() { paste('Level3_Synthetic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L3_loss_Synthetic_gy.final, file)})
output$Download_synthetic_l4 <- downloadHandler(
filename = function() { paste('Level4_Synthetic_Losses', '.csv', sep='') },
content = function(file) {write.csv(L4_loss_Synthetic_gy.final, file)})
output$Download_synthetic_l5 <- downloadHandler(
filename = function() { paste('Level5_Synthetic_Losses', '.csv', sep='') },
content = function(file) {write.csv(Display_IND_LOSS_Synthetic_gy.db, file)})
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Display Interactive Map
output$myChart <- renderMap({
map3 <- Leaflet$new()
map3$tileLayer(provider = "MapQuestOpen.OSM")
map3$set(width = 1600, height = 800)
map3$setView(c(20,78), zoom = 4)
map3
})
#------------------------------------------------------------------------
})#on.exit(rm(list=ls(all=TRUE))) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_gallery_subjects.R
\name{get_gallery_subjects}
\alias{get_gallery_subjects}
\title{get gallery subjects}
\usage{
get_gallery_subjects(gallery)
}
\arguments{
\item{gallery}{The gallery in which the subjects are enrolled.}
}
\value{
A vector of subject IDs.
}
\description{
Returns all subject IDs associated with a gallery.
}
\examples{
\donttest{
facerec_init()
# enroll
finn_image <- 'https://upload.wikimedia.org/wikipedia/en/2/2a/Finn-Force_Awakens_\%282015\%29.png'
finn_enroll <- enroll(image = finn_image, subject_id = 'finn', gallery = 'starwars')
# view subjects
get_gallery_subjects(gallery = 'starwars')
}
}
| /man/get_gallery_subjects.Rd | permissive | guptam/facerec | R | false | true | 706 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_gallery_subjects.R
\name{get_gallery_subjects}
\alias{get_gallery_subjects}
\title{get gallery subjects}
\usage{
get_gallery_subjects(gallery)
}
\arguments{
\item{gallery}{The gallery in which the subjects are enrolled.}
}
\value{
A vector of subject IDs.
}
\description{
Returns all subject IDs associated with a gallery.
}
\examples{
\donttest{
facerec_init()
# enroll
finn_image <- 'https://upload.wikimedia.org/wikipedia/en/2/2a/Finn-Force_Awakens_\%282015\%29.png'
finn_enroll <- enroll(image = finn_image, subject_id = 'finn', gallery = 'starwars')
# view subjects
get_gallery_subjects(gallery = 'starwars')
}
}
|
library(shiny)
library(shinydashboard)
# Define UI for application that draws a histogram
shinyUI({
dbHeader <-
dashboardHeader(
title = 'ExploraDatos', titleWidth = 200,
tags$li(a(href = 'https://twitter.com/DBuesos',
img(src = 'https://pbs.twimg.com/profile_images/1101545919105978368/N5qTw1RD_400x400.png',
title = '@DBuesos', height = '30px'),
style = 'padding-top:10px; padding-bottom:10px;'),
class = 'dropdown'),
tags$li(a(href = 'https://github.com/jjsantos01/ExploraDatosMX',
img(src = 'https://image.flaticon.com/icons/svg/25/25231.svg',
title = '@DBuesos', height = '30px'),
style = 'padding-top:10px; padding-bottom:10px;'),
class = 'dropdown')
)
sidebar <- dashboardSidebar(
width = 200,
sidebarMenu(
menuItem("Introducción", tabName = "Intro"),
menuItem("Infografías", tabName = "InfoG"),
menuItem("Gráficas", tabName = "Graficas"),
menuItem("Sistema de Consulta", tabName = "Consulta"),
menuItem("Hallazgos", tabName = "H")
)
)
body <- dashboardBody(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "estilos.css")
),
tags$head(tags$style(HTML('
/* logo */
.skin-black .wrapper .main-header .logo{
color : #ffbfbf;
}
/* main sidebar */
.skin-black .main-sidebar {
background-color: #e2e2e2;
}
/* active selected tab in the sidebarmenu */
.skin-black .main-sidebar .sidebar .sidebar-menu .active a{
font-family: "Poppins", sans-serif;
background-color: #cccccc;
}
/* other links in the sidebarmenu */
.skin-black .main-sidebar .sidebar .sidebar-menu a{
background-color: #e2e2e2;
color: black;
}
/* other links in the sidebarmenu when hovered */
.skin-black .main-sidebar .sidebar .sidebar-menu a:hover{
background-color: white;
}
/* toggle button when hovered */
.skin-black .main-header .navbar .sidebar-toggle:hover{
background-color: white;
}
'))),
tabItems(
tabItem("Intro",
HTML('<html>
<div class=WordSection1>
<div class="contenedor">
<h1>LOS DERECHOS HUMANOS EN EN EL
GASTO PUBLICO: EL CASO DE LOS 31 PROGRAMAS PRESUPUESTARIOS </h1>
</div>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><i><span lang=ES
style="font-family:"Poppins",sans-serif"> </span></i></p>
<h2>Motivaciones</h2>
<p class=MsoNormal style="text-align:justify"><i><span lang=ES
style="font-family:"Poppins",sans-serif"> </span></i></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">Hemos vivido en una grave crisis de seguridad y en
un contexto de impunidad, que ha favorecido las constantes violaciones de DD.HH</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraphCxSpFirst style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">Existen
más de 35 mil desaparecidos. </span></p>
<p class=MsoListParagraphCxSpMiddle style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">Se
han encontrado más de 2 mil fosas clandestinas.</span></p>
<p class=MsoListParagraphCxSpMiddle style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">Al
menos 9 periodistas fueron asesinados durante 2018. </span></p>
<p class=MsoListParagraphCxSpMiddle style="text-indent:-.25in"><span
lang=ES-TRAD style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES-TRAD style="font-family:"Poppins",sans-serif;
color:#282828;background:white">Entre julio 2016 y diciembre han sido
asesinados 29 activistas ambientales.</span></p>
<p class=MsoListParagraphCxSpMiddle style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">El
incremento de municipios forzados a migrar, tan sólo en los municipios de
Chiapas ha habido más de 5 mil personas desplazadas.</span></p>
<p class=MsoListParagraphCxSpLast style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">Y
peor aún, cada vez aumentan las caravanas de migrantes centroamericanos en
nuestro país.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">Además, la fotografía no esta completa… se han
documentado distintas violaciones a los DD.HH de poblaciones vulnerables:
mujeres, lgbt +, afrodescendientes y, niños y adultos mayores.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<h2>Objetivo</h2>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">Ante esta grave situación, </span><span
lang=ES-TRAD style="font-family:"Poppins",sans-serif;color:#14171A;
background:white">hicimos un ejercicio de explorar las herramientas de datos
abiertos </span><span lang=ES style="font-family:"Poppins",sans-serif">de
la plataforma de transparencia presupuestaria de la SHCP.</span><span lang=ES
style="font-family:"Poppins",sans-serif;color:#14171A;background:white"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES-TRAD
style="font-family:"Poppins",sans-serif;color:#14171A;background:white"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES-TRAD
style="font-family:"Poppins",sans-serif;color:#14171A;background:white">Al
buscar en específico “derechos humanos”, se detectaron 31 programas que en
alguna de las clasificaciones presupuestales incluyen este término. </span><span
lang=ES-TRAD style="font-family:"Poppins",sans-serif;color:#14171A;
background:#F5F8FA">De esta manera, con fines de simplificación del análisis,
exploramos los derechos humanos desde las clasificaciones presupuestales de
dicho término.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">Pese a que hay presupuesto etiquetado para los
DD.HH en otras dependencias, otros ramos, y otras funciones, etc, decidimos
enfocarnos exclusivamente en los 31 programas presupuestarios, debido ese fue
el resultado de nuestra interacción con la plataforma de datos abiertos.
Además, que nuestros hallazgos pueden ser replicados, cualquier ciudadano que
escriba en el buscador “derechos humanos” tendrá nuestro mismo resultado, y
podrá realizar los mismos análisis.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">En este sentido, nuestro principal propósito fue explorar
quiénes, cómo y en qué se ejercen los 31 programas presupuestarios para atender
los problemas relativos a los DD.HH. </span></p>
<p class=MsoNormal><span lang=ES> </span></p>
</div>
</body>
</html>')
), # Fin del TabItem Introduccion
tabItem("InfoG",
h1("Infografias"),
tabsetPanel(
tabPanel(h2("Infografía 1: "),
br(),
HTML("<img src='infografias/Infografia 1.png' srcset='infografias/Infografia 1.png 1x, infografias/Infografia 1.png 2x' width='800' height='2000' alt='Infografia_1'/>"),
br(), br()
),
tabPanel(h2("Infografía 2"),
br(),
HTML("<img src='infografias/Infografia 2.png' srcset='infografias/Infografia 2.png 1x, infografias/Infografia 2.png 2x' width='800' height='2000' alt='Infografia_2'/>")
)
)
),
tabItem("Graficas",
h1("Gráficas"),
# tabsetPanel(
# tabPanel("Gasto en programas presupuestarios de Derechos Humanos",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/25.embed'></iframe> ")),
# tabPanel("Distribución del presupuesto por ramo, 2019",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/27.embed'></iframe>")
# ),
# tabPanel("Gasto en programas de población especifica de la CNDH",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/31.embed'></iframe>")
# ),
# tabPanel("Gasto en programas de DDHH, en los ramos Defensa, Segob y PGR",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/33.embed'></iframe>")
# ),
# tabPanel("Gasto en programas presupuestales de DDHH, por entidad, 2019",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/35.embed'></iframe>")
# ),
# tabPanel("Top 10 concepto de gasto en programas presupuestales de DDHH, 2019",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/37.embed'></iframe>")
# ),
# tabPanel("Proporción de representan los programas presupuestales en DDHH en el presupuesto total",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/39.embed'></iframe>")
# ),
# tabPanel("Recomendaciones emitidas por la CNDH",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/41.embed'></iframe>")
# )
# )
#
h2("Gasto en programas presupuestarios de Derechos Humanos"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/25.embed'></iframe> "),
h2("Distribución del presupuesto por ramo, 2019"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/27.embed'></iframe>"),
h2("Gasto en programas de población especifica de la CNDH"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/31.embed'></iframe>"),
h2("Gasto en programas de DDHH, en los ramos Defensa, Segob y PGR"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/33.embed'></iframe>"),
h2("Gasto en programas presupuestales de DDHH, por entidad, 2019"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/35.embed'></iframe>"),
h2("Top 10 concepto de gasto en programas presupuestales de DDHH, 2019"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/37.embed'></iframe>"),
h2("Proporción de representan los programas presupuestales en DDHH en el presupuesto total"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/39.embed'></iframe>"),
h2("Recomendaciones emitidas por la CNDH"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/41.embed'></iframe>")
),
tabItem("Consulta",
h1("Sistema de Consulta"),
p("A continuación, el equipo de DataBuesos ha elaborado un sistema de consulta gráfico
para poder consultar datos de manera gráfica en rubros relativos al
presupuesto de Derechos Humanos en el Gobierno Federal."),
fluidPage(
fluidRow(
column(12, box(title = "Información del presupuesto", status = 'warning', solidHeader = TRUE, width = '100%',
tabsetPanel(
tabPanel("Gasto por Dependencia y Programa",
selectInput(inputId = "Anio_Estatal", label = "Seleccione Año", choices = c(2013, 2014, 2015, 2016, 2017, 2018, 2019)),
HTML("<img src='LeyendaTreeMap.png' srcset='LeyendaTreeMap.png 1x, LeyendaTreeMap.png 2x' height='120' alt='leyenda_1'/>"),
shinycssloaders::withSpinner(highchartOutput('tm', width = 1000, height = 500))
),
tabPanel("Evolución de Gasto",
selectInput(inputId = "Sel_ramo", label = "Seleccione ramo o dependencia", choices = niveles(b1_ramo_year$DESC_RAMO)),
shinycssloaders::withSpinner(plotOutput("plot1", width = 1000, height = 500)),
shinycssloaders::withSpinner(plotlyOutput("plot2", width = 1000, height = 500))
)
)
)
)
)
)
),
tabItem("H",
h1("Hallazgos"),
HTML('<html>
<head>
<meta http-equiv=Content-Type content="text/html; charset=utf-8">
<meta name=Generator content="Microsoft Word 15 (filtered)">
<style>
<!--
/* Font Definitions */
@font-face
{font-family:Wingdings;
panose-1:5 0 0 0 0 0 0 0 0 0;}
@font-face
{font-family:"Cambria Math";
panose-1:2 4 5 3 5 4 6 3 2 4;}
@font-face
{font-family:Calibri;
panose-1:2 15 5 2 2 2 4 3 2 4;}
@font-face
{font-family:"Poppins";
panose-1:2 15 3 2 2 2 4 3 2 4;}
/* Style Definitions */
p.MsoNormal, li.MsoNormal, div.MsoNormal
{margin:0in;
margin-bottom:.0001pt;
font-size:12.0pt;
font-family:"Calibri",sans-serif;}
p.MsoListParagraph, li.MsoListParagraph, div.MsoListParagraph
{margin-top:0in;
margin-right:0in;
margin-bottom:0in;
margin-left:.5in;
margin-bottom:.0001pt;
font-size:12.0pt;
font-family:"Calibri",sans-serif;}
p.MsoListParagraphCxSpFirst, li.MsoListParagraphCxSpFirst, div.MsoListParagraphCxSpFirst
{margin-top:0in;
margin-right:0in;
margin-bottom:0in;
margin-left:.5in;
margin-bottom:.0001pt;
font-size:12.0pt;
font-family:"Calibri",sans-serif;}
p.MsoListParagraphCxSpMiddle, li.MsoListParagraphCxSpMiddle, div.MsoListParagraphCxSpMiddle
{margin-top:0in;
margin-right:0in;
margin-bottom:0in;
margin-left:.5in;
margin-bottom:.0001pt;
font-size:12.0pt;
font-family:"Calibri",sans-serif;}
p.MsoListParagraphCxSpLast, li.MsoListParagraphCxSpLast, div.MsoListParagraphCxSpLast
{margin-top:0in;
margin-right:0in;
margin-bottom:0in;
margin-left:.5in;
margin-bottom:.0001pt;
font-size:12.0pt;
font-family:"Calibri",sans-serif;}
.MsoChpDefault
{font-family:"Calibri",sans-serif;}
@page WordSection1
{size:8.5in 11.0in;
margin:70.85pt 85.05pt 70.85pt 85.05pt;}
div.WordSection1
{page:WordSection1;}
/* List Definitions */
ol
{margin-bottom:0in;}
ul
{margin-bottom:0in;}
-->
</style>
</head>
<body lang=ES-US>
<div class=WordSection1>
<h2>Principales hallazgos</h2>
<p class=MsoNormal style="text-align:justify"><i><span lang=ES
style="font-family:"Poppins",sans-serif"> </span></i></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">a) La agenda relativa a los DD.HH, no es una
prioridad dentro de los programas presupuestarios.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraph style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">De
un total de 740 programas presupuestarios, sólo hay 31 programas relativos a
los DD.HH. </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraph style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">De
los cuales 24 son operados por la CNDH, 2 por Defensa Nacional, 2 por
Gobernación, 1 por el INAI, 1 por la Marina y 1 por la PGR, ahora FGR. </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">b) El recurso de los programas presupuestarios ha
sido insuficiente para atender la problemática relacionada con los DD.HH, y
peor aún, ha disminuido en el tiempo. </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraph style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">Durante
el periodo 2013-2019, se aprobó un presupuesto de 18,542 millones de pesos para
los 31 programas presupuestarios.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraph style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">El
presupuesto aprobado en 2018 fue de 2,902 mdp y, en 2019 2,552 millones de
pesos, hablamos de un disminución de 12%, en términos reales.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraph><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><i><span lang=ES
style="font-family:"Poppins",sans-serif">c) </span></i><span lang=ES
style="font-family:"Poppins",sans-serif">Al respecto de las poblaciones vulnerables
</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraphCxSpFirst style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">El
presupuesto de 2019 disminuyó con respecto de 2018, para todos los casos: </span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:1.0in;text-align:justify;
text-indent:-.25in"><span lang=ES style="font-family:"Courier New"">o<span
style="font:7.0pt "Times New Roman""> </span></span><span lang=ES
style="font-family:"Poppins",sans-serif">personas migrantes; </span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:1.0in;text-align:justify;
text-indent:-.25in"><span lang=ES style="font-family:"Courier New"">o<span
style="font:7.0pt "Times New Roman""> </span></span><span lang=ES
style="font-family:"Poppins",sans-serif">igualdad de género; </span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:1.0in;text-align:justify;
text-indent:-.25in"><span lang=ES style="font-family:"Courier New"">o<span
style="font:7.0pt "Times New Roman""> </span></span><span lang=ES
style="font-family:"Poppins",sans-serif">personas desaparecidas; </span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:1.0in;text-align:justify;
text-indent:-.25in"><span lang=ES style="font-family:"Courier New"">o<span
style="font:7.0pt "Times New Roman""> </span></span><span lang=ES
style="font-family:"Poppins",sans-serif">periodistas y defensores de
DD.HH;</span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:1.0in;text-align:justify;
text-indent:-.25in"><span lang=ES style="font-family:"Courier New"">o<span
style="font:7.0pt "Times New Roman""> </span></span><span lang=ES
style="font-family:"Poppins",sans-serif">niñas, niños y adolescentes; y </span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:1.0in;text-align:justify;
text-indent:-.25in"><span lang=ES style="font-family:"Courier New"">o<span
style="font:7.0pt "Times New Roman""> </span></span><span lang=ES
style="font-family:"Poppins",sans-serif">sexualidad, salud y VIH. </span></p>
<p class=MsoListParagraphCxSpMiddle style="text-align:justify"><span lang=ES
style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraphCxSpMiddle style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">Los
programas presupuestarios de la CNDH dirigidos a personas migrantes han tenido
el mayor recurso aprobado, en más de 100 millones de pesos para los años 2018 y
2019.</span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:0in;text-align:justify"><span
lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraphCxSpMiddle style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">En
caso contrario, los programas presupuestarios dirigidos a la sexualidad, salud
y VIH y, niños, niñas y adolescentes, no alcanzó ni 20 millones de pesos en
ambos años. </span></p>
<p class=MsoListParagraphCxSpLast style="text-align:justify"><span lang=ES
style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal><i><span lang=ES style="font-family:"Poppins",sans-serif">d)
Al respecto de las entidades</span></i></p>
<p class=MsoListParagraph style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">La
Ciudad de México representa casi la totalidad del recurso asignado para los 31
programas presupuestarios, esto se debe a la centralización de las
instituciones encargadas de ejecutar dichos programas. </span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<h2>Referencias:</h2>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif">https://www.transparenciapresupuestaria.gob.mx/es/PTP/programas#consultas</span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif">https://www.animalpolitico.com/2018/11/desapariciones-comite-onu-impunidad/</span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif">https://www.animalpolitico.com/2018/11/fosas-clandestinas-2-mil-hallazgos-mexico/</span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif">https://www.animalpolitico.com/2017/11/indigenas-desplazados-chiapas-violencia/</span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif">https://www.animalpolitico.com/2018/12/mexico-peligroso-periodistas/</span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif">https://www.jornada.com.mx/ultimas/2018/03/05/suman-29-activistas-ambientales-asesinados-en-mexico-informe-4286.html</span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal><span lang=ES-TRAD> </span></p>
</div>
</body>
</html>
')
)
)
)
dashboardPage(dbHeader, sidebar, body, skin = 'black')
})
| /ShinyApp/ui.R | no_license | JuveCampos/ExploraDatosMX | R | false | false | 33,703 | r |
library(shiny)
library(shinydashboard)
# Define UI for application that draws a histogram
shinyUI({
dbHeader <-
dashboardHeader(
title = 'ExploraDatos', titleWidth = 200,
tags$li(a(href = 'https://twitter.com/DBuesos',
img(src = 'https://pbs.twimg.com/profile_images/1101545919105978368/N5qTw1RD_400x400.png',
title = '@DBuesos', height = '30px'),
style = 'padding-top:10px; padding-bottom:10px;'),
class = 'dropdown'),
tags$li(a(href = 'https://github.com/jjsantos01/ExploraDatosMX',
img(src = 'https://image.flaticon.com/icons/svg/25/25231.svg',
title = '@DBuesos', height = '30px'),
style = 'padding-top:10px; padding-bottom:10px;'),
class = 'dropdown')
)
sidebar <- dashboardSidebar(
width = 200,
sidebarMenu(
menuItem("Introducción", tabName = "Intro"),
menuItem("Infografías", tabName = "InfoG"),
menuItem("Gráficas", tabName = "Graficas"),
menuItem("Sistema de Consulta", tabName = "Consulta"),
menuItem("Hallazgos", tabName = "H")
)
)
body <- dashboardBody(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "estilos.css")
),
tags$head(tags$style(HTML('
/* logo */
.skin-black .wrapper .main-header .logo{
color : #ffbfbf;
}
/* main sidebar */
.skin-black .main-sidebar {
background-color: #e2e2e2;
}
/* active selected tab in the sidebarmenu */
.skin-black .main-sidebar .sidebar .sidebar-menu .active a{
font-family: "Poppins", sans-serif;
background-color: #cccccc;
}
/* other links in the sidebarmenu */
.skin-black .main-sidebar .sidebar .sidebar-menu a{
background-color: #e2e2e2;
color: black;
}
/* other links in the sidebarmenu when hovered */
.skin-black .main-sidebar .sidebar .sidebar-menu a:hover{
background-color: white;
}
/* toggle button when hovered */
.skin-black .main-header .navbar .sidebar-toggle:hover{
background-color: white;
}
'))),
tabItems(
tabItem("Intro",
HTML('<html>
<div class=WordSection1>
<div class="contenedor">
<h1>LOS DERECHOS HUMANOS EN EN EL
GASTO PUBLICO: EL CASO DE LOS 31 PROGRAMAS PRESUPUESTARIOS </h1>
</div>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><i><span lang=ES
style="font-family:"Poppins",sans-serif"> </span></i></p>
<h2>Motivaciones</h2>
<p class=MsoNormal style="text-align:justify"><i><span lang=ES
style="font-family:"Poppins",sans-serif"> </span></i></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">Hemos vivido en una grave crisis de seguridad y en
un contexto de impunidad, que ha favorecido las constantes violaciones de DD.HH</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraphCxSpFirst style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">Existen
más de 35 mil desaparecidos. </span></p>
<p class=MsoListParagraphCxSpMiddle style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">Se
han encontrado más de 2 mil fosas clandestinas.</span></p>
<p class=MsoListParagraphCxSpMiddle style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">Al
menos 9 periodistas fueron asesinados durante 2018. </span></p>
<p class=MsoListParagraphCxSpMiddle style="text-indent:-.25in"><span
lang=ES-TRAD style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES-TRAD style="font-family:"Poppins",sans-serif;
color:#282828;background:white">Entre julio 2016 y diciembre han sido
asesinados 29 activistas ambientales.</span></p>
<p class=MsoListParagraphCxSpMiddle style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">El
incremento de municipios forzados a migrar, tan sólo en los municipios de
Chiapas ha habido más de 5 mil personas desplazadas.</span></p>
<p class=MsoListParagraphCxSpLast style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">Y
peor aún, cada vez aumentan las caravanas de migrantes centroamericanos en
nuestro país.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">Además, la fotografía no esta completa… se han
documentado distintas violaciones a los DD.HH de poblaciones vulnerables:
mujeres, lgbt +, afrodescendientes y, niños y adultos mayores.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<h2>Objetivo</h2>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">Ante esta grave situación, </span><span
lang=ES-TRAD style="font-family:"Poppins",sans-serif;color:#14171A;
background:white">hicimos un ejercicio de explorar las herramientas de datos
abiertos </span><span lang=ES style="font-family:"Poppins",sans-serif">de
la plataforma de transparencia presupuestaria de la SHCP.</span><span lang=ES
style="font-family:"Poppins",sans-serif;color:#14171A;background:white"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES-TRAD
style="font-family:"Poppins",sans-serif;color:#14171A;background:white"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES-TRAD
style="font-family:"Poppins",sans-serif;color:#14171A;background:white">Al
buscar en específico “derechos humanos”, se detectaron 31 programas que en
alguna de las clasificaciones presupuestales incluyen este término. </span><span
lang=ES-TRAD style="font-family:"Poppins",sans-serif;color:#14171A;
background:#F5F8FA">De esta manera, con fines de simplificación del análisis,
exploramos los derechos humanos desde las clasificaciones presupuestales de
dicho término.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">Pese a que hay presupuesto etiquetado para los
DD.HH en otras dependencias, otros ramos, y otras funciones, etc, decidimos
enfocarnos exclusivamente en los 31 programas presupuestarios, debido ese fue
el resultado de nuestra interacción con la plataforma de datos abiertos.
Además, que nuestros hallazgos pueden ser replicados, cualquier ciudadano que
escriba en el buscador “derechos humanos” tendrá nuestro mismo resultado, y
podrá realizar los mismos análisis.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">En este sentido, nuestro principal propósito fue explorar
quiénes, cómo y en qué se ejercen los 31 programas presupuestarios para atender
los problemas relativos a los DD.HH. </span></p>
<p class=MsoNormal><span lang=ES> </span></p>
</div>
</body>
</html>')
), # Fin del TabItem Introduccion
tabItem("InfoG",
h1("Infografias"),
tabsetPanel(
tabPanel(h2("Infografía 1: "),
br(),
HTML("<img src='infografias/Infografia 1.png' srcset='infografias/Infografia 1.png 1x, infografias/Infografia 1.png 2x' width='800' height='2000' alt='Infografia_1'/>"),
br(), br()
),
tabPanel(h2("Infografía 2"),
br(),
HTML("<img src='infografias/Infografia 2.png' srcset='infografias/Infografia 2.png 1x, infografias/Infografia 2.png 2x' width='800' height='2000' alt='Infografia_2'/>")
)
)
),
tabItem("Graficas",
h1("Gráficas"),
# tabsetPanel(
# tabPanel("Gasto en programas presupuestarios de Derechos Humanos",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/25.embed'></iframe> ")),
# tabPanel("Distribución del presupuesto por ramo, 2019",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/27.embed'></iframe>")
# ),
# tabPanel("Gasto en programas de población especifica de la CNDH",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/31.embed'></iframe>")
# ),
# tabPanel("Gasto en programas de DDHH, en los ramos Defensa, Segob y PGR",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/33.embed'></iframe>")
# ),
# tabPanel("Gasto en programas presupuestales de DDHH, por entidad, 2019",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/35.embed'></iframe>")
# ),
# tabPanel("Top 10 concepto de gasto en programas presupuestales de DDHH, 2019",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/37.embed'></iframe>")
# ),
# tabPanel("Proporción de representan los programas presupuestales en DDHH en el presupuesto total",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/39.embed'></iframe>")
# ),
# tabPanel("Recomendaciones emitidas por la CNDH",
# HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/41.embed'></iframe>")
# )
# )
#
h2("Gasto en programas presupuestarios de Derechos Humanos"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/25.embed'></iframe> "),
h2("Distribución del presupuesto por ramo, 2019"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/27.embed'></iframe>"),
h2("Gasto en programas de población especifica de la CNDH"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/31.embed'></iframe>"),
h2("Gasto en programas de DDHH, en los ramos Defensa, Segob y PGR"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/33.embed'></iframe>"),
h2("Gasto en programas presupuestales de DDHH, por entidad, 2019"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/35.embed'></iframe>"),
h2("Top 10 concepto de gasto en programas presupuestales de DDHH, 2019"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/37.embed'></iframe>"),
h2("Proporción de representan los programas presupuestales en DDHH en el presupuesto total"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/39.embed'></iframe>"),
h2("Recomendaciones emitidas por la CNDH"),
HTML("<iframe width='1000' height='800' frameborder='0' scrolling='no' src='https://plot.ly/~jjsantos/41.embed'></iframe>")
),
tabItem("Consulta",
h1("Sistema de Consulta"),
p("A continuación, el equipo de DataBuesos ha elaborado un sistema de consulta gráfico
para poder consultar datos de manera gráfica en rubros relativos al
presupuesto de Derechos Humanos en el Gobierno Federal."),
fluidPage(
fluidRow(
column(12, box(title = "Información del presupuesto", status = 'warning', solidHeader = TRUE, width = '100%',
tabsetPanel(
tabPanel("Gasto por Dependencia y Programa",
selectInput(inputId = "Anio_Estatal", label = "Seleccione Año", choices = c(2013, 2014, 2015, 2016, 2017, 2018, 2019)),
HTML("<img src='LeyendaTreeMap.png' srcset='LeyendaTreeMap.png 1x, LeyendaTreeMap.png 2x' height='120' alt='leyenda_1'/>"),
shinycssloaders::withSpinner(highchartOutput('tm', width = 1000, height = 500))
),
tabPanel("Evolución de Gasto",
selectInput(inputId = "Sel_ramo", label = "Seleccione ramo o dependencia", choices = niveles(b1_ramo_year$DESC_RAMO)),
shinycssloaders::withSpinner(plotOutput("plot1", width = 1000, height = 500)),
shinycssloaders::withSpinner(plotlyOutput("plot2", width = 1000, height = 500))
)
)
)
)
)
)
),
tabItem("H",
h1("Hallazgos"),
HTML('<html>
<head>
<meta http-equiv=Content-Type content="text/html; charset=utf-8">
<meta name=Generator content="Microsoft Word 15 (filtered)">
<style>
<!--
/* Font Definitions */
@font-face
{font-family:Wingdings;
panose-1:5 0 0 0 0 0 0 0 0 0;}
@font-face
{font-family:"Cambria Math";
panose-1:2 4 5 3 5 4 6 3 2 4;}
@font-face
{font-family:Calibri;
panose-1:2 15 5 2 2 2 4 3 2 4;}
@font-face
{font-family:"Poppins";
panose-1:2 15 3 2 2 2 4 3 2 4;}
/* Style Definitions */
p.MsoNormal, li.MsoNormal, div.MsoNormal
{margin:0in;
margin-bottom:.0001pt;
font-size:12.0pt;
font-family:"Calibri",sans-serif;}
p.MsoListParagraph, li.MsoListParagraph, div.MsoListParagraph
{margin-top:0in;
margin-right:0in;
margin-bottom:0in;
margin-left:.5in;
margin-bottom:.0001pt;
font-size:12.0pt;
font-family:"Calibri",sans-serif;}
p.MsoListParagraphCxSpFirst, li.MsoListParagraphCxSpFirst, div.MsoListParagraphCxSpFirst
{margin-top:0in;
margin-right:0in;
margin-bottom:0in;
margin-left:.5in;
margin-bottom:.0001pt;
font-size:12.0pt;
font-family:"Calibri",sans-serif;}
p.MsoListParagraphCxSpMiddle, li.MsoListParagraphCxSpMiddle, div.MsoListParagraphCxSpMiddle
{margin-top:0in;
margin-right:0in;
margin-bottom:0in;
margin-left:.5in;
margin-bottom:.0001pt;
font-size:12.0pt;
font-family:"Calibri",sans-serif;}
p.MsoListParagraphCxSpLast, li.MsoListParagraphCxSpLast, div.MsoListParagraphCxSpLast
{margin-top:0in;
margin-right:0in;
margin-bottom:0in;
margin-left:.5in;
margin-bottom:.0001pt;
font-size:12.0pt;
font-family:"Calibri",sans-serif;}
.MsoChpDefault
{font-family:"Calibri",sans-serif;}
@page WordSection1
{size:8.5in 11.0in;
margin:70.85pt 85.05pt 70.85pt 85.05pt;}
div.WordSection1
{page:WordSection1;}
/* List Definitions */
ol
{margin-bottom:0in;}
ul
{margin-bottom:0in;}
-->
</style>
</head>
<body lang=ES-US>
<div class=WordSection1>
<h2>Principales hallazgos</h2>
<p class=MsoNormal style="text-align:justify"><i><span lang=ES
style="font-family:"Poppins",sans-serif"> </span></i></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">a) La agenda relativa a los DD.HH, no es una
prioridad dentro de los programas presupuestarios.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraph style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">De
un total de 740 programas presupuestarios, sólo hay 31 programas relativos a
los DD.HH. </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraph style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">De
los cuales 24 son operados por la CNDH, 2 por Defensa Nacional, 2 por
Gobernación, 1 por el INAI, 1 por la Marina y 1 por la PGR, ahora FGR. </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif">b) El recurso de los programas presupuestarios ha
sido insuficiente para atender la problemática relacionada con los DD.HH, y
peor aún, ha disminuido en el tiempo. </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraph style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">Durante
el periodo 2013-2019, se aprobó un presupuesto de 18,542 millones de pesos para
los 31 programas presupuestarios.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraph style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">El
presupuesto aprobado en 2018 fue de 2,902 mdp y, en 2019 2,552 millones de
pesos, hablamos de un disminución de 12%, en términos reales.</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraph><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal style="text-align:justify"><i><span lang=ES
style="font-family:"Poppins",sans-serif">c) </span></i><span lang=ES
style="font-family:"Poppins",sans-serif">Al respecto de las poblaciones vulnerables
</span></p>
<p class=MsoNormal style="text-align:justify"><span lang=ES style="font-family:
"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraphCxSpFirst style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">El
presupuesto de 2019 disminuyó con respecto de 2018, para todos los casos: </span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:1.0in;text-align:justify;
text-indent:-.25in"><span lang=ES style="font-family:"Courier New"">o<span
style="font:7.0pt "Times New Roman""> </span></span><span lang=ES
style="font-family:"Poppins",sans-serif">personas migrantes; </span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:1.0in;text-align:justify;
text-indent:-.25in"><span lang=ES style="font-family:"Courier New"">o<span
style="font:7.0pt "Times New Roman""> </span></span><span lang=ES
style="font-family:"Poppins",sans-serif">igualdad de género; </span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:1.0in;text-align:justify;
text-indent:-.25in"><span lang=ES style="font-family:"Courier New"">o<span
style="font:7.0pt "Times New Roman""> </span></span><span lang=ES
style="font-family:"Poppins",sans-serif">personas desaparecidas; </span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:1.0in;text-align:justify;
text-indent:-.25in"><span lang=ES style="font-family:"Courier New"">o<span
style="font:7.0pt "Times New Roman""> </span></span><span lang=ES
style="font-family:"Poppins",sans-serif">periodistas y defensores de
DD.HH;</span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:1.0in;text-align:justify;
text-indent:-.25in"><span lang=ES style="font-family:"Courier New"">o<span
style="font:7.0pt "Times New Roman""> </span></span><span lang=ES
style="font-family:"Poppins",sans-serif">niñas, niños y adolescentes; y </span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:1.0in;text-align:justify;
text-indent:-.25in"><span lang=ES style="font-family:"Courier New"">o<span
style="font:7.0pt "Times New Roman""> </span></span><span lang=ES
style="font-family:"Poppins",sans-serif">sexualidad, salud y VIH. </span></p>
<p class=MsoListParagraphCxSpMiddle style="text-align:justify"><span lang=ES
style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraphCxSpMiddle style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">Los
programas presupuestarios de la CNDH dirigidos a personas migrantes han tenido
el mayor recurso aprobado, en más de 100 millones de pesos para los años 2018 y
2019.</span></p>
<p class=MsoListParagraphCxSpMiddle style="margin-left:0in;text-align:justify"><span
lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoListParagraphCxSpMiddle style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">En
caso contrario, los programas presupuestarios dirigidos a la sexualidad, salud
y VIH y, niños, niñas y adolescentes, no alcanzó ni 20 millones de pesos en
ambos años. </span></p>
<p class=MsoListParagraphCxSpLast style="text-align:justify"><span lang=ES
style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal><i><span lang=ES style="font-family:"Poppins",sans-serif">d)
Al respecto de las entidades</span></i></p>
<p class=MsoListParagraph style="text-align:justify;text-indent:-.25in"><span
lang=ES style="font-family:Symbol">·<span style="font:7.0pt "Times New Roman"">
</span></span><span lang=ES style="font-family:"Poppins",sans-serif">La
Ciudad de México representa casi la totalidad del recurso asignado para los 31
programas presupuestarios, esto se debe a la centralización de las
instituciones encargadas de ejecutar dichos programas. </span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<h2>Referencias:</h2>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif">https://www.transparenciapresupuestaria.gob.mx/es/PTP/programas#consultas</span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif">https://www.animalpolitico.com/2018/11/desapariciones-comite-onu-impunidad/</span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif">https://www.animalpolitico.com/2018/11/fosas-clandestinas-2-mil-hallazgos-mexico/</span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif">https://www.animalpolitico.com/2017/11/indigenas-desplazados-chiapas-violencia/</span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif">https://www.animalpolitico.com/2018/12/mexico-peligroso-periodistas/</span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif">https://www.jornada.com.mx/ultimas/2018/03/05/suman-29-activistas-ambientales-asesinados-en-mexico-informe-4286.html</span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal><span lang=ES style="font-family:"Poppins",sans-serif"> </span></p>
<p class=MsoNormal><span lang=ES-TRAD> </span></p>
</div>
</body>
</html>
')
)
)
)
dashboardPage(dbHeader, sidebar, body, skin = 'black')
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extractSaveData.R
\name{l_getSavedata_Fileinfo}
\alias{l_getSavedata_Fileinfo}
\title{Local function that does the work of getSavedata_Fileinfo}
\usage{
l_getSavedata_Fileinfo(outfile, outfiletext, summaries)
}
\arguments{
\item{outfile}{A character string giving the name of the Mplus
output file.}
\item{outfiletext}{The contents of the output file, for example as read by \code{scan}}
}
\value{
A list that includes:
\item{fileName}{The name of the file containing the analysis dataset created by the Mplus SAVEDATA command.}
\item{fileVarNames}{A character vector containing the names of variables in the dataset.}
\item{fileVarFormats}{A character vector containing the Fortran-style formats of variables in the dataset.}
\item{fileVarWidths}{A numeric vector containing the widths of variables in the dataset (which is stored in fixed-width format).}
\item{bayesFile}{The name of the BPARAMETERS file containing draws from the posterior distribution created by
the Mplus SAVEDATA BPARAMETERS command.}
\item{bayesVarNames}{A character vector containing the names of variables in the BPARAMETERS dataset.}
\item{tech3File}{A character vector of the tech 3 output.}
\item{tech4File}{A character vector of the tech 4 output.}
}
\description{
This function is split out so that \code{getSaveData_Fileinfo} is
exposed to the user, but the parsing function can be used by
\code{readModels}
}
\examples{
# make me!
}
\seealso{
\code{\link{getSavedata_Data}}
}
\keyword{internal}
| /man/l_getSavedata_Fileinfo.Rd | no_license | myunghoshin/MplusAutomation | R | false | true | 1,599 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extractSaveData.R
\name{l_getSavedata_Fileinfo}
\alias{l_getSavedata_Fileinfo}
\title{Local function that does the work of getSavedata_Fileinfo}
\usage{
l_getSavedata_Fileinfo(outfile, outfiletext, summaries)
}
\arguments{
\item{outfile}{A character string giving the name of the Mplus
output file.}
\item{outfiletext}{The contents of the output file, for example as read by \code{scan}}
}
\value{
A list that includes:
\item{fileName}{The name of the file containing the analysis dataset created by the Mplus SAVEDATA command.}
\item{fileVarNames}{A character vector containing the names of variables in the dataset.}
\item{fileVarFormats}{A character vector containing the Fortran-style formats of variables in the dataset.}
\item{fileVarWidths}{A numeric vector containing the widths of variables in the dataset (which is stored in fixed-width format).}
\item{bayesFile}{The name of the BPARAMETERS file containing draws from the posterior distribution created by
the Mplus SAVEDATA BPARAMETERS command.}
\item{bayesVarNames}{A character vector containing the names of variables in the BPARAMETERS dataset.}
\item{tech3File}{A character vector of the tech 3 output.}
\item{tech4File}{A character vector of the tech 4 output.}
}
\description{
This function is split out so that \code{getSaveData_Fileinfo} is
exposed to the user, but the parsing function can be used by
\code{readModels}
}
\examples{
# make me!
}
\seealso{
\code{\link{getSavedata_Data}}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3control_operations.R
\name{s3control_put_public_access_block}
\alias{s3control_put_public_access_block}
\title{Creates or modifies the PublicAccessBlock configuration for an AWS
account}
\usage{
s3control_put_public_access_block(PublicAccessBlockConfiguration,
AccountId)
}
\arguments{
\item{PublicAccessBlockConfiguration}{[required] The \code{PublicAccessBlock} configuration that you want to apply to the
specified AWS account.}
\item{AccountId}{[required] The account ID for the AWS account whose \code{PublicAccessBlock}
configuration you want to set.}
}
\value{
An empty list.
}
\description{
Creates or modifies the \code{PublicAccessBlock} configuration for an AWS
account. For more information, see \href{https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html}{Using Amazon S3 block public access}.
Related actions include:
\itemize{
\item \code{\link[=s3control_get_public_access_block]{get_public_access_block}}
\item \code{\link[=s3control_delete_public_access_block]{delete_public_access_block}}
}
}
\section{Request syntax}{
\preformatted{svc$put_public_access_block(
PublicAccessBlockConfiguration = list(
BlockPublicAcls = TRUE|FALSE,
IgnorePublicAcls = TRUE|FALSE,
BlockPublicPolicy = TRUE|FALSE,
RestrictPublicBuckets = TRUE|FALSE
),
AccountId = "string"
)
}
}
\keyword{internal}
| /cran/paws.storage/man/s3control_put_public_access_block.Rd | permissive | TWarczak/paws | R | false | true | 1,444 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3control_operations.R
\name{s3control_put_public_access_block}
\alias{s3control_put_public_access_block}
\title{Creates or modifies the PublicAccessBlock configuration for an AWS
account}
\usage{
s3control_put_public_access_block(PublicAccessBlockConfiguration,
AccountId)
}
\arguments{
\item{PublicAccessBlockConfiguration}{[required] The \code{PublicAccessBlock} configuration that you want to apply to the
specified AWS account.}
\item{AccountId}{[required] The account ID for the AWS account whose \code{PublicAccessBlock}
configuration you want to set.}
}
\value{
An empty list.
}
\description{
Creates or modifies the \code{PublicAccessBlock} configuration for an AWS
account. For more information, see \href{https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html}{Using Amazon S3 block public access}.
Related actions include:
\itemize{
\item \code{\link[=s3control_get_public_access_block]{get_public_access_block}}
\item \code{\link[=s3control_delete_public_access_block]{delete_public_access_block}}
}
}
\section{Request syntax}{
\preformatted{svc$put_public_access_block(
PublicAccessBlockConfiguration = list(
BlockPublicAcls = TRUE|FALSE,
IgnorePublicAcls = TRUE|FALSE,
BlockPublicPolicy = TRUE|FALSE,
RestrictPublicBuckets = TRUE|FALSE
),
AccountId = "string"
)
}
}
\keyword{internal}
|
#' @include desc_statby.R utilities_base.R utilities_color.R
NULL
#' @import ggplot2
#' @importFrom magrittr %>%
#' @importFrom dplyr group_by_
#' @importFrom dplyr group_by
#' @importFrom dplyr arrange_
#' @importFrom dplyr mutate
#' @importFrom dplyr do
#' @importFrom dplyr summarise
#' @importFrom dplyr everything
#' @importFrom grid drawDetails
#' @importFrom rlang !!
#' @importFrom rlang !!!
#' @importFrom rlang syms
# Unnesting, adapted to the tidyr 1.0.0 interface change.
# Wrapper around tidyr::unnest(): versions newer than 0.8.3 take a `cols`
# argument, older ones use the legacy call without it.
unnest <- function(data, cols = "data", ...){
  if (is_pkg_version_sup("tidyr", "0.8.3")) {
    tidyr::unnest(data, cols = cols, ...)
  } else {
    tidyr::unnest(data, ...)
  }
}
# Check if an installed package version is superior to a specified version
# Version, pkg: character vector
# TRUE when the installed version of `pkg` is strictly greater than `version`.
# pkg, version: character scalars; comparison uses utils::compareVersion(),
# so multi-part versions compare correctly (e.g. "1.10" > "1.9").
is_pkg_version_sup <- function(pkg, version){
  installed <- as.character(utils::packageVersion(pkg))
  utils::compareVersion(installed, version) > 0
}
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Execute a geom_* function from ggplot2
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# geomfunc : a geom_*() function (or NULL)
# data     : data for mapping
# ...      : any argument accepted by the geom function
# Returns a plot layer if geomfunc != NULL, or a list(option, mapping)
# if geomfunc is NULL.
# Thin backward-compatible alias that forwards everything to geom_exec().
.geom_exec <- function(geomfunc = NULL, data = NULL, position = NULL, ...) {
  geom_exec(geomfunc = geomfunc, data = data, position = position, ...)
}
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Official argument from ggplot2
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# bar plot arguments
# Extract the bar-plot-specific options from `...`, filling in the same
# defaults ggplot2 uses. Unspecified width/binwidth are dropped from the
# result (assigning NULL to a list element removes it).
#
# Fix: the original used ifelse() for scalar defaulting, which silently
# truncates length > 1 values and strips attributes; plain if/else keeps
# the caller's value intact.
.barplot_params <- function(...){
  x <- list(...)
  res <- list()
  res$width <- x$width          # NULL assignment drops the element
  res$binwidth <- x$binwidth
  res$na.rm <- if (is.null(x$na.rm)) FALSE else x$na.rm
  res$show.legend <- if (is.null(x$show.legend)) NA else x$show.legend
  res$inherit.aes <- if (is.null(x$inherit.aes)) TRUE else x$inherit.aes
  return(res)
}
# box plot arguments
# Extract the boxplot-specific options from `...`, filling in the same
# defaults ggplot2::geom_boxplot() uses. outlier.colour is passed through
# unchanged (dropped when NULL).
#
# Fix: the original used ifelse() for scalar defaulting, which silently
# truncates length > 1 values and strips attributes; plain if/else keeps
# the caller's value intact.
.boxplot_params <- function(...){
  x <- list(...)
  res <- list()
  res$outlier.colour <- x$outlier.colour
  res$outlier.shape <- if (is.null(x$outlier.shape)) 19 else x$outlier.shape
  res$outlier.size <- if (is.null(x$outlier.size)) 1.5 else x$outlier.size
  res$outlier.stroke <- if (is.null(x$outlier.stroke)) 0.5 else x$outlier.stroke
  res$notch <- if (is.null(x$notch)) FALSE else x$notch
  res$notchwidth <- if (is.null(x$notchwidth)) 0.5 else x$notchwidth
  res$varwidth <- if (is.null(x$varwidth)) FALSE else x$varwidth
  res$na.rm <- if (is.null(x$na.rm)) FALSE else x$na.rm
  res$show.legend <- if (is.null(x$show.legend)) NA else x$show.legend
  res$inherit.aes <- if (is.null(x$inherit.aes)) TRUE else x$inherit.aes
  return(res)
}
# Extract the dotplot-specific options from `...` with their defaults.
#
# Fix: the original used ifelse() for scalar defaulting, which silently
# truncates length > 1 values and strips attributes; plain if/else keeps
# the caller's value intact.
.dotplot_params <- function(...){
  x <- list(...)
  res <- list()
  res$stackratio <- if (is.null(x$stackratio)) 1 else x$stackratio
  res$width <- if (is.null(x$width)) 0.9 else x$width
  return(res)
}
# Extract the violin-plot-specific options from `...`, filling in the same
# defaults ggplot2::geom_violin() uses. draw_quantiles may be a numeric
# vector (e.g. c(0.25, 0.5, 0.75)) and is passed through unchanged
# (dropped when NULL).
#
# Fix: the original used ifelse() for scalar defaulting, which silently
# truncates length > 1 values and strips attributes; plain if/else keeps
# the caller's value intact.
.violin_params <- function(...){
  x <- list(...)
  res <- list()
  res$stat <- if (is.null(x$stat)) "ydensity" else x$stat
  res$draw_quantiles <- x$draw_quantiles
  res$scale <- if (is.null(x$scale)) "area" else x$scale
  res$trim <- if (is.null(x$trim)) TRUE else x$trim
  return(res)
}
# Extract the histogram-specific options (binwidth, bins) from `...`.
# Both are passed through unchanged; a NULL value simply drops the
# element from the returned list.
.hist_params <- function(...){
  args <- list(...)
  out <- list()
  out$binwidth <- args$binwidth
  out$bins <- args$bins
  out
}
# Extract the standard aesthetic options (color, linetype, size, fill,
# shape) from `...` with their defaults. The British spelling `colour`,
# when supplied, overrides `color`.
#
# Fix: the original used ifelse() for scalar defaulting, which silently
# truncates length > 1 values and strips attributes; plain if/else keeps
# the caller's value intact.
.standard_params <- function(...){
  x <- list(...)
  res <- list()
  res$color <- if (is.null(x$color)) "black" else x$color
  if (!is.null(x$colour)) res$color <- x$colour
  res$linetype <- if (is.null(x$linetype)) "solid" else x$linetype
  res$size <- if (is.null(x$size)) 1 else x$size
  res$fill <- if (is.null(x$fill)) "black" else x$fill
  res$shape <- if (is.null(x$shape)) 19 else x$shape
  res
}
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Graphical parameters
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Set plot orientation
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Flip or reverse a ggplot's coordinate system.
# "vertical" (the default) returns the plot unchanged; "horizontal"
# adds coord_flip(); "reverse" reverses the y axis.
.set_orientation <-
  function(p, orientation = c("vertical", "horizontal", "reverse")) {
    orientation <- match.arg(orientation)
    switch(orientation,
           horizontal = p + coord_flip(),
           reverse = p + scale_y_reverse(),
           p)
  }
# Change title and labels
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Set plot title, subtitle, caption and axis labels of a ggplot, together
# with their fonts.
#
# p        : a ggplot object.
# main, submain, caption : title / subtitle / caption text. NULL leaves the
#            label untouched; logical FALSE suppresses it.
# xlab, ylab : axis titles; FALSE removes the axis title element entirely.
# font.main, font.x, font.y, font.submain, font.caption : font
#            specifications handled by .parse_font(); presumably parsed into
#            a list with $size, $face and $color (the !is.null checks below
#            rely on .parse_font(NULL) returning NULL) -- defined elsewhere.
#
# Returns the modified ggplot object.
.labs <- function(p, main = NULL, xlab = NULL, ylab = NULL,
                  font.main = NULL, font.x = NULL, font.y = NULL,
                  submain = NULL, caption = NULL,
                  font.submain = NULL, font.caption = NULL)
{
  # Normalize all font specifications first.
  font.main <- .parse_font(font.main)
  font.x <- .parse_font(font.x)
  font.y <- .parse_font(font.y)
  font.submain <- .parse_font(font.submain)
  font.caption <- .parse_font(font.caption)
  # Map logical FALSE to NULL so the corresponding label is simply skipped.
  # (A logical TRUE is left as-is.)
  if(is.logical(main)){
    if(!main) main <- NULL
  }
  if(is.logical(submain)){
    if(!submain) submain <- NULL
  }
  if(is.logical(caption)){
    if(!caption) caption <- NULL
  }
  # Add title / subtitle / caption only when a value was supplied.
  if (!is.null(main)) {
    p <- p + labs(title = main)
  }
  if (!is.null(submain)) {
    p <- p + labs(subtitle = submain)
  }
  if (!is.null(caption)) {
    p <- p + labs(caption = caption)
  }
  # For axis titles, FALSE blanks the theme element instead of setting text.
  # NOTE(review): `xlab == FALSE` also matches the string "FALSE" because of
  # coercion in `==`; callers are expected to pass logical FALSE or real text.
  if (!is.null(xlab)) {
    if (xlab == FALSE)
      p <- p + theme(axis.title.x = element_blank())
    else
      p <- p + labs(x = xlab)
  }
  if (!is.null(ylab)) {
    if (ylab == FALSE)
      p <- p + theme(axis.title.y = element_blank())
    else
      p <- p + labs(y = ylab)
  }
  # Apply fonts (size / face / color) to each label that has a spec.
  if (!is.null(font.main))
    p <-
      p + theme(
        plot.title = element_text(
          size = font.main$size,
          lineheight = 1.0, face = font.main$face, colour = font.main$color
        )
      )
  if (!is.null(font.submain))
    p <-
      p + theme(
        plot.subtitle = element_text(
          size = font.submain$size,
          lineheight = 1.0, face = font.submain$face, colour = font.submain$color
        )
      )
  if (!is.null(font.caption))
    p <-
      p + theme(
        plot.caption = element_text(
          size = font.caption$size,
          lineheight = 1.0, face = font.caption$face, colour = font.caption$color
        )
      )
  if (!is.null(font.x))
    p <-
      p + theme(axis.title.x = element_text(
        size = font.x$size,
        face = font.x$face, colour = font.x$color
      ))
  if (!is.null(font.y))
    p <-
      p + theme(axis.title.y = element_text(
        size = font.y$size,
        face = font.y$face, colour = font.y$color
      ))
  p
}
# ticks
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Build a ggplot2 theme() controlling axis ticks and tick labels.
#
# ticks         : draw tick marks? TRUE -> black lines, FALSE -> blank.
# tickslab      : draw tick labels at all?
# xtickslab.rt, ytickslab.rt : rotation angle (degrees) of x / y tick labels.
# font.xtickslab, font.ytickslab : font specs handled by .parse_font();
#                 both default to font.tickslab.
#
# Returns a theme object to be added to a plot.
.set_ticks <-
  function(ticks = TRUE, tickslab = TRUE, font.tickslab = NULL,
           xtickslab.rt = NULL, ytickslab.rt = NULL,
           font.xtickslab = font.tickslab, font.ytickslab = font.tickslab)
  {
    # Pre-declare the magrittr placeholder `.` and xhjust to avoid
    # R CMD check "no visible binding" notes.
    . <- xhjust <- NULL
    # Right-justify x tick labels when they are rotated by more than 5
    # degrees, so they line up under their ticks.
    if(!is.null(xtickslab.rt)) {
      if(xtickslab.rt > 5) xhjust <- 1
    }
    else xhjust <- NULL
    if (ticks)
      ticks <-
      element_line(colour = "black")
    else
      ticks <- element_blank()
    # An empty list means "use the theme's default tick-label font".
    if (is.null(font.xtickslab)) font.x <- list()
    else font.x <- .parse_font(font.xtickslab)
    if (is.null(font.ytickslab)) font.y <- list()
    else font.y <- .parse_font(font.ytickslab)
    if (tickslab) {
      # .add_item() (defined elsewhere in this package) presumably appends
      # the hjust/angle entries to the font list before it is turned into
      # an element_text() call via do.call.
      xtickslab <- font.x %>% .add_item(hjust = xhjust, angle = xtickslab.rt) %>%
        do.call(element_text, .)
      ytickslab <- font.y %>% .add_item(angle = ytickslab.rt) %>% do.call(element_text, .)
    }
    else {
      xtickslab <- element_blank()
      ytickslab <- element_blank()
    }
    theme(
      axis.ticks = ticks, axis.text.x = xtickslab, axis.text.y = ytickslab
    )
  }
# Change Axis limits
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Return a coord_cartesian() zoom when at least one of xlim/ylim is given;
# otherwise the `if` yields NULL, so `p + .set_axis_limits()` is a no-op.
.set_axis_limits <- function(xlim = NULL, ylim = NULL){
  if (!is.null(xlim) || !is.null(ylim)) coord_cartesian(xlim, ylim)
}
# Axis scales
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Apply axis transformations (log2, log10, sqrt) to a ggplot object.
#
# p            : a ggplot object.
# xscale/yscale: transformation for the x / y axis; "none" leaves it as-is.
# format.scale : if TRUE, also format the axis labels as powers (2^x, 10^x)
#                using the 'scales' package.
# NOTE(review): when format.scale = TRUE the "sqrt" option is silently
# ignored -- only log2/log10 are handled in that branch.
#
# Returns the modified ggplot object.
.set_scale <- function (p, xscale = c("none", "log2", "log10", "sqrt"),
                        yscale = c("none", "log2", "log10", "sqrt"),
                        format.scale = FALSE)
{
  xscale <- match.arg(xscale)
  yscale <- match.arg(yscale)
  # Local binding for the `.x` symbol used inside scales::math_format()
  # expressions below (silences R CMD check notes).
  .x <- ".x"
  if(format.scale){
    # 'scales' is only required for the formatted-label variant.
    if(!requireNamespace("scales")) stop("The R package 'scales' is required.")
    if(yscale == "log2"){
      p <- p + scale_y_continuous(trans = scales::log2_trans(),
                                  breaks = scales::trans_breaks("log2", function(x) 2^x),
                                  labels = scales::trans_format("log2", scales::math_format(2^.x)))
    }
    else if(yscale == "log10"){
      p <- p + scale_y_continuous(trans = scales::log10_trans(),
                                  breaks = scales::trans_breaks("log10", function(x) 10^x),
                                  labels = scales::trans_format("log10", scales::math_format(10^.x)))
    }
    if(xscale == "log2"){
      p <- p + scale_x_continuous(trans = scales::log2_trans(),
                                  breaks = scales::trans_breaks("log2", function(x) 2^x),
                                  labels = scales::trans_format("log2", scales::math_format(2^.x)))
    }
    else if(xscale == "log10"){
      p <- p + scale_x_continuous(trans = scales::log10_trans(),
                                  breaks = scales::trans_breaks("log10", function(x) 10^x),
                                  labels = scales::trans_format("log10", scales::math_format(10^.x)))
    }
  }
  else{
    # Plain transformation without label formatting.
    if(xscale != "none") p <- p + scale_x_continuous(trans = xscale)
    if(yscale != "none") p <- p + scale_y_continuous(trans = yscale)
  }
  p
}
# Legends
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Apply legend position, title and font settings to a ggplot object.
# legend       : legend position (e.g. "top", "right", or a coordinate pair).
# legend.title : a single title applied to every aesthetic, or a named list
#                passed straight to ggplot2::labs(); NULL keeps the default.
# font.legend  : font specification handled by .parse_font().
# Returns the modified ggplot object.
.set_legend <- function(p, legend = NULL,
                        legend.title = NULL, font.legend = NULL)
{
  if (is.null(legend.title)) legend.title <- waiver()
  font <- .parse_font(font.legend)
  if (!is.null(legend)) {
    p <- p + theme(legend.position = legend)
  }
  if (!.is_empty(legend.title)) {
    if (.is_list(legend.title)) {
      p <- p + do.call(ggplot2::labs, legend.title)
    } else {
      p <- p + labs(color = legend.title, fill = legend.title,
                    linetype = legend.title, shape = legend.title)
    }
  }
  if (!is.null(font)) {
    # The same font spec applies to both legend text and legend title.
    legend_font <- element_text(size = font$size, face = font$face,
                                colour = font$color)
    p <- p + theme(legend.text = legend_font, legend.title = legend_font)
  }
  p
}
# Set ticks by
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Set regular axis break intervals on a ggplot.
# Breaks run from 0 to the maximum of the mapped variable in steps of
# xticks.by / yticks.by. Note: if both are supplied, only the y axis is
# changed (mirrors the original else-if behaviour).
.set_ticksby <- function(p, xticks.by = NULL, yticks.by = NULL)
{
  plot_data <- p$data
  xy <- .get_gg_xy_variables(p)
  if (!is.null(yticks.by)) {
    yvals <- plot_data[, xy["y"]]
    p <- p + scale_y_continuous(
      breaks = seq(0, max(yvals, na.rm = TRUE), by = yticks.by))
  } else if (!is.null(xticks.by)) {
    xvals <- plot_data[, xy["x"]]
    p <- p + scale_x_continuous(
      breaks = seq(0, max(xvals, na.rm = TRUE), by = xticks.by))
  }
  p
}
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Add stat
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Fill in missing color/fill/shape entries of `add.params`.
# Preference order: a column name present in `data`, then the explicit
# color/fill arguments; fill is only taken from `fill` for geoms that use
# it (crossbar / boxplot / violin), otherwise it falls back to the color.
# A `shape` passed through `...` is copied in when not already set.
.check_add.params <- function(add, add.params, error.plot, data, color, fill, ...){
  extra <- list(...)
  if (color %in% names(data) && is.null(add.params$color)) add.params$color <- color
  if (fill %in% names(data) && is.null(add.params$fill)) add.params$fill <- fill
  if (is.null(add.params$color)) add.params$color <- color
  uses_fill <- "crossbar" %in% error.plot || "boxplot" %in% add || "violin" %in% add
  if (is.null(add.params$fill) && uses_fill) add.params$fill <- fill
  if (is.null(add.params$fill)) add.params$fill <- add.params$color
  if (!is.null(extra$shape) && is.null(add.params$shape)) add.params$shape <- extra$shape
  add.params
}
# Allowed values for add are one or the combination of: "none",
# "dotplot", "jitter", "boxplot", "mean", "mean_se", "mean_sd", "mean_ci", "mean_range",
# "median", "median_iqr", "median_mad", "median_range"
# p_geom character, e.g "geom_line"
# Add supplementary layers (boxplot, violin, dotplot, jitter, point, line)
# and/or summary statistics (mean/median +- se, sd, ci, range, iqr, mad)
# to an existing ggplot.
# - p: a ggplot object
# - add: character vector of layers/statistics; see .summary_functions()
# - add.params: aesthetics for the added layers (color, fill, shape, width, size, ...)
# - data: data for the added layers; defaults to p$data
# - position: position adjustment for added geoms
# - error.plot: geometry used to draw the error interval
# - p_geom: geometry of the main plot (e.g. "geom_line"); affects jitter positioning
# Returns the modified ggplot object.
.add <- function(p,
                 add = NULL,
                 add.params = list(color = "black", fill = "white", shape = 19, width = 1),
                 data = NULL, position = position_dodge(0.8),
                 error.plot = c("pointrange", "linerange", "crossbar", "errorbar",
                                "upper_errorbar", "lower_errorbar", "upper_pointrange", "lower_pointrange",
                                "upper_linerange", "lower_linerange"),
                 p_geom = ""
                 )
{
  if(is.null(data)) data <- p$data
  pms <- add.params
  if("none" %in% add) add <- "none"
  error.plot = match.arg(error.plot)
  color <- ifelse(is.null(pms$color), "black", pms$color)
  fill <- ifelse(is.null(pms$fill), "white", pms$fill)
  width <- ifelse(is.null(pms$width), 1, pms$width)
  # NOTE: a second, identical shape assignment was removed (it shadowed this one)
  shape <- ifelse(is.null(pms$shape), 19, pms$shape)
  # x/y variable names from the plot's aesthetic mapping
  .mapping <- .get_gg_xy_variables(p)
  x <- .mapping["x"]
  y <- .mapping["y"]
  # Pre-compute descriptive statistics only when a summary layer is requested
  errors <- c("mean", "mean_se", "mean_sd", "mean_ci", "mean_range", "median", "median_iqr", "median_mad", "median_range")
  if(any(errors %in% add)) stat_sum <- desc_statby(data, measure.var = .mapping["y"],
                                                   grps = intersect(c(.mapping["x"], color, fill), names(data)))
  if ("boxplot" %in% add) {
    p <- p + .geom_exec(geom_boxplot, data = data,
                        color = color, fill = fill,
                        position = position, width = width, size = add.params$size)
  }
  if ("violin" %in% add) {
    p <- p + .geom_exec(geom_violin, data = data, trim = FALSE,
                        color = color, fill = fill,
                        position = position, width = width, size = add.params$size)
  }
  if ( "dotplot" %in% add ) {
    dotsize <- ifelse(is.null(add.params$size), 0.9, add.params$size)
    p <- p + .geom_exec(geom_dotplot, data = data, binaxis = 'y', stackdir = 'center',
                        color = color, fill = fill, dotsize = dotsize,
                        position = position, stackratio = 1.2, binwidth = add.params$binwidth)
  }
  if ( "jitter" %in% add ){
    # Fixed seed so that jittered points are reproducible across redraws
    set.seed(123)
    ngrps <- length(intersect(names(data), c(.mapping["x"], fill, color)))
    # Single group (or a line plot): plain jitter; multiple groups: dodge
    if(p_geom == "geom_line" | ngrps == 1) .jitter = position_jitter(0.4)
    else if(ngrps > 1) .jitter <- position_dodge(0.8)
    # Explicit jitter amount (numeric) or a ready-made position object wins
    if(is.null(add.params$jitter)) .jitter = position_jitter(0.4)
    else if(is.numeric(add.params$jitter))
      .jitter <- position_jitter(add.params$jitter)
    else .jitter <- add.params$jitter
    p <- p + .geom_exec(geom_jitter, data = data,
                        color = color, fill = fill, shape = shape, size = add.params$size,
                        position = .jitter )
  }
  if ( "point" %in% add ) {
    p <- p + .geom_exec(geom_point, data = data,
                        color = color, size = add.params$size,
                        position = position)
  }
  if ( "line" %in% add ) {
    p <- p + .geom_exec(geom_line, data = data, group = 1,
                        color = color, size = add.params$size,
                        position = position)
  }
  # Add mean or median center point
  center <- intersect(c("mean", "median"), add)
  if(length(center) == 2)
    stop("Use mean or median, but not both at the same time.")
  if(length(center) == 1){
    center.size <- ifelse(is.null(add.params$size), 1, add.params$size)
    p <- p %>%
      add_summary(fun = center, color = color, shape = shape,
                  position = position, size = center.size)
  }
  # Add error interval (center +/- spread)
  errors <- c("mean_se", "mean_sd", "mean_ci", "mean_range", "median_iqr", "median_mad", "median_range")
  errors <- intersect(errors, add)
  if(length(errors) >= 2)
    stop("Choose one of these: ", paste(errors, collapse =", "))
  if(length(errors) == 1){
    # e.g. "mean_se" -> center column "mean", spread column "se" in stat_sum
    errors <- strsplit(errors, "_", fixed = TRUE)[[1]]
    .center <- errors[1]
    .errors <- errors[2]
    stat_sum$ymin <- stat_sum[, .center] - stat_sum[, .errors]
    stat_sum$ymax <- stat_sum[, .center] + stat_sum[, .errors]
    # Rename the center column to the plot's y variable for mapping
    names(stat_sum)[which(names(stat_sum) == .center)] <- y
    size <- ifelse(is.null(add.params$size), 1, add.params$size)
    # "upper_*"/"lower_*" draw only one half of the interval
    if(error.plot %in% c("upper_errorbar", "upper_pointrange", "upper_linerange")) {
      ymin <- y
      ymax <- "ymax"
    }
    else if(error.plot %in% c("lower_errorbar", "lower_pointrange", "lower_linerange")){
      ymin <- "ymin"
      ymax <- y
    }
    else {
      ymin <- "ymin"
      ymax <- "ymax"
    }
    if(error.plot %in% c("pointrange", "lower_pointrange", "upper_pointrange"))
      p <- p + .geom_exec(geom_pointrange, data = stat_sum,
                          color = color, shape = shape, ymin = ymin, ymax = ymax,
                          position = position, size = size)
    else if(error.plot %in% c("linerange", "lower_linerange", "upper_linerange"))
      p <- p + .geom_exec(geom_linerange, data = stat_sum,
                          color = color, ymin = ymin, ymax = ymax,
                          position = position, size = size)
    else if(error.plot %in% c("errorbar", "lower_errorbar", "upper_errorbar"))
      p <- p + .geom_exec(geom_errorbar, data = stat_sum,
                          color = color, ymin = ymin, ymax = ymax,
                          position = position, size = size, width = 0.2)
    else if(error.plot == "crossbar")
      p <- p + .geom_exec(geom_crossbar, data = stat_sum, fill = fill,
                          color = color, ymin = "ymin", ymax = "ymax",
                          position = position, width = width, size = size)
  }
  p
}
# Calculate the mean and the SD in each group
#+++++++++++++++++++++++++
# data : a data frame
# varname : the name of the variable to be summarized
# grps : column names to be used as grouping variables
# .mean_sd <- function(data, varname, grps){
# summary_func <- function(x, col){
# c(mean = base::mean(x[[col]], na.rm=TRUE),
# sd = stats::sd(x[[col]], na.rm=TRUE))
# }
# data_sum <- plyr::ddply(data, grps, .fun=summary_func, varname)
# data_sum$ymin <- data_sum$mean-data_sum$sd
# data_sum$ymax <- data_sum$mean+data_sum$sd
# names(data_sum)[ncol(data_sum)-3] <- varname
# # data_sum <- plyr::rename(data_sum, c("mean" = varname))
# return(data_sum)
# }
# Summary functions
# Names of the summary statistics that can be requested via `add`:
# the two centers (mean, median) plus their spread variants
# (se, sd, ci, range for the mean; iqr, mad, range for the median).
.summary_functions <- function(){
  mean.stats <- paste0("mean", c("", "_se", "_sd", "_ci", "_range"))
  median.stats <- paste0("median", c("", "_iqr", "_mad", "_range"))
  c(mean.stats, median.stats)
}
# parse font
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Parse a font specification into list(size, face, color).
# Accepts NULL (returns NULL), an already-built list (returned as-is),
# or a character vector mixing size, face and color in any order,
# e.g. c("bold", "12", "red").
.parse_font <- function(font){
  if(is.null(font)) return(NULL)
  if(inherits(font, "list")) return(font)
  # A size is a pure-digit token; a face is one of the known keywords;
  # whatever token remains is taken as the color.
  size.idx <- grep("^[0-9]+$", font, perl = TRUE)
  face.idx <- grep("plain|bold|italic|bold.italic", font, perl = TRUE)
  size <- if(length(size.idx) == 0) NULL else as.numeric(font[size.idx])
  face <- if(length(face.idx) == 0) NULL else font[face.idx]
  color <- setdiff(font, c(size, face))
  if(length(color) == 0) color <- NULL
  list(size = size, face = face, color = color)
}
# Add annotation to a plot
# label: text to be added to a plot
# size: text size
# coord: x and coordinates
# Add a text annotation to a plot.
# - label: text to draw
# - size: font size in points (divided by 3 when using annotate(), which
#   expects mm-based sizes)
# - coord: c(x, y) data coordinates; note that the default c(NULL, NULL)
#   collapses to NULL, which triggers the fixed-position grob branch
.ggannotate <- function (label, size = 12, coord = c(NULL, NULL)){
  if(is.null(unique(coord))){
    # No coordinates given: draw at a fixed position (npc units) via a custom grob
    grob <- grid::grobTree(grid::textGrob(label, x = 0.3, y = 0.80, hjust=0,
                                          gp = grid::gpar(col = "black", fontsize = size, fontface = "plain")))
    ggplot2::annotation_custom(grob)
  }
  else{
    ggplot2::annotate("text", x = coord[1], y = coord[2],
                      label = label, size = size/3)
  }
}
#:::::::::::::::::::::::::::::::::::::::::
# Check the data provided by user
#:::::::::::::::::::::::::::::::::::::::::
# combine: if TRUE, gather y variables
# return a list(data, x, y)
# Validate and normalize the data/x/y arguments of the high-level plotting
# functions. Handles the cases where data is a bare numeric vector, where x
# or y is missing, and where several y (or x) variables must be combined
# (gathered) into a single ".value." column with a ".y." key.
# Returns list(y, data, x) -- or list(x, data, y) for density-like plots,
# so that multi-plot titles come from the right variable set.
# NOTE(review): uses the deprecated tidyr::gather_() interface.
.check_data <- function(data, x, y, combine = FALSE)
{
  if(missing(x) & missing(y)){
    if(!is.numeric(data))
      stop("x and y are missing. In this case data should be a numeric vector.")
    else{
      # Bare numeric vector: fabricate a one-group data frame
      data <- data.frame(y = data, x = rep(1, length(data)))
      x <- "x"
      y <- "y"
    }
  }
  else if(missing(x)) {
    x <- "x"
    if(is.numeric(data)) data <- data.frame(x = data)
    else data$x <- rep("1", nrow(data))
  }
  # A list of y elements to plot
  else if(length(y) > 1){
    # Keep only the y variables that exist in the data; warn about the rest
    if(!all(y %in% colnames(data))){
      not_found <- setdiff(y , colnames(data))
      y <- intersect(y, colnames(data))
      if(.is_empty(y))
        stop("Can't find the y elements in the data.")
      else if(!.is_empty(not_found))
        warning("Can't find the following element in the data: ",
                .collapse(not_found))
    }
  }
  # Downstream code uses data[, col] extraction, which tibbles handle differently
  if(inherits(data, c("tbl_df", "tbl")))
    data <- as.data.frame(data)
  # Combining y variables
  #......................................................
  if(is.null(y)) y <- ""
  if(combine & length(y) > 1){
    data <- tidyr::gather_(data, key_col = ".y.", value_col = ".value.",
                           gather_cols = y)
    # Preserve the original variable order in the gathered key
    data[, ".y."] <- factor(data[, ".y."], levels = unique(data[, ".y."]))
    y <- ".value."
  }
  # Combining x variables: Case of density plot or histograms
  #......................................................
  else if(combine & length(x) > 1 & y[1] %in% c("..density..", "..count..", "..ecdf..", "..qq..")){
    data <- tidyr::gather_(data, key_col = ".y.", value_col = ".value.",
                           gather_cols = x)
    data[, ".y."] <- factor(data[, ".y."], levels = unique(data[, ".y."]))
    x <- ".value."
  }
  # If not factor, x elements on the plot should
  # appear in the same order as in the data
  if(is.character(data[, x]))
    data[, x] <- factor(data[, x], levels = unique(data[, x]))
  y <- unique(y)
  names(y) <- y
  x <- unique(x)
  names(x) <- x
  if(y[1] %in% c("..density..", "..count..", "..ecdf..", "..qq.."))
    list(x = x, data = data, y = y) # The name of plots are x variables
  else
    list(y = y, data = data, x = x) # The name of plots will be y variables
}
# Adjust shape when ngroups > 6, to avoid ggplot warnings
# Adjust the shape scale when the shape aesthetic maps a column with more
# than 6 levels, to avoid ggplot2's "too many shapes" warning.
# Fix: the original converted the column to a factor into `grp` but then
# called levels() on the raw column, which returns NULL for character
# columns -- so the manual scale was never applied in that case.
.scale_point_shape <- function(p, data, shape){
  if(shape %in% colnames(data)){
    grp <- data[, shape]
    if(!inherits(grp, "factor")) grp <- as.factor(grp)
    ngroups <- nlevels(grp)
    if(ngroups > 6) p <- p + scale_shape_manual(values = 1:ngroups, labels = levels(grp))
  }
  p
}
# Get not numeric columns in a data.frame
# Return the names of the non-numeric columns of a data frame,
# or NULL when every column is numeric.
.get_not_numeric_vars <- function(data_frame){
  # vapply() guarantees a logical vector (sapply() may return a list
  # for degenerate input)
  is_numeric <- vapply(data_frame, is.numeric, logical(1))
  if(sum(!is_numeric) == 0) return(NULL)
  colnames(data_frame[, !is_numeric, drop = FALSE])
}
# Get the current color used in ggplot
# Count the number of distinct colors (or fills, whichever is larger)
# used in the first layer of a built ggplot.
# Fix: the fill branch tested for a column named "fills", which never
# exists (the built-data column is "fill", as read on the next line),
# so fill counts were always ignored.
.get_ggplot_ncolors <- function(p){
  g <- ggplot_build(p)
  gdata <- g$data[[1]]
  cols <- fills <- 1
  if("colour" %in% names(gdata)) cols <- unique(unlist(gdata["colour"]))
  if("fill" %in% names(gdata)) fills <- unique(unlist(gdata["fill"]))
  max(length(cols), length(fills))
}
# Check if character string is a valid color representation
# Check, element-wise, whether each string in `x` is a valid color
# representation (name or hex). Returns a named logical vector.
.is_color <- function(x) {
  # vapply() pins the return type to logical(1) per element;
  # col2rgb() errors on invalid colors, which we map to FALSE
  vapply(x, function(X) {
    tryCatch(is.matrix(grDevices::col2rgb(X)),
             error = function(e) FALSE)
  }, logical(1))
}
# Collapse one or two vectors
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Collapse one or two vectors into a string.
# - one argument: paste(x, collapse = sep)
# - two arguments: element-wise paste0(x, sep, y), with NULL treated as
#   "absent" (the other argument is returned as character; both NULL -> NULL)
# Note: behavior differs between a *missing* y and an explicit y = NULL.
.collapse <- function(x, y = NULL, sep = "."){
  if(missing(y)) return(paste(x, collapse = sep))
  if(is.null(x) && is.null(y)) return(NULL)
  if(is.null(x)) return(as.character(y))
  if(is.null(y)) return(as.character(x))
  paste0(x, sep, y)
}
# Check if an object is empty
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# TRUE when `x` has no elements (NULL, character(0), list(), ...).
.is_empty <- function(x){
  identical(length(x), 0L)
}
# Remove NULL items in a vector or list
#
# x a vector or list
.compact <- function(x){Filter(Negate(is.null), x)}
# Check if is a list
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# TRUE when `x` carries the "list" class (stricter than is.list():
# e.g. a data.frame is NOT considered a list here).
.is_list <- function(x){
  is.element("list", class(x))
}
# Returns the levels of a factor variable
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Return the levels of `x`, coercing to factor first when needed
# (as.factor() is a no-op on an existing factor, preserving its levels).
.levels <- function(x){
  levels(as.factor(x))
}
# Remove items from a list
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Remove the named items from a list (assigning NULL to a list element
# deletes it; names absent from the list are silently ignored).
.remove_item <- function(.list, items){
  for(key in items) .list[[key]] <- NULL
  .list
}
# Add items in a list
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Add (or overwrite) named items in a list from ... key/value pairs.
.add_item <- function(.list, ...){
  new.items <- list(...)
  for(key in names(new.items)) .list[[key]] <- new.items[[key]]
  .list
}
# Select a column as a vector from a tibble data frame
# Extract a single column from a (tibble) data frame as a plain vector.
# Thin wrapper around dplyr::pull(); `column` may be a name or position.
.select_vec <- function(df, column){
  dplyr::pull(df, column)
}
# Select the top up or down rows of a data frame sorted by variables
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# - df: data frame
# - x: x axis variables (grouping variables)
# - y: y axis variables (sorting variables)
# - n the number of rows
# - grps: other grouping variables
# Keep the top `n` rows per group, where rows are sorted by `y` within the
# groups formed by `x` and `grouping.vars` (tail of the sorted groups).
# - df: data frame; x: grouping column(s); y: sorting column(s); n: rows kept
# NOTE(review): relies on the deprecated underscored dplyr verbs
# (arrange_/group_by_) and do(); behavior tied to those SE semantics.
.top_up <- function(df, x, y, n, grouping.vars = NULL){
  . <- NULL   # silence R CMD check note for the magrittr placeholder
  grouping.vars <- c(x, grouping.vars) %>%
    unique()
  df %>%
    arrange_(.dots = c(grouping.vars, y)) %>%
    group_by_(.dots = grouping.vars) %>%
    do(utils::tail(., n))
}
# Keep the bottom `n` rows per group (head of the groups sorted by `y`);
# mirror of .top_up().
# NOTE(review): relies on the deprecated underscored dplyr verbs
# (arrange_/group_by_) and do(); behavior tied to those SE semantics.
.top_down <- function(df, x, y, n, grouping.vars = NULL){
  . <- NULL   # silence R CMD check note for the magrittr placeholder
  grouping.vars <- c(x, grouping.vars) %>%
    unique()
  df %>%
    arrange_(.dots = c(grouping.vars, y)) %>%
    group_by_(.dots = grouping.vars) %>%
    do(utils::head(., n))
}
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Apply ggpubr functions on a data
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# fun: function, can be ggboxplot, ggdotplot, ggstripchart, ...
# Central dispatcher applying a ggpubr plotting function (`fun`, e.g.
# ggboxplot/ggdotplot/...) to data, handling multiple y (or x) variables.
# - combine: TRUE gathers the y variables and facets by them (one panel each)
# - merge: FALSE/"none", TRUE/"asis" (y variables become the grouping/color),
#   or "flip" (x becomes the grouping, y variables become the x tick labels)
# - select/remove/order: filter and reorder the x items before plotting
# - label/font.label/label.select/repel/label.rectangle: point labelling
# - fun_name: name of the caller ("barplot", "ggline", ...), used for
#   function-specific tweaks; group is used only by ggline
# Returns a single ggplot, or a list of ggplots when several y variables
# are plotted without combine/merge.
.plotter <- function(fun, data, x, y, combine = FALSE, merge = FALSE,
                     color = "black", fill = "white",
                     title = NULL, xlab = NULL, ylab = NULL,
                     legend = NULL, legend.title = NULL,
                     facet.by = NULL,
                     select = NULL, remove = NULL, order = NULL,
                     add = "none", add.params = list(),
                     label = NULL, font.label = list(size = 11, color = "black"),
                     label.select = NULL, repel = FALSE, label.rectangle = FALSE,
                     ggtheme = theme_pubr(),
                     fun_name = "", group = 1, # used only by ggline
                     show.legend.text = NA,
                     ...)
{
  # Normalize merge: logical -> "asis"/"none"
  if(is.logical(merge)){
    if(merge) merge = "asis"
    else merge = "none"
  }
  if(combine & merge != "none")
    stop("You should use either combine = TRUE or merge = TRUE, but not both together.")
  font.label <- .parse_font(font.label)
  if(is.null(label) & fun_name == "barplot") label <- FALSE
  # .lab is forwarded only to ggbarplot; other functions label via ggtext below
  .lab <- label
  if(fun_name != "barplot") .lab <- NULL
  # A single x and y: nothing to combine or merge
  if(!missing(x) & !missing(y)){
    if(length(y) == 1 & length(x) == 1){
      combine <- FALSE
      merge <- "none"
    }
  }
  # Check data
  #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::
  # - returns a list of updated main options:
  #  list(y, data, x)
  opts <- .check_data(data, x, y, combine = combine | merge != "none")
  data <- opts$data
  x <- opts$x
  y <- opts$y
  is_density_plot <- y[1] %in% c("..count..", "..density..", "..ecdf..", "..qq..")
  if(combine) facet.by <- ".y." # Faceting by y variables
  if(merge != "none"){
    if(!is_density_plot) facet.by <- NULL
    if(is.null(legend.title)) legend.title <- "" # remove .y. in the legend
  }
  # Updating parameters after merging
  #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::
  # Special case for density and histograms:
  # x are variables and y is ..count.. or ..density..
  # After merging, ggpubr adds a new column .y. which holds the x variables.
  # A user may ask to color by the x variables with color = ".x." without
  # knowing that the merged column is named ".y." --> translate it below.
  user.add.color <- add.params$color
  geom.text.position <- "identity"
  if(merge == "asis" ){
    .grouping.var <- ".y." # y variables become grouping variable
  }
  else if(merge == "flip"){
    .grouping.var <- opts$x # x variable becomes grouping variable
    opts$x <- ".y." # y variables become x tick labels
    if(is.null(xlab)) xlab <- FALSE
  }
  if(merge == "asis" | merge == "flip"){
    if(is_density_plot){
      color <- ifelse(color == ".x.", ".y.", color)
      fill <- ifelse(fill == ".x.", ".y.", fill)
    }
    # When color/fill map a data column, labels inherit that mapping;
    # otherwise the grouping variable drives color
    if(any(c(color, fill) %in% names(data))){
      add.params$color <- font.label$color <- ifelse(color %in% names(data), color, fill)
    }
    else if(!all(c(color, fill) %in% names(data))){
      color <- add.params$color <- font.label$color <- .grouping.var
      #fill <- "white"
    }
    group <- .grouping.var
    geom.text.position <- position_dodge(0.8)
  }
  # Multiple separate plots: default each plot's title to its y variable
  if(!combine & merge == "none" & length(opts$y) > 1 & is.null(title))
    title <- opts$y
  if(!combine & merge == "none" & is.null(title)){
    if(length(opts$y) > 1) title <- opts$y
    else if (length(opts$x) > 1 & is_density_plot) # case of density plot
      title <- opts$x
  }
  # Item to display
  x <- opts$data[, opts$x] %>% as.vector()
  if(!is.null(select))
    opts$data <- subset(opts$data, x %in% select)
  if(!is.null(remove))
    opts$data <- subset(opts$data, !(x %in% remove))
  if(!is.null(order)) opts$data[, opts$x] <- factor(opts$data[, opts$x], levels = order)
  # Add additional options, which can be potentially vectorized
  # when multiple plots
  opts <- opts %>% c(list(title = title, xlab = xlab, ylab = ylab)) %>%
    .compact()
  data <- opts$data
  opts$data <- list(opts$data)
  if(fun_name %in% c("ggline", "ggdotchart")) opts$group <- group
  # Plotting
  #::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
  # Apply function to each y variables
  p <- purrr::pmap(opts, fun, color = color, fill = fill, legend = legend,
                   legend.title = legend.title, ggtheme = ggtheme, facet.by = facet.by,
                   add = add, add.params = add.params ,
                   # group = group, # for line plot
                   user.add.color = user.add.color,
                   label = .lab, # used only in ggbarplot
                   font.label = font.label, repel = repel, label.rectangle = label.rectangle,
                   ...)
  # Faceting
  if(!is.null(facet.by))
    p <-purrr::map(p, facet, facet.by = facet.by, ...)
  # Add labels
  if(!is.null(label) & fun_name != "barplot"){
    if(is.logical(label)){
      if(label) label <- opts$y
    }
    grouping.vars <- intersect(c(facet.by, color, fill), colnames(data))
    label.opts <- font.label %>%
      .add_item(data = data, x = opts$x, y = opts$y,
                label = label, label.select = label.select,
                repel = repel, label.rectangle = label.rectangle, ggtheme = NULL,
                grouping.vars = grouping.vars, facet.by = facet.by, position = geom.text.position,
                show.legend = show.legend.text)
    p <- purrr::map(p,
                    function(p, label.opts){
                      . <- NULL
                      label.opts %>% .add_item(ggp = p) %>%
                        do.call(ggtext, .)
                    },
                    label.opts
    )
  }
  # Take into account the legend argument, when the main plot has no legend and ggtext has legend
  p <-purrr::map(p, ggpar, legend = legend, legend.title = legend.title)
  # A single plot is returned unwrapped
  if(.is_list(p) & length(p) == 1) p <- p[[1]]
  p
}
# get the geometry of the first layer
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Return the geometry name of the given layer of a ggplot, lowercased and
# with the "geom" prefix stripped (e.g. GeomBoxplot -> "boxplot").
# Returns "" for a NULL plot or a plot with no layers.
.geom <- function(p, .layer = 1){
  if(is.null(p) || .is_empty(p$layers)) return("")
  geom.class <- class(p$layers[[.layer]]$geom)[1]
  gsub("geom", "", tolower(geom.class))
}
# Get the mapping variables of the first layer
#:::::::::::::::::::::::::::::::::::::::::::::::::
# Return the aesthetic mapping of a ggplot as a named list of variable
# names (formula "~" stripped), combining the plot-level mapping with the
# mapping of the first layer. Returns an empty list for a NULL plot.
.mapping <- function(p){
  if(is.null(p)) return(list())
  . <- NULL   # silence R CMD check note for the magrittr placeholder
  # Plot-level (layer 0) mapping: "~var" strings -> "var"
  layer0.mapping <- as.character(p$mapping) %>% gsub("~", "", .)
  layer0.mapping.labels <- p$mapping %>% names()
  names(layer0.mapping) <- layer0.mapping.labels
  layer1.mapping <- NULL
  if(!.is_empty(p$layers)){
    # First layer's own mapping, processed the same way
    layer1.mapping <- p$layers[[1]]$mapping %>%
      as.character() %>% gsub("~", "", .)
    layer1.mapping.labels <- p$layers[[1]]$mapping %>%
      names()
    names(layer1.mapping) <- layer1.mapping.labels
  }
  c(layer0.mapping, layer1.mapping) %>%
    as.list()
}
# Call geom_exec function to update a plot
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Add a geom to plot `p` by calling geom_exec() with the options list
# `opts` (which holds the geom function and its arguments).
.update_plot <- function(opts, p){
  p + do.call(geom_exec, opts)
}
# Get ggplot2 x and y variable
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Return the names of the x and y variables mapped in a ggplot, as a
# named character vector c(x = ..., y = ...), with the formula "~" stripped.
.get_gg_xy_variables <- function(p){
  x <- gsub("~", "", as.character(p$mapping['x']))
  y <- gsub("~", "", as.character(p$mapping['y']))
  xy <- c(x, y)
  names(xy) <- c("x", "y")
  return(xy)
}
# Add mean or median line
# used by ggdensity and gghistogram
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# p: main plot
# data: data frame
# x: measure variables
# add: center to add
# grouping.vars: grouping variables
# Add a vertical line at the mean or median of the x variable; used by
# ggdensity and gghistogram.
# - p: main plot; add: which center to draw ("none" is a no-op)
# - grouping.vars: optional grouping columns -> one line per group
# - color/linetype/size: passed to geom_vline
.add_center_line <- function(p, add = c("none", "mean", "median"), grouping.vars = NULL,
                             color = "black", linetype = "dashed", size = NULL)
{
  add <- match.arg(add)
  data <- p$data
  # x <- .mapping(p)$x
  .mapping <- .get_gg_xy_variables(p)
  x <- .mapping["x"]
  if(!(add %in% c("mean", "median")))
    return(p)
  compute_center <- switch(add, mean = mean, median = stats::median)
  # NO grouping variable: a single line at the overall center
  if(.is_empty(grouping.vars)) {
    m <- ifelse(add == "mean",
                mean(data[, x], na.rm = TRUE),
                stats::median(data[, x], na.rm = TRUE))
    p <- p + geom_exec(geom_vline, data = data,
                       xintercept = m, color = color,
                       linetype = linetype, size = size)
  }
  # Case of grouping variable: one center per group, mapped via the
  # ".center" column of the summary data
  else {
    data_sum <- data %>%
      group_by(!!!syms(grouping.vars)) %>%
      summarise(.center = compute_center(!!sym(x), na.rm = TRUE))
    p <- p + geom_exec(geom_vline, data = data_sum,
                       xintercept = ".center", color = color,
                       linetype = linetype, size = size)
  }
  p
}
# Check legend argument
# Normalize the `legend` argument:
# - NULL or numeric coordinates are returned unchanged
# - TRUE -> "top", FALSE -> "none"
# - a character keyword is validated against the allowed positions
#   (only the first element is kept); invalid keywords raise an error.
.check_legend <- function(legend){
  allowed.values <- c("top", "bottom", "left", "right", "none")
  if(is.null(legend) || is.numeric(legend)) return(legend)
  if(is.logical(legend)){
    legend <- if(legend) "top" else "none"
  }
  else if(is.character(legend)){
    legend <- legend[1]
    if(!legend %in% allowed.values)
      stop("Argument legend should be one of ", .collapse(allowed.values, sep = ", "))
  }
  legend
}
| /R/utilities.R | no_license | Chiamh/ggpubr | R | false | false | 34,974 | r | #' @include desc_statby.R utilities_base.R utilities_color.R
NULL
#' @import ggplot2
#' @importFrom magrittr %>%
#' @importFrom dplyr group_by_
#' @importFrom dplyr group_by
#' @importFrom dplyr arrange_
#' @importFrom dplyr mutate
#' @importFrom dplyr do
#' @importFrom dplyr summarise
#' @importFrom dplyr everything
#' @importFrom grid drawDetails
#' @importFrom rlang !!
#' @importFrom rlang !!!
#' @importFrom rlang syms
# Unnesting, adapt to tidyr 1.0.0
# Internal wrapper around tidyr::unnest() bridging the tidyr 1.0.0 API
# change: newer tidyr requires the `cols` argument, older versions do not.
unnest <- function(data, cols = "data", ...){
  # tidyr > 0.8.3 introduced the cols-based interface
  if(is_pkg_version_sup("tidyr", "0.8.3")){
    results <- tidyr::unnest(data, cols = cols, ...)
  }
  else {results <- tidyr::unnest(data, ...)}
  results
}
# Check if an installed package version is superior to a specified version
# Version, pkg: character vector
# TRUE when the installed version of `pkg` is strictly greater than
# `version` (both compared as version strings).
is_pkg_version_sup<- function(pkg, version){
  installed <- as.character(utils::packageVersion(pkg))
  utils::compareVersion(installed, version) > 0
}
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Execute a geom_* function from ggplot2
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# geomfunc : gem_*() functions
# data data for mapping
# ... argument accepeted by the function
# return a plot if geomfunc!=Null or a list(option, mapping) if geomfunc = NULL
# Legacy alias forwarding to geom_exec() (defined elsewhere in the package):
# builds and executes a geom_*() call with `data`, `position` and the
# remaining arguments split between aes mapping and fixed options.
.geom_exec <- function (geomfunc = NULL, data = NULL,
                        position = NULL, ...) {
  geom_exec(geomfunc = geomfunc, data = data, position = position, ...)
}
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Official argument from ggplot2
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# bar plot arguments
# Extract the official ggplot2 geom_bar() arguments from ..., applying
# ggplot2's documented defaults for the missing ones. width/binwidth are
# passed through as-is (absent when not supplied).
.barplot_params <- function(...){
  dots <- list(...)
  pick <- function(value, default) if(is.null(value)) default else value
  res <- list()
  res$width <- dots$width
  res$binwidth <- dots$binwidth
  res$na.rm <- pick(dots$na.rm, FALSE)
  res$show.legend <- pick(dots$show.legend, NA)
  res$inherit.aes <- pick(dots$inherit.aes, TRUE)
  res
}
# box plot arguments
# Extract the official ggplot2 geom_boxplot() arguments from ..., applying
# ggplot2's documented defaults for the missing ones (outlier.colour is
# passed through as-is, absent when not supplied).
.boxplot_params <- function(...){
  dots <- list(...)
  pick <- function(value, default) if(is.null(value)) default else value
  res <- list()
  res$outlier.colour <- dots$outlier.colour
  res$outlier.shape <- pick(dots$outlier.shape, 19)
  res$outlier.size <- pick(dots$outlier.size, 1.5)
  res$outlier.stroke <- pick(dots$outlier.stroke, 0.5)
  res$notch <- pick(dots$notch, FALSE)
  res$notchwidth <- pick(dots$notchwidth, 0.5)
  res$varwidth <- pick(dots$varwidth, FALSE)
  res$na.rm <- pick(dots$na.rm, FALSE)
  res$show.legend <- pick(dots$show.legend, NA)
  res$inherit.aes <- pick(dots$inherit.aes, TRUE)
  res
}
# Extract the geom_dotplot()-related arguments from ..., with the
# defaults used by ggpubr (stackratio = 1, width = 0.9).
.dotplot_params <- function(...){
  dots <- list(...)
  pick <- function(value, default) if(is.null(value)) default else value
  res <- list()
  res$stackratio <- pick(dots$stackratio, 1)
  res$width <- pick(dots$width, 0.9)
  res
}
# Extract the official ggplot2 geom_violin() arguments from ..., applying
# the geom's documented defaults (draw_quantiles passes through as-is).
.violin_params <- function(...){
  dots <- list(...)
  pick <- function(value, default) if(is.null(value)) default else value
  res <- list()
  res$stat <- pick(dots$stat, "ydensity")
  res$draw_quantiles <- dots$draw_quantiles
  res$scale <- pick(dots$scale, "area")
  res$trim <- pick(dots$trim, TRUE)
  res
}
# Extract the geom_histogram() binning arguments from ...; both are
# passed through as-is (absent when not supplied).
.hist_params <- function(...){
  dots <- list(...)
  res <- list()
  res$binwidth <- dots$binwidth
  res$bins <- dots$bins
  res
}
# Extract the standard aesthetics (color, linetype, size, fill, shape)
# from ..., with defaults. The British spelling `colour` overrides
# `color` when both are supplied.
.standard_params <- function(...){
  dots <- list(...)
  pick <- function(value, default) if(is.null(value)) default else value
  res <- list()
  res$color <- pick(dots$color, "black")
  res$color <- pick(dots$colour, res$color)
  res$linetype <- pick(dots$linetype, "solid")
  res$size <- pick(dots$size, 1)
  res$fill <- pick(dots$fill, "black")
  res$shape <- pick(dots$shape, 19)
  res
}
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Graphical parameters
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Set plot orientation
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Set the plot orientation:
# "horizontal" flips the coordinates, "reverse" reverses the y axis,
# "vertical" (the default) leaves the plot unchanged.
.set_orientation <-
  function(p, orientation = c("vertical", "horizontal", "reverse")) {
    switch(match.arg(orientation),
           horizontal = p + coord_flip(),
           reverse = p + scale_y_reverse(),
           vertical = p)
  }
# Change title and labels
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Set the plot title, subtitle, caption and axis labels with their fonts.
# - main/submain/caption: text, or FALSE/NULL to leave unchanged
# - xlab/ylab: axis label text; FALSE hides the axis title entirely
# - font.*: font specs understood by .parse_font() (size, face, color)
# Returns the modified ggplot object.
.labs <- function(p, main = NULL, xlab = NULL, ylab = NULL,
                  font.main = NULL, font.x = NULL, font.y = NULL,
                  submain = NULL, caption = NULL,
                  font.submain = NULL, font.caption = NULL)
{
  font.main <- .parse_font(font.main)
  font.x <- .parse_font(font.x)
  font.y <- .parse_font(font.y)
  font.submain <- .parse_font(font.submain)
  font.caption <- .parse_font(font.caption)
  # FALSE means "no title": normalize to NULL so the blocks below skip it
  if(is.logical(main)){
    if(!main) main <- NULL
  }
  if(is.logical(submain)){
    if(!submain) submain <- NULL
  }
  if(is.logical(caption)){
    if(!caption) caption <- NULL
  }
  if (!is.null(main)) {
    p <- p + labs(title = main)
  }
  if (!is.null(submain)) {
    p <- p + labs(subtitle = submain)
  }
  if (!is.null(caption)) {
    p <- p + labs(caption = caption)
  }
  if (!is.null(xlab)) {
    # xlab = FALSE removes the axis title; any other value replaces it
    if (xlab == FALSE)
      p <- p + theme(axis.title.x = element_blank())
    else
      p <- p + labs(x = xlab)
  }
  if (!is.null(ylab)) {
    if (ylab == FALSE)
      p <- p + theme(axis.title.y = element_blank())
    else
      p <- p + labs(y = ylab)
  }
  # Apply fonts to each text element when a spec was provided
  if (!is.null(font.main))
    p <-
      p + theme(
        plot.title = element_text(
          size = font.main$size,
          lineheight = 1.0, face = font.main$face, colour = font.main$color
        )
      )
  if (!is.null(font.submain))
    p <-
      p + theme(
        plot.subtitle = element_text(
          size = font.submain$size,
          lineheight = 1.0, face = font.submain$face, colour = font.submain$color
        )
      )
  if (!is.null(font.caption))
    p <-
      p + theme(
        plot.caption = element_text(
          size = font.caption$size,
          lineheight = 1.0, face = font.caption$face, colour = font.caption$color
        )
      )
  if (!is.null(font.x))
    p <-
      p + theme(axis.title.x = element_text(
        size = font.x$size,
        face = font.x$face, colour = font.x$color
      ))
  if (!is.null(font.y))
    p <-
      p + theme(axis.title.y = element_text(
        size = font.y$size,
        face = font.y$face, colour = font.y$color
      ))
  p
}
# ticks
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Build a theme() controlling axis ticks and tick labels.
# - ticks: draw tick marks? - tickslab: draw tick labels?
# - xtickslab.rt / ytickslab.rt: label rotation angles in degrees
# - font.xtickslab / font.ytickslab: per-axis font specs (default to
#   font.tickslab)
# Returns a theme object to be added to a plot.
.set_ticks <-
  function(ticks = TRUE, tickslab = TRUE, font.tickslab = NULL,
           xtickslab.rt = NULL, ytickslab.rt = NULL,
           font.xtickslab = font.tickslab, font.ytickslab = font.tickslab)
  {
    . <- xhjust <- NULL
    # Right-align rotated x labels so they sit under their ticks
    if(!is.null(xtickslab.rt)) {
      if(xtickslab.rt > 5) xhjust <- 1
    }
    else xhjust <- NULL
    if (ticks)
      ticks <-
      element_line(colour = "black")
    else
      ticks <- element_blank()
    if (is.null(font.xtickslab)) font.x <- list()
    else font.x <- .parse_font(font.xtickslab)
    if (is.null(font.ytickslab)) font.y <- list()
    else font.y <- .parse_font(font.ytickslab)
    if (tickslab) {
      # Merge font spec with rotation/justification, then build element_text
      xtickslab <- font.x %>% .add_item(hjust = xhjust, angle = xtickslab.rt) %>%
        do.call(element_text, .)
      ytickslab <- font.y %>% .add_item(angle = ytickslab.rt) %>% do.call(element_text, .)
    }
    else {
      xtickslab <- element_blank()
      ytickslab <- element_blank()
    }
    theme(
      axis.ticks = ticks, axis.text.x = xtickslab, axis.text.y = ytickslab
    )
  }
# Change Axis limits
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Zoom the plot to the given axis limits via coord_cartesian() (which
# clips the view without dropping data). Returns NULL (a ggplot no-op)
# when neither limit is supplied.
.set_axis_limits <- function(xlim = NULL, ylim = NULL){
  if(!(is.null(xlim) && is.null(ylim))) coord_cartesian(xlim, ylim)
}
# Axis scales
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Apply an axis transformation (log2, log10 or sqrt) to one or both axes.
# - format.scale: when TRUE, log axes get exponent-style break labels
#   (e.g. 10^2) built with the scales package; requires 'scales'.
#   Note: sqrt has no formatted variant, so it is only honored in the
#   plain-transform branch below.
.set_scale <- function (p, xscale = c("none", "log2", "log10", "sqrt"),
                        yscale = c("none", "log2", "log10", "sqrt"),
                        format.scale = FALSE)
{
  xscale <- match.arg(xscale)
  yscale <- match.arg(yscale)
  # placeholder name used inside scales::math_format() expressions
  .x <- ".x"
  if(format.scale){
    if(!requireNamespace("scales")) stop("The R package 'scales' is required.")
    if(yscale == "log2"){
      p <- p + scale_y_continuous(trans = scales::log2_trans(),
                                  breaks = scales::trans_breaks("log2", function(x) 2^x),
                                  labels = scales::trans_format("log2", scales::math_format(2^.x)))
    }
    else if(yscale == "log10"){
      p <- p + scale_y_continuous(trans = scales::log10_trans(),
                                  breaks = scales::trans_breaks("log10", function(x) 10^x),
                                  labels = scales::trans_format("log10", scales::math_format(10^.x)))
    }
    if(xscale == "log2"){
      p <- p + scale_x_continuous(trans = scales::log2_trans(),
                                  breaks = scales::trans_breaks("log2", function(x) 2^x),
                                  labels = scales::trans_format("log2", scales::math_format(2^.x)))
    }
    else if(xscale == "log10"){
      p <- p + scale_x_continuous(trans = scales::log10_trans(),
                                  breaks = scales::trans_breaks("log10", function(x) 10^x),
                                  labels = scales::trans_format("log10", scales::math_format(10^.x)))
    }
  }
  else{
    # Plain transformation, default break labels
    if(xscale != "none") p <- p + scale_x_continuous(trans = xscale)
    if(yscale != "none") p <- p + scale_y_continuous(trans = yscale)
  }
  p
}
# Legends
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Apply legend position, title and font settings to a ggplot.
# - p: a ggplot object
# - legend: legend position passed to theme(legend.position = ...); NULL keeps the plot default
# - legend.title: either a single title applied to the color/fill/linetype/shape aesthetics,
#   or a named list forwarded to ggplot2::labs(); NULL keeps the default title
# - font.legend: font spec understood by .parse_font() (size, face, color)
# Returns the modified ggplot object.
.set_legend <- function(p, legend = NULL,
                        legend.title = NULL, font.legend = NULL)
{
  # waiver() means "leave the title unchanged" for ggplot2
  if(is.null(legend.title)) legend.title = waiver()
  font <- .parse_font(font.legend)
  if(!is.null(legend)) p <- p + theme(legend.position = legend)
  if(!.is_empty(legend.title)){
    # A list maps titles per-aesthetic; a scalar is applied to every aesthetic
    if(.is_list(legend.title)) p <- p + do.call(ggplot2::labs, legend.title)
    else p <- p +
      labs(color = legend.title, fill = legend.title, linetype = legend.title, shape = legend.title)
  }
  if(!is.null(font)){
    # Same font is used for both the legend text and its title
    p <- p + theme(
      legend.text = element_text(size = font$size,
                                 face = font$face, colour = font$color),
      legend.title = element_text(size = font$size,
                                  face = font$face, colour = font$color)
    )
  }
  p
}
# Set ticks by
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Set axis tick breaks at a regular interval, starting from 0.
# - xticks.by / yticks.by: spacing between consecutive ticks.
# Note: only one axis is changed per call; yticks.by takes precedence
# when both are supplied (else-if below).
.set_ticksby <- function(p, xticks.by = NULL, yticks.by = NULL)
{
  .data <- p$data
  # .mapping <- as.character(p$mapping)
  .mapping <- .get_gg_xy_variables(p)
  if(!is.null(yticks.by)) {
    y <- .data[, .mapping["y"]]
    # breaks run from 0 up to the maximum observed value
    ybreaks <- seq(0, max(y, na.rm = TRUE), by = yticks.by)
    p <- p + scale_y_continuous(breaks = ybreaks)
  }
  else if(!is.null(xticks.by)) {
    x <- .data[, .mapping["x"]]
    xbreaks <- seq(0, max(x, na.rm = TRUE), by = xticks.by)
    p <- p + scale_x_continuous(breaks = xbreaks)
  }
  p
}
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Add stat
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Fill in missing entries of `add.params` (color, fill, shape) from the main
# plot arguments, so that "add" layers (jitter, boxplot, violin, ...) inherit
# the plot's aesthetics unless the user overrode them explicitly.
# - add: character vector of layers to add (e.g. "jitter", "boxplot")
# - add.params: user-supplied parameters for those layers (may be empty)
# - error.plot: error-bar geometry name(s); "crossbar" needs a fill
# - data: the plot data (used to detect mapping-by-column)
# - color, fill: main plot aesthetics; column name or constant color
# Returns the completed add.params list.
.check_add.params <- function(add, add.params, error.plot, data, color, fill, ...){
  dots <- list(...)
  # When color/fill name a column of the data, propagate the mapping
  if(color %in% names(data) && is.null(add.params$color)) add.params$color <- color
  if(fill %in% names(data) && is.null(add.params$fill)) add.params$fill <- fill
  if(is.null(add.params$color)) add.params$color <- color
  # Filled geoms need an explicit fill; otherwise fall back to the color
  if(is.null(add.params$fill) && ("crossbar" %in% error.plot || "boxplot" %in% add || "violin" %in% add)) add.params$fill <- fill
  if(is.null(add.params$fill)) add.params$fill <- add.params$color
  if(!is.null(dots$shape) && is.null(add.params$shape)) add.params$shape <- dots$shape
  add.params
}
# Allowed values for add are one or the combination of: "none",
# "dotplot", "jitter", "boxplot", "mean", "mean_se", "mean_sd", "mean_ci", "mean_range",
# "median", "median_iqr", "median_mad", "median_range"
# p_geom character, e.g "geom_line"
# Add secondary layers ("add" options) to a plot: distribution geoms
# (boxplot, violin, dotplot, jitter, point, line), a center statistic
# (mean or median) and/or one error representation (se, sd, ci, range,
# iqr, mad) drawn as pointrange/linerange/errorbar/crossbar.
# `p_geom` is the geometry of the main plot (e.g. "geom_line"), used when
# choosing jitter vs dodge positioning.
.add <- function(p,
                 add = NULL,
                 add.params = list(color = "black", fill = "white", shape = 19, width = 1),
                 data = NULL, position = position_dodge(0.8),
                 error.plot = c("pointrange", "linerange", "crossbar", "errorbar",
                                "upper_errorbar", "lower_errorbar", "upper_pointrange", "lower_pointrange",
                                "upper_linerange", "lower_linerange"),
                 p_geom = ""
                 )
{
  if(is.null(data)) data <- p$data
  pms <- add.params
  if("none" %in% add) add <- "none"
  error.plot = match.arg(error.plot)
  # Fall back to defaults for unset graphical parameters.
  color <- ifelse(is.null(pms$color), "black",pms$color)
  fill <- ifelse(is.null(pms$fill), "white", pms$fill)
  shape <- ifelse(is.null(pms$shape), 19, pms$shape)
  width <- ifelse(is.null(pms$width), 1, pms$width)
  # NOTE(review): duplicates the shape assignment above (pms is add.params).
  shape <- ifelse(is.null(add.params$shape), 19, add.params$shape)
  # size <- ifelse(is.null(add.params$size), 1, add.params$size)
  # stat summary
  #.mapping <- as.character(p$mapping)
  .mapping <- .get_gg_xy_variables(p)
  x <- .mapping["x"]
  y <- .mapping["y"]
  # Precompute per-group descriptive statistics only when a center/error
  # option was requested.
  errors <- c("mean", "mean_se", "mean_sd", "mean_ci", "mean_range", "median", "median_iqr", "median_mad", "median_range")
  if(any(errors %in% add)) stat_sum <- desc_statby(data, measure.var = .mapping["y"],
                                                   grps = intersect(c(.mapping["x"], color, fill), names(data)))
  if ("boxplot" %in% add) {
    # size <- ifelse(is.null(add.params$size), 1, add.params$size)
    p <- p + .geom_exec(geom_boxplot, data = data,
                        color = color, fill = fill,
                        position = position, width = width, size = add.params$size)
  }
  if ("violin" %in% add) {
    # size <- ifelse(is.null(add.params$size), 1, add.params$size)
    p <- p + .geom_exec(geom_violin, data = data, trim = FALSE,
                        color = color, fill = fill,
                        position = position, width = width, size = add.params$size)
  }
  if ( "dotplot" %in% add ) {
    dotsize <- ifelse(is.null(add.params$size), 0.9, add.params$size)
    p <- p + .geom_exec(geom_dotplot, data = data, binaxis = 'y', stackdir = 'center',
                        color = color, fill = fill, dotsize = dotsize,
                        position = position, stackratio = 1.2, binwidth = add.params$binwidth)
  }
  if ( "jitter" %in% add ){
    # Fixed seed so jittered point positions are reproducible across redraws.
    set.seed(123)
    # jitter.size <- ifelse(is.null(add.params$size), 2, add.params$size)
    ngrps <- length(intersect(names(data), c(.mapping["x"], fill, color)))
    if(p_geom == "geom_line" | ngrps == 1) .jitter = position_jitter(0.4)
    else if(ngrps > 1) .jitter <- position_dodge(0.8)
    # An explicit add.params$jitter (a numeric amount or a position object)
    # overrides the automatic choice above.
    if(is.null(add.params$jitter)) .jitter = position_jitter(0.4)
    else if(is.numeric(add.params$jitter))
      .jitter <- position_jitter(add.params$jitter)
    else .jitter <- add.params$jitter
    p <- p + .geom_exec(geom_jitter, data = data,
                        color = color, fill = fill, shape = shape, size = add.params$size,
                        position = .jitter )
  }
  if ( "point" %in% add ) {
    p <- p + .geom_exec(geom_point, data = data,
                        color = color, size = add.params$size,
                        position = position)
  }
  if ( "line" %in% add ) {
    p <- p + .geom_exec(geom_line, data = data, group = 1,
                        color = color, size = add.params$size,
                        position = position)
  }
  # Add mean or median
  center <- intersect(c("mean", "median"), add)
  if(length(center) == 2)
    stop("Use mean or mdedian, but not both at the same time.")
  if(length(center) == 1){
    center.size <- ifelse(is.null(add.params$size), 1, add.params$size)
    p <- p %>%
      add_summary(fun = center, color = color, shape = shape,
                  position = position, size = center.size)
  }
  # Add errors
  errors <- c("mean_se", "mean_sd", "mean_ci", "mean_range", "median_iqr", "median_mad", "median_range")
  errors <- intersect(errors, add)
  if(length(errors) >= 2)
    stop("Choose one these: ", paste(errors, collapse =", "))
  if(length(errors) == 1){
    # Split e.g. "mean_sd" into the center ("mean") and spread ("sd")
    # columns of the stat_sum table computed above.
    errors <- strsplit(errors, "_", fixed = TRUE)[[1]]
    .center <- errors[1]
    .errors <- errors[2]
    stat_sum$ymin <- stat_sum[, .center] - stat_sum[, .errors]
    stat_sum$ymax <- stat_sum[, .center] + stat_sum[, .errors]
    names(stat_sum)[which(names(stat_sum) == .center)] <- y
    size <- ifelse(is.null(add.params$size), 1, add.params$size)
    # "upper_*"/"lower_*" variants draw only one side of the interval.
    if(error.plot %in% c("upper_errorbar", "upper_pointrange", "upper_linerange")) {
      ymin <- y
      ymax <- "ymax"
    }
    else if(error.plot %in% c("lower_errorbar", "lower_pointrange", "lower_linerange")){
      ymin <- "ymin"
      ymax <- y
    }
    else {
      ymin <- "ymin"
      ymax <- "ymax"
    }
    if(error.plot %in% c("pointrange", "lower_pointrange", "upper_pointrange"))
      p <- p + .geom_exec(geom_pointrange, data = stat_sum,
                          color = color, shape = shape, ymin = ymin, ymax = ymax,
                          position = position, size = size)
    else if(error.plot %in% c("linerange", "lower_linerange", "upper_linerange"))
      p <- p + .geom_exec(geom_linerange, data = stat_sum,
                          color = color, ymin = ymin, ymax = ymax,
                          position = position, size = size)
    else if(error.plot %in% c("errorbar", "lower_errorbar", "upper_errorbar"))
      p <- p + .geom_exec(geom_errorbar, data = stat_sum,
                          color = color, ymin = ymin, ymax = ymax,
                          position = position, size = size, width = 0.2)
    else if(error.plot == "crossbar")
      p <- p + .geom_exec(geom_crossbar, data = stat_sum, fill = fill,
                          color = color, ymin = "ymin", ymax = "ymax",
                          position = position, width = width, size = size)
  }
  p
}
# Calculate the mean and the SD in each group
#+++++++++++++++++++++++++
# data : a data frame
# varname : the name of the variable to be summariezed
# grps : column names to be used as grouping variables
# .mean_sd <- function(data, varname, grps){
# summary_func <- function(x, col){
# c(mean = base::mean(x[[col]], na.rm=TRUE),
# sd = stats::sd(x[[col]], na.rm=TRUE))
# }
# data_sum <- plyr::ddply(data, grps, .fun=summary_func, varname)
# data_sum$ymin <- data_sum$mean-data_sum$sd
# data_sum$ymax <- data_sum$mean+data_sum$sd
# names(data_sum)[ncol(data_sum)-3] <- varname
# # data_sum <- plyr::rename(data_sum, c("mean" = varname))
# return(data_sum)
# }
# Summary functions
# Names of the summary statistics supported by the "add" option.
.summary_functions <- function(){
  c(
    "mean", "mean_se", "mean_sd", "mean_ci", "mean_range",
    "median", "median_iqr", "median_mad", "median_range"
  )
}
# parse font
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Parse a font specification into list(size, face, color).
# `font` may already be a list (returned unchanged), NULL (returns NULL),
# or a character vector mixing a size ("12"), a face ("plain", "bold",
# "italic", "bold.italic") and a color; unmatched elements become the color.
.parse_font <- function(font){
  if (is.null(font)) {
    return(NULL)
  }
  if (inherits(font, "list")) {
    return(font)
  }
  size.idx <- grep("^[0-9]+$", font, perl = TRUE)
  face.idx <- grep("plain|bold|italic|bold.italic", font, perl = TRUE)
  font.size <- if (length(size.idx) == 0) NULL else as.numeric(font[size.idx])
  font.face <- if (length(face.idx) == 0) NULL else font[face.idx]
  font.color <- setdiff(font, c(font.size, font.face))
  if (length(font.color) == 0) font.color <- NULL
  list(size = font.size, face = font.face, color = font.color)
}
# Add annotation to a plot
# label: text to be added to a plot
# size: text size
# coord: x and coordinates
# Add a text annotation to a plot.
# With no coordinates the label is drawn as a custom grob at a fixed
# position; otherwise annotate() places it at the given (x, y) data
# coordinates (annotate() interprets size differently, hence the /3 rescale).
.ggannotate <- function (label, size = 12, coord = c(NULL, NULL)){
  if (is.null(unique(coord))) {
    text.grob <- grid::textGrob(
      label, x = 0.3, y = 0.80, hjust = 0,
      gp = grid::gpar(col = "black", fontsize = size, fontface = "plain")
    )
    ggplot2::annotation_custom(grid::grobTree(text.grob))
  } else {
    ggplot2::annotate("text", x = coord[1], y = coord[2],
                      label = label, size = size/3)
  }
}
#:::::::::::::::::::::::::::::::::::::::::
# Check the data provided by user
#:::::::::::::::::::::::::::::::::::::::::
# combine: if TRUE, gather y variables
# return a list(data, x, y)
# Validate/normalize the (data, x, y) triple of a plotting call.
#
# Handles: a bare numeric vector as `data` (x and y missing), a missing x,
# and multiple y (or x) columns, which are optionally gathered into the
# long-format columns .y./.value. when combine = TRUE.
# Returns list(y, data, x) -- x-named first for density-type plots, where
# the panels are named after the x variables.
.check_data <- function(data, x, y, combine = FALSE)
{
  if(missing(x) & missing(y)){
    if(!is.numeric(data))
      stop("x and y are missing. In this case data should be a numeric vector.")
    else{
      data <- data.frame(y = data, x = rep(1, length(data)))
      x <- "x"
      y <- "y"
    }
  }
  else if(missing(x)) {
    x <- "x"
    if(is.numeric(data)) data <- data.frame(x = data)
    else data$x <- rep("1", nrow(data))
  }
  # A list of y elements to plot
  else if(length(y) > 1){
    if(!all(y %in% colnames(data))){
      not_found <- setdiff(y , colnames(data))
      y <- intersect(y, colnames(data))
      if(.is_empty(y))
        stop("Can't find the y elements in the data.")
      else if(!.is_empty(not_found))
        warning("Can't find the following element in the data: ",
                .collapse(not_found))
    }
  }
  # Work on a plain data.frame (tibble single-bracket subsetting differs).
  if(inherits(data, c("tbl_df", "tbl")))
    data <- as.data.frame(data)
  # Combining y variables
  #......................................................
  if(is.null(y)) y <- ""
  if(combine & length(y) > 1){
    data <- tidyr::gather_(data, key_col = ".y.", value_col = ".value.",
                           gather_cols = y)
    data[, ".y."] <- factor(data[, ".y."], levels = unique(data[, ".y."]))
    y <- ".value."
  }
  # Combining x variables: Case of density plot or histograms
  #......................................................
  else if(combine & length(x) > 1 & y[1] %in% c("..density..", "..count..", "..ecdf..", "..qq..")){
    data <- tidyr::gather_(data, key_col = ".y.", value_col = ".value.",
                           gather_cols = x)
    data[, ".y."] <- factor(data[, ".y."], levels = unique(data[, ".y."]))
    x <- ".value."
  }
  # If not factor, x elements on the plot should
  # appear in the same order as in the data
  if(is.character(data[, x]))
    data[, x] <- factor(data[, x], levels = unique(data[, x]))
  y <- unique(y)
  names(y) <- y
  x <- unique(x)
  names(x) <- x
  if(y[1] %in% c("..density..", "..count..", "..ecdf..", "..qq.."))
    list(x = x, data = data, y = y) # The name of plots are x variables
  else
    list(y = y, data = data, x = x) # The name of plots will be y variables
}
# Adjust shape when ngroups > 6, to avoid ggplot warnings
# Use a manual shape scale when the shape-mapping column has more than 6
# levels, so every group gets a distinct shape.
.scale_point_shape <- function(p, data, shape){
  if(shape %in% colnames(data)){
    grp <- data[, shape]
    if(!inherits(grp, "factor")) grp <- as.factor(grp)
    # BUG FIX: levels() must be taken from the coerced factor `grp`;
    # levels() on a non-factor column returns NULL, so ngroups was 0 and
    # the manual scale was never applied.
    ngroups <- length(levels(grp))
    if(ngroups > 6) p <- p + scale_shape_manual(values = 1:ngroups, labels = levels(grp))
  }
  p
}
# Get not numeric columns in a data.frame
# Return the names of the non-numeric columns of a data frame,
# or NULL when every column is numeric.
.get_not_numeric_vars <- function(data_frame){
  numeric.col <- vapply(data_frame, is.numeric, logical(1))
  if (all(numeric.col)) {
    return(NULL)
  }
  colnames(data_frame[, !numeric.col, drop = FALSE])
}
# Get the current color used in ggplot
# Number of distinct colors (or fills) used by the first layer of a
# built ggplot; used to decide how many palette colors are required.
.get_ggplot_ncolors <- function(p){
  g <- ggplot_build(p)
  gdata <- g$data[[1]]
  cols <- fills <- 1
  if("colour" %in% names(gdata)) cols <- unique(unlist(gdata["colour"]))
  # BUG FIX: the built-data column is named "fill"; the old test for
  # "fills" never matched, so the fill count stayed at 1.
  if("fill" %in% names(gdata)) fills <- unique(unlist(gdata["fill"]))
  max(length(cols), length(fills))
}
# Check if character string is a valid color representation
# Test, elementwise, whether each string is a valid R color
# (a color name or a hex specification accepted by col2rgb()).
.is_color <- function(x) {
  vapply(x, function(value) {
    tryCatch(is.matrix(grDevices::col2rgb(value)),
             error = function(e) FALSE)
  }, logical(1))
}
# Collapse one or two vectors
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
.collapse <- function(x, y = NULL, sep = "."){
if(missing(y))
paste(x, collapse = sep)
else if(is.null(x) & is.null(y))
return(NULL)
else if(is.null(x))
return (as.character(y))
else if(is.null(y))
return(as.character(x))
else
paste0(x, sep, y)
}
# Check if en object is empty
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# TRUE when x has no elements (NULL, empty vector, empty list, ...).
.is_empty <- function(x){
  identical(length(x), 0L)
}
# Remove NULL items from a vector or list.
#
# x: a vector or list; names of the kept elements are preserved.
.compact <- function(x){
  x[!vapply(x, is.null, logical(1))]
}
# Check if is a list
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# TRUE when x is (or inherits from) class "list"; FALSE for data frames.
.is_list <- function(x){
  "list" %in% class(x)
}
# Returns the levels of a factor variable
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Levels of x, coercing to factor first when needed
# (an existing factor keeps its level order).
.levels <- function(x){
  levels(as.factor(x))
}
# Remove items from a list
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Remove the named items from a list; names that are not present
# are silently ignored.
.remove_item <- function(.list, items){
  for (item.name in items) {
    .list[[item.name]] <- NULL
  }
  .list
}
# Additems in a list
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Add (or overwrite) items in a list, given as name = value pairs.
.add_item <- function(.list, ...){
  new.items <- list(...)
  for (item.name in names(new.items)) {
    .list[[item.name]] <- new.items[[item.name]]
  }
  .list
}
# Select a column as a vector from a tibble/data frame
# (column may be given by name or position, per dplyr::pull()).
.select_vec <- function(df, column){
  dplyr::pull(df, column)
}
# Select the top up or down rows of a data frame sorted by variables
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# - df: data frame
# - x: x axis variables (grouping variables)
# - y: y axis variables (sorting variables)
# - n the number of rows
# - grps: other grouping variables
# Top n rows per group, taken from the high end of the sort by `y`
# (rows are sorted ascending, then tail(n) keeps the largest).
# NOTE(review): uses the underscore (standard-evaluation) dplyr verbs,
# which are deprecated in current dplyr.
.top_up <- function(df, x, y, n, grouping.vars = NULL){
  . <- NULL
  grouping.vars <- c(x, grouping.vars) %>%
    unique()
  df %>%
    arrange_(.dots = c(grouping.vars, y)) %>%
    group_by_(.dots = grouping.vars) %>%
    do(utils::tail(., n))
}
# Bottom n rows per group (rows sorted ascending by `y`, head(n) keeps
# the smallest); same conventions as .top_up().
.top_down <- function(df, x, y, n, grouping.vars = NULL){
  . <- NULL
  grouping.vars <- c(x, grouping.vars) %>%
    unique()
  df %>%
    arrange_(.dots = c(grouping.vars, y)) %>%
    group_by_(.dots = grouping.vars) %>%
    do(utils::head(., n))
}
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Apply ggpubr functions on a data
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# fun: function, can be ggboxplot, ggdotplot, ggstripchart, ...
# Shared driver behind the gg* high-level functions: validates the data,
# handles combine/merge of multiple x/y variables, applies the
# select/remove/order filters, then calls `fun` once per y variable
# (via purrr::pmap), adding facets and text labels as requested.
# Returns a single ggplot when one plot is produced, otherwise a list.
.plotter <- function(fun, data, x, y, combine = FALSE, merge = FALSE,
                     color = "black", fill = "white",
                     title = NULL, xlab = NULL, ylab = NULL,
                     legend = NULL, legend.title = NULL,
                     facet.by = NULL,
                     select = NULL, remove = NULL, order = NULL,
                     add = "none", add.params = list(),
                     label = NULL, font.label = list(size = 11, color = "black"),
                     label.select = NULL, repel = FALSE, label.rectangle = FALSE,
                     ggtheme = theme_pubr(),
                     fun_name = "", group = 1, # used only by ggline
                     show.legend.text = NA,
                     ...)
{
  if(is.logical(merge)){
    if(merge) merge = "asis"
    else merge = "none"
  }
  if(combine & merge != "none")
    stop("You should use either combine = TRUE or merge = TRUE, but not both together.")
  font.label <- .parse_font(font.label)
  if(is.null(label) & fun_name == "barplot") label <- FALSE
  .lab <- label
  if(fun_name != "barplot") .lab <- NULL
  if(!missing(x) & !missing(y)){
    if(length(y) == 1 & length(x) == 1){
      combine <- FALSE
      merge <- "none"
    }
  }
  # Check data
  #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::
  # - returns a list of updated main options:
  # list(y, data, x)
  opts <- .check_data(data, x, y, combine = combine | merge != "none")
  data <- opts$data
  x <- opts$x
  y <- opts$y
  is_density_plot <- y[1] %in% c("..count..", "..density..", "..ecdf..", "..qq..")
  if(combine) facet.by <- ".y." # Faceting by y variables
  if(merge != "none"){
    if(!is_density_plot) facet.by <- NULL
    if(is.null(legend.title)) legend.title <- "" # remove .y. in the legend
  }
  # Updating parameters after merging
  #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::
  # Special case for density and histograms:
  # x are variables and y is ..count.. or ..density..
  # after merging ggpubr add a new column .y. which hold x variables
  # User might want to color by x variables as follow color = ".x." and
  # he aren't aware that the column is ".y." --> so we should translate this (see from line 1055)
  user.add.color <- add.params$color
  geom.text.position <- "identity"
  if(merge == "asis" ){
    .grouping.var <- ".y." # y variables become grouping variable
  }
  else if(merge == "flip"){
    .grouping.var <- opts$x # x variable becomes grouping variable
    opts$x <- ".y." # y variables become x tick labels
    if(is.null(xlab)) xlab <- FALSE
  }
  if(merge == "asis" | merge == "flip"){
    if(is_density_plot){
      color <- ifelse(color == ".x.", ".y.", color)
      fill <- ifelse(fill == ".x.", ".y.", fill)
    }
    if(any(c(color, fill) %in% names(data))){
      add.params$color <- font.label$color <- ifelse(color %in% names(data), color, fill)
    }
    else if(!all(c(color, fill) %in% names(data))){
      color <- add.params$color <- font.label$color <- .grouping.var
      #fill <- "white"
    }
    group <- .grouping.var
    geom.text.position <- position_dodge(0.8)
  }
  # Default titles when several plots are produced without combine/merge.
  if(!combine & merge == "none" & length(opts$y) > 1 & is.null(title))
    title <- opts$y
  if(!combine & merge == "none" & is.null(title)){
    if(length(opts$y) > 1) title <- opts$y
    else if (length(opts$x) > 1 & is_density_plot) # case of density plot
      title <- opts$x
  }
  # Item to display
  x <- opts$data[, opts$x] %>% as.vector()
  if(!is.null(select))
    opts$data <- subset(opts$data, x %in% select)
  if(!is.null(remove))
    opts$data <- subset(opts$data, !(x %in% remove))
  if(!is.null(order)) opts$data[, opts$x] <- factor(opts$data[, opts$x], levels = order)
  # Add additional options, which can be potentially vectorized
  # when multiple plots
  opts <- opts %>% c(list(title = title, xlab = xlab, ylab = ylab)) %>%
    .compact()
  data <- opts$data
  # Wrap data in a list so pmap() recycles the same data for every y.
  opts$data <- list(opts$data)
  if(fun_name %in% c("ggline", "ggdotchart")) opts$group <- group
  # Plotting
  #::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
  # Apply function to each y variables
  p <- purrr::pmap(opts, fun, color = color, fill = fill, legend = legend,
                   legend.title = legend.title, ggtheme = ggtheme, facet.by = facet.by,
                   add = add, add.params = add.params ,
                   # group = group, # for line plot
                   user.add.color = user.add.color,
                   label = .lab, # used only in ggbarplot
                   font.label = font.label, repel = repel, label.rectangle = label.rectangle,
                   ...)
  # Faceting
  if(!is.null(facet.by))
    p <-purrr::map(p, facet, facet.by = facet.by, ...)
  # Add labels
  if(!is.null(label) & fun_name != "barplot"){
    if(is.logical(label)){
      if(label) label <- opts$y
    }
    grouping.vars <- intersect(c(facet.by, color, fill), colnames(data))
    label.opts <- font.label %>%
      .add_item(data = data, x = opts$x, y = opts$y,
                label = label, label.select = label.select,
                repel = repel, label.rectangle = label.rectangle, ggtheme = NULL,
                grouping.vars = grouping.vars, facet.by = facet.by, position = geom.text.position,
                show.legend = show.legend.text)
    p <- purrr::map(p,
                    function(p, label.opts){
                      . <- NULL
                      label.opts %>% .add_item(ggp = p) %>%
                        do.call(ggtext, .)
                    },
                    label.opts
    )
  }
  # Take into account the legend argument, when the main plot has no legend and ggtext has legend
  p <-purrr::map(p, ggpar, legend = legend, legend.title = legend.title)
  if(.is_list(p) & length(p) == 1) p <- p[[1]]
  p
}
# get the geometry of the first layer
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Name of the geometry of one layer (default: the first) of a ggplot,
# e.g. "point" for GeomPoint; "" for a NULL plot or a plot with no layers.
.geom <- function(p, .layer = 1){
  if (is.null(p) || length(p$layers) == 0) {
    return("")
  }
  geom.class <- class(p$layers[[.layer]]$geom)[1]
  gsub("geom", "", tolower(geom.class))
}
# Get the mapping variables of the first layer
#:::::::::::::::::::::::::::::::::::::::::::::::::
# Aesthetic mappings of a ggplot as a named list of variable names,
# combining the plot-level mapping with the first layer's mapping
# (the leading "~" of quosure-formatted mappings is stripped).
.mapping <- function(p){
  if (is.null(p)) {
    return(list())
  }
  plot.mapping <- gsub("~", "", as.character(p$mapping))
  names(plot.mapping) <- names(p$mapping)
  layer.mapping <- NULL
  if (length(p$layers) != 0) {
    first.layer <- p$layers[[1]]
    layer.mapping <- gsub("~", "", as.character(first.layer$mapping))
    names(layer.mapping) <- names(first.layer$mapping)
  }
  as.list(c(plot.mapping, layer.mapping))
}
# Call geom_exec function to update a plot
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Build a layer from `opts` (a named list of geom_exec() arguments)
# and add it to the plot p.
.update_plot <- function(opts, p){
  p + do.call(geom_exec, opts)
}
# Get ggplot2 x and y variable
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Names of the variables mapped to x and y in a ggplot,
# returned as c(x = "...", y = "...") with any leading "~" stripped.
.get_gg_xy_variables <- function(p){
  x.var <- gsub("~", "", as.character(p$mapping['x']))
  y.var <- gsub("~", "", as.character(p$mapping['y']))
  c(x = x.var, y = y.var)
}
# Add mean or median line
# used by ggdensity and gghistogram
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# p: main plot
# data: data frame
# x: measure variables
# add: center to add
# grouping.vars: grouping variables
# Add a vertical line at the mean or median of the x variable of `p`
# (used by ggdensity and gghistogram). With grouping variables, one line
# is drawn per group at that group's center.
.add_center_line <- function(p, add = c("none", "mean", "median"), grouping.vars = NULL,
                             color = "black", linetype = "dashed", size = NULL)
{
  add <- match.arg(add)
  data <- p$data
  # x <- .mapping(p)$x
  .mapping <- .get_gg_xy_variables(p)
  x <- .mapping["x"]
  if(!(add %in% c("mean", "median")))
    return(p)
  compute_center <- switch(add, mean = mean, median = stats::median)
  # NO grouping variable
  if(.is_empty(grouping.vars)) {
    # NOTE(review): this branch re-selects the statistic with ifelse();
    # compute_center is only used in the grouped branch below.
    m <- ifelse(add == "mean",
                mean(data[, x], na.rm = TRUE),
                stats::median(data[, x], na.rm = TRUE))
    p <- p + geom_exec(geom_vline, data = data,
                       xintercept = m, color = color,
                       linetype = linetype, size = size)
  }
  # Case of grouping variable
  else {
    data_sum <- data %>%
      group_by(!!!syms(grouping.vars)) %>%
      summarise(.center = compute_center(!!sym(x), na.rm = TRUE))
    p <- p + geom_exec(geom_vline, data = data_sum,
                       xintercept = ".center", color = color,
                       linetype = linetype, size = size)
  }
  p
}
# Check legend argument
# Normalize the `legend` argument.
# Accepts NULL or a numeric coordinate vector (returned as-is), a logical
# (TRUE -> "top", FALSE -> "none"), or a keyword among "top", "bottom",
# "left", "right", "none" (only the first element is used).
.check_legend <- function(legend){
  allowed.values <- c("top", "bottom", "left", "right", "none")
  if (is.null(legend) || is.numeric(legend)) {
    return(legend)
  }
  if (is.logical(legend)) {
    legend <- if (legend) "top" else "none"
  } else if (is.character(legend)) {
    legend <- legend[1]
    if (!legend %in% allowed.values) {
      stop("Argument legend should be one of ", .collapse(allowed.values, sep = ", "))
    }
  }
  return (legend)
}
|
# Fit a 10-fold cross-validated elastic net on the pleura correlation
# training set and append the fitted coefficient path to a log file.
library(glmnet)
# head=T: first row is a header; column 1 is the response, columns
# 4..ncol are the predictors.
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/pleura.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the CV fold assignment is reproducible.
set.seed(123)
# alpha is glmnet's elastic-net mixing parameter (0 = ridge, 1 = lasso).
# NOTE(review): output file is named *_005 but alpha is 0.01 -- confirm intended.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.01,family="gaussian",standardize=TRUE)
sink('./pleura_005.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Correlation/pleura/pleura_005.R | no_license | esbgkannan/QSMART | R | false | false | 350 | r | library(glmnet)
# 10-fold cross-validated elastic net on the pleura correlation training
# set; the fitted coefficient path is appended to a log file.
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/pleura.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the CV fold assignment is reproducible.
set.seed(123)
# alpha is glmnet's elastic-net mixing parameter (0 = ridge, 1 = lasso).
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.01,family="gaussian",standardize=TRUE)
sink('./pleura_005.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# t student
# t statistic and approximate significance of a Pearson correlation.
#
# r:      correlation coefficient
# sample: sample size n (the test has n - 2 degrees of freedom)
# The significance is the point of a 0.001-step probability grid whose
# t quantile is closest to the observed statistic.
t.corr <- function(r, sample) {
  dof <- sample - 2
  tstat <- r * sqrt(dof) / sqrt(1 - r^2)
  prob.grid <- seq(0.001, 0.999, 0.001)
  closest <- which.min(abs(qt(prob.grid, df = dof) - tstat))
  list(corr = r, tvalue = tstat, signif = prob.grid[closest])
}
# extract significant for correlation
# Critical Pearson correlation coefficient for a given significance level.
#
# significance: probability level fed to qt(); for "two-tailed" it is
#               mapped to significance + (1 - significance)/2 (so e.g.
#               0.95 yields the usual 5% two-tailed critical value)
# sample:       sample size n (degrees of freedom are n - 2)
# method:       "two-tailed" (default) or "one-tailed"
# verbose:      print the intermediate t quantile and the critical r
t.sign.corr <- function(significance, sample, method = "two-tailed", verbose = FALSE) {
  # FIX: fail fast on an unknown method; previously the probability was
  # silently left unset and qt() later failed with an unrelated error.
  if (!method %in% c("two-tailed", "one-tailed")) {
    stop("method must be 'two-tailed' or 'one-tailed'")
  }
  # `prob` (not `sign`) to avoid shadowing base::sign().
  prob <- if (method == "two-tailed") significance + (1 - significance) / 2 else significance
  t <- qt(prob, df = sample - 2)
  r <- t / (sqrt(sample - 2 + t^2))
  if (verbose) {
    print(paste("Signifance level is", significance * 100, "% with", method, "distribution"))
    print(paste("Corresponding t-Student quantile is", t))
    print(paste("Critical Pearson's correlation coefficient is", r))
  }
  return(r)
}
# function to extract the first significant year (when all the following are significant)
# First year from which the Mann-Kendall trend test stays significant.
#
# onepoint: named numeric vector of yearly values (names are the years).
# For each end year x in startyear..lastyear, MannKendall() is applied to
# the 1951..x subseries; the function returns the first year of the
# trailing run of significant (p < 0.05) results ending at the last
# available year, or NA when there is no such run.
year_mannkendall <- function(onepoint) {
  ff <- NA
  # remove empty values and set a startyear
  onepoint2 <- onepoint[!is.na(onepoint)]
  startyear <- 1990
  if (length(onepoint2) > 0 & !all(onepoint2 == 0)) {
    lastyear <- max(as.numeric(names(onepoint2)))
    # apply mannkendall
    # dput(onepoint2)
    test <- sapply(startyear:lastyear, function(x) {
      serie <- onepoint2[which(names(onepoint2) %in% (1951:x))]
      if (!all(serie == 0)) {
        return(MannKendall(serie)$sl)
      } else {
        return(NULL)
      }
    })
    # p-values (MannKendall $sl), indexed by the subseries end year.
    names(test) <- startyear:lastyear
    check <- which(unlist(test) < 0.05)
    # print(check)
    # if there is a sequence, and the last value is from the last year, go for it
    if (length(check) > 0) {
      if (names(check[length(check)]) == as.numeric(names(onepoint2[length(onepoint2)]))) {
        if (length(check) == 1) {
          ff <- lastyear
        } else {
          # all the values after the first guess are significant
          if (rle(rev(diff(check)))$values[1] == 1) {
            lastsequence <- rle(rev(diff(check)))$lengths[1] - 1
            ff <- lastyear - lastsequence
          }
        }
      }
    }
  }
  return(ff)
}
# kendall
# Round a p-level for labelling: values rounding to 1 are capped at 0.99,
# 0.98 and 0.99 are kept as-is, anything else snaps to a 0.05 grid.
round.pi <- function(pivalue) {
  rounded <- round(pivalue, 2)
  if (rounded == 1) {
    value <- 0.99
  } else if (rounded %in% c(0.98, 0.99)) {
    value <- rounded
  } else {
    value <- 0.05 * round(rounded / 0.05)
  }
  return(value)
}
| /script/routines/statistics.R | permissive | oloapinivad/MiLES | R | false | false | 2,451 | r | # t student
# t statistic and approximate significance of a Pearson correlation.
#
# r:      correlation coefficient
# sample: sample size n (the test has n - 2 degrees of freedom)
# The significance is the point of a 0.001-step probability grid whose
# t quantile is closest to the observed statistic.
t.corr <- function(r, sample) {
  dof <- sample - 2
  tstat <- r * sqrt(dof) / sqrt(1 - r^2)
  prob.grid <- seq(0.001, 0.999, 0.001)
  closest <- which.min(abs(qt(prob.grid, df = dof) - tstat))
  list(corr = r, tvalue = tstat, signif = prob.grid[closest])
}
# extract significant for correlation
# Critical Pearson correlation coefficient for a given significance level.
#
# significance: probability level fed to qt(); for "two-tailed" it is
#               mapped to significance + (1 - significance)/2 (so e.g.
#               0.95 yields the usual 5% two-tailed critical value)
# sample:       sample size n (degrees of freedom are n - 2)
# method:       "two-tailed" (default) or "one-tailed"
# verbose:      print the intermediate t quantile and the critical r
t.sign.corr <- function(significance, sample, method = "two-tailed", verbose = FALSE) {
  # FIX: fail fast on an unknown method; previously the probability was
  # silently left unset and qt() later failed with an unrelated error.
  if (!method %in% c("two-tailed", "one-tailed")) {
    stop("method must be 'two-tailed' or 'one-tailed'")
  }
  # `prob` (not `sign`) to avoid shadowing base::sign().
  prob <- if (method == "two-tailed") significance + (1 - significance) / 2 else significance
  t <- qt(prob, df = sample - 2)
  r <- t / (sqrt(sample - 2 + t^2))
  if (verbose) {
    print(paste("Signifance level is", significance * 100, "% with", method, "distribution"))
    print(paste("Corresponding t-Student quantile is", t))
    print(paste("Critical Pearson's correlation coefficient is", r))
  }
  return(r)
}
# function to extract the first significant year (when all the following are significant)
# First year from which the Mann-Kendall trend test stays significant.
#
# onepoint: named numeric vector of yearly values (names are the years).
# For each end year x in startyear..lastyear, MannKendall() is applied to
# the 1951..x subseries; the function returns the first year of the
# trailing run of significant (p < 0.05) results ending at the last
# available year, or NA when there is no such run.
year_mannkendall <- function(onepoint) {
  ff <- NA
  # remove empty values and set a startyear
  onepoint2 <- onepoint[!is.na(onepoint)]
  startyear <- 1990
  if (length(onepoint2) > 0 & !all(onepoint2 == 0)) {
    lastyear <- max(as.numeric(names(onepoint2)))
    # apply mannkendall
    # dput(onepoint2)
    test <- sapply(startyear:lastyear, function(x) {
      serie <- onepoint2[which(names(onepoint2) %in% (1951:x))]
      if (!all(serie == 0)) {
        return(MannKendall(serie)$sl)
      } else {
        return(NULL)
      }
    })
    # p-values (MannKendall $sl), indexed by the subseries end year.
    names(test) <- startyear:lastyear
    check <- which(unlist(test) < 0.05)
    # print(check)
    # if there is a sequence, and the last value is from the last year, go for it
    if (length(check) > 0) {
      if (names(check[length(check)]) == as.numeric(names(onepoint2[length(onepoint2)]))) {
        if (length(check) == 1) {
          ff <- lastyear
        } else {
          # all the values after the first guess are significant
          if (rle(rev(diff(check)))$values[1] == 1) {
            lastsequence <- rle(rev(diff(check)))$lengths[1] - 1
            ff <- lastyear - lastsequence
          }
        }
      }
    }
  }
  return(ff)
}
# kendall
# Round a p-level for labelling: values rounding to 1 are capped at 0.99,
# 0.98 and 0.99 are kept as-is, anything else snaps to a 0.05 grid.
round.pi <- function(pivalue) {
  rounded <- round(pivalue, 2)
  if (rounded == 1) {
    value <- 0.99
  } else if (rounded %in% c(0.98, 0.99)) {
    value <- rounded
  } else {
    value <- 0.05 * round(rounded / 0.05)
  }
  return(value)
}
|
# Standard testthat entry point: runs the freqdom package's test suite
# (executed by R CMD check from the tests/ directory).
library(testthat)
library(freqdom)
test_check("freqdom")
| /tests/testthat/testthat.R | no_license | kidzik/freqdom | R | false | false | 58 | r | library(testthat)
# Load the package under test and run its testthat suite.
library(freqdom)
test_check("freqdom")
|
# Fetch (or load from cache) chron control tables for a set of downloads.
#
# all_downloads: list of download objects accepted by get_chroncontrol()
# settings:      list with a `version` element used to build the cache path
#
# The result is cached as an RDS file keyed by the settings version; when
# the cache exists it is read back instead of re-querying. A failed fetch
# is replaced by the placeholder list(empty = NA).
get_allchronologies <- function(all_downloads, settings) {
  chron_file <- paste0("data/output/chronologies_v",
                       settings$version, ".rds")
  if (!file.exists(chron_file)) {
    chronologies <- list()
    # seq_along() (not 1:length()) so an empty input yields no iterations.
    for (i in seq_along(all_downloads)) {
      chronologies[[i]] <- try(get_chroncontrol(all_downloads[[i]]),
                               silent = TRUE)
      flush.console()
      # inherits() is the idiomatic test for try() failures.
      if (inherits(chronologies[[i]], "try-error")) {
        chronologies[[i]] <- list(empty = NA)
      }
    }
    saveRDS(chronologies,
            file = chron_file)
  } else {
    chronologies <- readRDS(chron_file)
  }
  return(chronologies)
}
| /R/get_allchronologies.R | no_license | PalEON-Project/stepps-baconizing | R | false | false | 664 | r | get_allchronologies <- function(all_downloads, settings) {
  # Cache path is keyed by the settings version.
  chron_file <- paste0("data/output/chronologies_v",
                       settings$version, ".rds")
  if (!file.exists(chron_file)) {
    chronologies <- list()
    # NOTE(review): 1:length() iterates c(1, 0) on an empty input;
    # seq_along() would be safer here.
    for (i in 1:length(all_downloads)) {
      chronologies[[i]] <- try(get_chroncontrol(all_downloads[[i]]),
                               silent = TRUE)
      flush.console()
      # Replace failed fetches with a placeholder entry.
      if ("try-error" %in% class(chronologies[[i]])) {
        chronologies[[i]] <- list(empty = NA)
      }
    }
    saveRDS(chronologies,
            file = chron_file)
  } else{
    chronologies <- readRDS(chron_file)
  }
  return(chronologies)
}
|
# Scratch exploration of the nycflights13 `flights` table with dplyr.
library(nycflights13)
library(tidyverse)
# ! dplyr overwrites some base functions, so you need to type eg. stats::lag() or stats::filter()
sqrt(2) ^ 2 == 2 # gives FALSE, because these are number approximations
near(sqrt(2) ^ 2, 2)
# Two equivalent ways to keep November and December departures.
nov_dec <- filter(flights, month == 11 | month == 12)
nov_dec <- filter(flights, month %in% c(11, 12))
arrange(flights, year, month, day)
# Column selection helpers.
select(flights, dep_time, dep_delay, arr_time, arr_delay)
select(flights, starts_with("dep"), starts_with("arr"))
select(flights, ends_with("time"), ends_with("delay"))
select(flights, matches("^(del|arr)_(time|delay)$"))
select(flights, contains("TIME")) # by default contains() ignores case letters
# Derived columns. FIX: `hours` was used without being defined; it has to
# be created in the same mutate() call before gain_per_hour can use it.
# (use transmute() to keep only the new columns)
mutate(flights, gain = dep_delay - arr_delay, speed = distance / air_time * 60,
       hours = air_time / 60, gain_per_hour = gain / hours)
# Cumulative functions (reference): cumsum(), cummean().
# FIX: they were written as bare no-argument calls, which error when the
# script is sourced.
mutate(flights, min = as.numeric(str_sub(as.character(dep_time), -2, -1)),
       hou = as.numeric(str_sub(as.character(dep_time), -4, -3)),
       min_from_midnight = hou * 60 + min)
# Ten most delayed departures.
x <- mutate(flights, del_rank = min_rank(desc(dep_delay)))
filter(x, del_rank <= 10)
# Flights that actually departed and arrived.
not_cancelled <- flights %>% filter(!is.na(dep_delay), !is.na(arr_delay))
not_cancelled %>% group_by(dest) %>% summarise(n = n())
not_cancelled %>% group_by(tailnum) %>% summarise(plane_dist = sum(distance))
# Daily cancellation rate and mean arrival delay.
# (TRUE spelled out instead of T, which is reassignable.)
cancelled_delayed <- flights %>%
  mutate(cancelled = is.na(dep_delay) | is.na(arr_delay)) %>%
  group_by(year, month, day) %>%
  summarise(no_canc = mean(cancelled), mean_del = mean(arr_delay, na.rm = TRUE))
# Cancellation rate and mean arrival delay per carrier.
# FIX: `cancelled` is not a column of `flights`; it existed only inside
# the cancelled_delayed pipeline above, so it must be recomputed here.
daleyed_carriers <- flights %>%
  mutate(cancelled = is.na(dep_delay) | is.na(arr_delay)) %>%
  group_by(carrier) %>%
  summarise(mean_delay = mean(cancelled), mean_del = mean(arr_delay, na.rm = TRUE))
# Ten carriers with the worst mean arrival delay.
worst_carriers <- flights %>%
  group_by(carrier) %>%
  summarise(mean_delay = mean(arr_delay, na.rm = TRUE)) %>%
  arrange(desc(mean_delay)) %>%
  slice(1:10)
# Flight count and mean delay per carrier/destination pair
# (the count column is now named explicitly instead of the literal "n()").
worst_airports <- flights %>%
  group_by(carrier, dest) %>%
  summarise(n = n(), mean = mean(arr_delay, na.rm = TRUE))
# Planes with the worst average arrival delay.
worst_plane <- flights %>%
  group_by(tailnum) %>%
  summarise(mean = mean(arr_delay, na.rm = TRUE)) %>%
  arrange(desc(mean))
# Mean arrival delay by scheduled departure hour.
best_time <- flights %>%
  group_by(hour) %>%
  summarise(mean = mean(arr_delay, na.rm = TRUE)) %>%
  arrange(mean)
# Total arrival delay per destination ...
delay_for_dest <- flights %>%
  group_by(dest) %>%
  summarise(total_delay = sum(arr_delay, na.rm = TRUE))
# ... and each flight's share of its destination's total delay.
delay_prop <- flights %>%
  group_by(dest) %>%
  mutate(prop = arr_delay / sum(arr_delay, na.rm = TRUE))
# Destinations served by at least two carriers.
dest_2_carriers <- flights %>%
  group_by(dest) %>%
  mutate(n_carrier = n_distinct(carrier)) %>%
  filter(n_carrier >= 2)
# How many flights of the plane before first delay bigger than 1 hour
# NOTE(review): an NA dep_delay makes cumsum() NA from that row on, so
# later rows are dropped by the filter -- confirm that is intended.
before_delay <- flights %>%
  arrange(tailnum, year, month, day, hour, minute) %>%
  select(tailnum, year, month, day, hour, minute, dep_delay) %>%
  group_by(tailnum) %>%
  mutate(late = dep_delay > 60) %>%
  mutate(before = cumsum(late)) %>%
  filter(before < 1) %>%
  count(sort = TRUE)
| /5_Data_transformation.R | no_license | Alicja1990/r4ds | R | false | false | 2,937 | r | library(nycflights13)
# Scratch exploration of the nycflights13 `flights` table with dplyr.
library(tidyverse)
# ! dplyr overwrites some base functions, so you need to type eg. stats::lag() or stats::filter()
sqrt(2) ^ 2 == 2 # gives FALSE, because these are number approximations
near(sqrt(2) ^ 2, 2)
# Two equivalent ways to keep November and December departures.
nov_dec <- filter(flights, month == 11 | month == 12)
nov_dec <- filter(flights, month %in% c(11, 12))
arrange(flights, year, month, day)
# Column selection helpers.
select(flights, dep_time, dep_delay, arr_time, arr_delay)
select(flights, starts_with("dep"), starts_with("arr"))
select(flights, ends_with("time"), ends_with("delay"))
select(flights, matches("^(del|arr)_(time|delay)$"))
select(flights, contains("TIME")) # by default contains() ignores case letters
# Derived columns. FIX: `hours` was used without being defined; it has to
# be created in the same mutate() call before gain_per_hour can use it.
# (use transmute() to keep only the new columns)
mutate(flights, gain = dep_delay - arr_delay, speed = distance / air_time * 60,
       hours = air_time / 60, gain_per_hour = gain / hours)
# Cumulative functions (reference): cumsum(), cummean().
# FIX: they were written as bare no-argument calls, which error when the
# script is sourced.
mutate(flights, min = as.numeric(str_sub(as.character(dep_time), -2, -1)),
       hou = as.numeric(str_sub(as.character(dep_time), -4, -3)),
       min_from_midnight = hou * 60 + min)
# Ten most delayed departures.
x <- mutate(flights, del_rank = min_rank(desc(dep_delay)))
filter(x, del_rank <= 10)
# Flights that actually departed and arrived.
not_cancelled <- flights %>% filter(!is.na(dep_delay), !is.na(arr_delay))
not_cancelled %>% group_by(dest) %>% summarise(n = n())
not_cancelled %>% group_by(tailnum) %>% summarise(plane_dist = sum(distance))
# Daily cancellation rate and mean arrival delay.
# (TRUE spelled out instead of T, which is reassignable.)
cancelled_delayed <- flights %>%
  mutate(cancelled = is.na(dep_delay) | is.na(arr_delay)) %>%
  group_by(year, month, day) %>%
  summarise(no_canc = mean(cancelled), mean_del = mean(arr_delay, na.rm = TRUE))
# Cancellation rate and mean arrival delay per carrier.
# FIX: `cancelled` is not a column of `flights`; it existed only inside
# the cancelled_delayed pipeline above, so it must be recomputed here.
daleyed_carriers <- flights %>%
  mutate(cancelled = is.na(dep_delay) | is.na(arr_delay)) %>%
  group_by(carrier) %>%
  summarise(mean_delay = mean(cancelled), mean_del = mean(arr_delay, na.rm = TRUE))
# Ten carriers with the worst mean arrival delay.
worst_carriers <- flights %>%
  group_by(carrier) %>%
  summarise(mean_delay = mean(arr_delay, na.rm = TRUE)) %>%
  arrange(desc(mean_delay)) %>%
  slice(1:10)
# Flight count and mean delay per carrier/destination pair
# (the count column is now named explicitly instead of the literal "n()").
worst_airports <- flights %>%
  group_by(carrier, dest) %>%
  summarise(n = n(), mean = mean(arr_delay, na.rm = TRUE))
# Planes with the worst average arrival delay.
worst_plane <- flights %>%
  group_by(tailnum) %>%
  summarise(mean = mean(arr_delay, na.rm = TRUE)) %>%
  arrange(desc(mean))
# Mean arrival delay by scheduled departure hour.
best_time <- flights %>%
  group_by(hour) %>%
  summarise(mean = mean(arr_delay, na.rm = TRUE)) %>%
  arrange(mean)
# Total arrival delay per destination ...
delay_for_dest <- flights %>%
  group_by(dest) %>%
  summarise(total_delay = sum(arr_delay, na.rm = TRUE))
# ... and each flight's share of its destination's total delay.
delay_prop <- flights %>%
  group_by(dest) %>%
  mutate(prop = arr_delay / sum(arr_delay, na.rm = TRUE))
# Destinations served by at least two carriers.
dest_2_carriers <- flights %>%
  group_by(dest) %>%
  mutate(n_carrier = n_distinct(carrier)) %>%
  filter(n_carrier >= 2)
# How many flights of the plane before first delay bigger than 1 hour
# NOTE(review): an NA dep_delay makes cumsum() NA from that row on, so
# later rows are dropped by the filter -- confirm that is intended.
before_delay <- flights %>%
  arrange(tailnum, year, month, day, hour, minute) %>%
  select(tailnum, year, month, day, hour, minute, dep_delay) %>%
  group_by(tailnum) %>%
  mutate(late = dep_delay > 60) %>%
  mutate(before = cumsum(late)) %>%
  filter(before < 1) %>%
  count(sort = TRUE)
|
#'Generate random deviates shifted Poisson pdf
#'
#'This function generates random deviates from the shifted Poisson distribution.
#'The shift is fixed to 1.
#'@param n the number of random deviates to generate
#'@param lambda vector of positive means (of an ordinary Poisson distribution)
#'@return A vector of shifted Poisson random deviates
#'@export
rshiftpois=function(n,lambda){
out=rpois(n,lambda)+1
return(out)
} | /R/rshiftpois.R | no_license | benaug/move.HMM | R | false | false | 425 | r | #'Generate random deviates shifted Poisson pdf
#'
#' This function generates random deviates from the shifted Poisson
#' distribution, i.e. an ordinary Poisson deviate plus a constant shift.
#' The default shift of 1 reproduces the historical behaviour of this
#' function ("the shift is fixed to 1").
#' @param n the number of random deviates to generate
#' @param lambda vector of positive means (of an ordinary Poisson distribution)
#' @param shift non-negative integer shift added to each deviate
#'   (defaults to 1 for backward compatibility)
#' @return A vector of shifted Poisson random deviates
#' @export
rshiftpois = function(n, lambda, shift = 1) {
  # rpois() is vectorised over lambda; adding `shift` translates the
  # support from {0, 1, ...} to {shift, shift + 1, ...}.
  rpois(n, lambda) + shift
}
# Q2: correlation between vehicle weight (x) and fuel efficiency (y),
# computed manually from the sample variances and covariance.
x = c(3.545, 2.6, 3.245, 3.93, 3.995, 3.115, 3.235, 3.225, 2.44, 3.24, 2.29, 2.5, 4.02)
y = c(30, 32, 30, 24, 26, 30, 33, 27, 37, 32, 37, 34, 26)
# Pair the two samples for display.
data = data.frame(x, y)
# Sample variances of each series.
varx = var(x)
vary = var(y)
# var(x, y) with two arguments returns the sample covariance.
varxy = var(x, y)
# Pearson r = cov(x, y) / (sd(x) * sd(y)); equivalent to cor(x, y).
# NOTE(review): this name shadows stats::cor() in the workspace.
cor = varxy / sqrt(varx * vary)
# Auto-print the paired data and the correlation coefficient.
data
cor
plot(x, y, main="Weight vs Fuel Efficiency", xlab="Weight", ylab="Fuel Efficieny", col="red") | /VIT/MAT2001 - Statistics for Engineers/LAB Assessment 2/Q2.R | no_license | namsnath/MiscellaneousCode | R | false | false | 359 | r | x = c(3.545, 2.6, 3.245, 3.93, 3.995, 3.115, 3.235, 3.225, 2.44, 3.24, 2.29, 2.5, 4.02)
# Fuel-efficiency observations paired with the weights x defined above.
y = c(30, 32, 30, 24, 26, 30, 33, 27, 37, 32, 37, 34, 26)
data = data.frame(x, y)
# Sample variances and covariance.
varx = var(x)
vary = var(y)
varxy = var(x, y)
# Pearson correlation r = cov(x, y) / (sd(x) * sd(y)); equivalent to cor(x, y).
# NOTE(review): this name shadows stats::cor() in the workspace.
cor = varxy / sqrt(varx * vary)
# Auto-print the paired data and the correlation.
data
cor
plot(x, y, main="Weight vs Fuel Efficiency", xlab="Weight", ylab="Fuel Efficieny", col="red") |
## Put comments here that give an overall description of what your
## functions do
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor closures sharing one environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the cache
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## Compute the inverse of the special "matrix" created by makeCacheMatrix().
## If the inverse has already been calculated (and the matrix has not
## changed), the cached inverse is returned instead of being recomputed.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    # Cache miss: invert the stored matrix and remember the result.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
| /cachematrix.R | no_license | paliashivani0/ProgrammingAssignment2 | R | false | false | 807 | r | ## Put comments here that give an overall description of what your
## functions do
## Creates a special "matrix" wrapper that can cache its inverse.
## Returns a list of four closures over this call's environment:
##   set(y)          -- replace the stored matrix, invalidate the cache
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse
##   getinverse()    -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  i <- NULL
  set <- function(y) {
    x <<- y
    i <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() x
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Computes (and caches) the inverse of the special "matrix" created by
## makeCacheMatrix().  On repeated calls the cached inverse is returned,
## with a message, instead of being recomputed.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  i <- x$getinverse()
  if (!is.null(i)) {
    # Cache hit: reuse the stored inverse.
    message("getting cached data")
    return(i)
  }
  # Cache miss: invert, store, and return.
  data <- x$get()
  i <- solve(data, ...)
  x$setinverse(i)
  i
}
|
# Bivariate Gaussian kernel centred at (0, 0) with isotropic bandwidth h.
# x and y may be vectors (evaluated elementwise); returns the density of
# N(0, h^2 I) at the supplied points.
kernel2d <- function(x, y, h) {
  h2 <- h * h
  exp(-(x * x + y * y) / (2 * h2)) / (2 * pi * h2)
}
# Riemann-sum approximation of the definite integral of f over a regular
# grid with cell size dx-by-dy: integral ~= sum(f) * dx * dy.
dintegral <- function(f, dx, dy) {
  dx * dy * sum(f)
}
# Fast fourier transform of a 2D Gaussian based on the continuous FT, where
# sigma is the standard deviation, ds,dt is the time resolution in x,y
# and 2n*2n the number of points
kernel2d_fft <- function(sigma, ds, dt, n) {
  # 1-D continuous FT of a Gaussian, sampled at the 2n. frequencies implied
  # by spacing dt., laid out in standard FFT order (0..n.-1, then -n...-1).
  kernel_fft <- function(sigma., dt., n.) {
    f <- c(0:(n.-1),-n.:-1)*pi*sigma./(n.*dt.)
    exp(-0.5*f*f)/dt.
  }
  # The FT of a separable 2-D Gaussian is the outer product of the two 1-D
  # transforms: rows index y-frequencies, columns x-frequencies.
  fZ.x <- kernel_fft(sigma, ds, n)
  fZ.y <- kernel_fft(sigma, dt, n)
  fZ.y %*% t(fZ.x)
} | /R/kernel2d.R | no_license | tilmandavies/sparr | R | false | false | 723 | r | # Bivariate kernel function centered at 0,0 with isotropic bandwidth h
kernel2d <- function(x, y, h) {
  # Isotropic bivariate Gaussian density N(0, h^2 I) at (x, y);
  # vectorised over x and y.
  1/(2*pi*h*h)*exp(-0.5*(x*x+y*y)/(h*h))
}
# Approximation of definite integral of f over a grid with gridsize dx x dy via summation
dintegral <- function(f, dx, dy) {
  # Riemann sum: every grid value weighted by the cell area dx*dy.
  sum(f)*dx*dy
}
# Fast fourier transform of a 2D Gaussian based on the continuous FT, where
# sigma is the standard deviation, ds,dt is the time resolution in x,y
# and 2n*2n the number of points
kernel2d_fft <- function(sigma, ds, dt, n) {
  # 1-D continuous FT of a Gaussian, sampled at the 2n. frequencies in
  # standard FFT order (0..n.-1 followed by -n...-1).
  kernel_fft <- function(sigma., dt., n.) {
    f <- c(0:(n.-1),-n.:-1)*pi*sigma./(n.*dt.)
    exp(-0.5*f*f)/dt.
  }
  # Separable 2-D transform = outer product of the two 1-D transforms.
  fZ.x <- kernel_fft(sigma, ds, n)
  fZ.y <- kernel_fft(sigma, dt, n)
  fZ.y %*% t(fZ.x)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smoteNew.R
\name{smoteNew}
\alias{smoteNew}
\title{smoteNew is a necessary function that modifies the SMOTE algorithm.}
\usage{
smoteNew(data.x, data.y, K, dup_size = 0, class.to.oversample)
}
\arguments{
\item{data.x}{A data frame or matrix of numeric-attributed dataset}
\item{data.y}{A vector of a target class attribute corresponding to a dataset X}
\item{K}{The number of nearest neighbors during sampling process}
\item{dup_size}{The number or vector representing the desired times of synthetic minority instances over the original number of majority instances}
\item{class.to.oversample}{Class to be oversampled}
}
\description{
smoteNew is a necessary function that modifies the SMOTE algorithm in the following ways: (1) it corrects a bug in the original
smotefamily::SMOTE() function and (2) it lets the user specify which class is to be oversampled.
}
\examples{
library(smotefamily)
library(sambia)
data.example = sample_generator(10000,ratio = 0.80)
genData = sambia:::smoteNew(data.example[,-3],data.example[,3],K = 5,class.to.oversample = 'p')
}
\author{
Norbert Krautenbacher, Kevin Strauss, Maximilian Mandl, Christiane Fuchs
}
| /man/smoteNew.Rd | no_license | cran/sambia | R | false | true | 1,211 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smoteNew.R
\name{smoteNew}
\alias{smoteNew}
\title{smoteNew is a necessary function that modifies the SMOTE algorithm.}
\usage{
smoteNew(data.x, data.y, K, dup_size = 0, class.to.oversample)
}
\arguments{
\item{data.x}{A data frame or matrix of numeric-attributed dataset}
\item{data.y}{A vector of a target class attribute corresponding to a dataset X}
\item{K}{The number of nearest neighbors during sampling process}
\item{dup_size}{The number or vector representing the desired times of synthetic minority instances over the original number of majority instances}
\item{class.to.oversample}{Class to be oversampled}
}
\description{
smoteNew is a necessary function that modifies the SMOTE algorithm in the following ways: (1) it corrects a bug in the original
smotefamily::SMOTE() function and (2) it lets the user specify which class is to be oversampled.
}
\examples{
library(smotefamily)
library(sambia)
data.example = sample_generator(10000,ratio = 0.80)
genData = sambia:::smoteNew(data.example[,-3],data.example[,3],K = 5,class.to.oversample = 'p')
}
\author{
Norbert Krautenbacher, Kevin Strauss, Maximilian Mandl, Christiane Fuchs
}
|
### DAVID Functions:
# Change-log:
# V1.0 20-06-2014 Erik Initiation of this file
# V2.0 17-07-2014 Erik also accepting gene lists in "DAVID"
# V2.1 18-07-2014 Erik debug "DAVID"
# V2.2 19-07-2014 Erik also accepting a vector of probe_names in "get.mod.members" for dat & adjust "DAVID"
# V2.3 08-05-2015 Erik Added "sum.DAVID" to have a quick look at the results
## Defining a SubFunction for extracting the probe names of modules:
# net: WGCNA network result; uses net$colors (per-probe module labels).
# dat: expression matrix whose rownames are probe names, OR a character
#      vector of probe names, parallel to net$colors.
# Returns a named list with one integer index vector per module colour; the
# names of each vector are the corresponding probe names.
get.mod.members <- function(net,dat){
  # NOTE(review): require() only warns on failure; library() or
  # requireNamespace() would fail fast here.
  require("WGCNA")
  require("Biobase")
  require("BiocGenerics")
  require("parallel")
  # Group the probe indices by module colour (labels2colors() from WGCNA).
  IND <- split(1:length(net$colors),as.factor(labels2colors(net$colors)))
  for(i in 1:length(IND)){
    if(is.matrix(dat)){
      # Matrix input: probe names come from the rownames.
      names(IND[[i]]) <- rownames(dat)[IND[[i]]]
    } else {
      # Vector input: dat itself holds the probe names.
      names(IND[[i]]) <- dat[IND[[i]]]
    }
  }
  return(IND)
}
### Defining a SubFunction for initiating my DAVID analysis:
# Opens a connection to the DAVID web service and returns the connection
# object.  NOTE(review): the registration e-mail address is hard-coded;
# other users must register their own address with DAVID.
init.DAVID <- function(){
  library("RDAVIDWebService")
  Dummi <- try(DAVIDWebService$new(email="e.b.van_den_akker@lumc.nl"),silent=TRUE) # This always gives an error the first time ...
  david <- DAVIDWebService$new(email="e.b.van_den_akker@lumc.nl")
  return(david)
}
### Defining a SubFunction for adding a gene list to DAVID WebService:
# con:      DAVID web-service connection (see init.DAVID()).
# geneIDs:  character vector of gene identifiers to upload.
# listName: name under which the list is stored in DAVID.
# idType:   DAVID identifier type of geneIDs.
# Returns a list with the upload status: succes.upload (TRUE/FALSE),
# inDavid / unmappedIds as reported by DAVID, error.upload (the try-error,
# if any) and the uploaded geneIDs.
add.foreground.list <- function(con,geneIDs,listName,idType="AFFYMETRIX_3PRIME_IVT_ID"){
  # Initiate Result:
  Result <- list(succes.upload=FALSE,inDavid=NA,unmappedIds=NA,error.upload=NA,geneIDs=NA)
  # Try to add a list (the web service may fail, hence the try()):
  cat(paste0("Trying to upload: \'",listName,"\' ... "))
  Dummi <- try(addList(con,geneIDs,listName=listName,idType=idType,listType="Gene"),silent=TRUE)
  # Check for error:
  if(is(Dummi,"try-error")){
    cat("FAILED! \n")
    Result$error.upload <- Dummi
  } else {
    cat("OK! \n")
    Result$succes.upload <- TRUE
    Result$inDavid <- Dummi$inDavid
    Result$unmappedIds <- Dummi$unmappedIds
    Result$geneIDs <- geneIDs
  }
  return(Result)
}
### Defining a SubFunction for adding a list of background genes to DAVID WebService:
# Same as add.foreground.list(), but uploads the IDs as the background set
# (listType = "Background") and does not echo the gene IDs back.
# Returns a list with the upload status.
add.background.list <- function(con,geneIDs,listName="Background",idType="AFFYMETRIX_3PRIME_IVT_ID"){
  # Initiate Result:
  Result <- list(succes=FALSE,inDavid=NA,unMapped=NA,error=NA)
  # Try to add a list:
  cat(paste0("Trying to upload: \'",listName,"\' ... "))
  Dummi <- try(addList(con,geneIDs,listName=listName,idType=idType,listType="Background"),silent=TRUE)
  # Check for error:
  if(is(Dummi,"try-error")){
    cat("FAILED! \n")
    Result$error <- Dummi
  } else {
    cat("OK! \n")
    Result$succes <- TRUE
    Result$inDavid <- Dummi$inDavid
    # NOTE(review): the template list declares `unMapped` but the value is
    # stored under `unmappedIds`, so the result carries both fields.
    Result$unmappedIds <- Dummi$unmappedIds
  }
  return(Result)
}
### Defining a SubFunction for analyzing set listName in DAVID:
# Runs the DAVID functional-annotation chart for the uploaded gene list
# `listName` on connection `con`.  Returns a list with:
#   succes.analyse -- TRUE on success,
#   error.analyse  -- the error/condition on failure (NA otherwise),
#   enr            -- the enrichment table sorted by the Benjamini column.
analyse.list <- function(con,listName){
  # Initiate Result:
  Result <- list(succes.analyse=FALSE,error.analyse=NA,enr=NA)
  # Check that the list was actually uploaded:
  NAMES <- getGeneListNames(con)
  if(!(listName %in% NAMES)){
    # BUGFIX: store the failure in the documented `error.analyse` slot (the
    # old code set a stray `Result$error` field) and fix the message typo.
    Result$error.analyse <- "Genelist not correctly uploaded"
    return(Result)
  }
  # Make `listName` the current gene list:
  setCurrentGeneListPosition(con,which(NAMES %in% listName))
  cat(paste0("Trying to analyse: \'",listName,"\' ... "))
  # Download the annotation chart to a temporary file:
  pathTEMP <- tempfile()
  Dummi <- try(getFunctionalAnnotationChartFile(con,fileName=pathTEMP),TRUE)
  if(is(Dummi,"try-error")){
    cat("FAILED! \n")
    Result$error.analyse <- Dummi
  } else {
    cat("OK! \n")
    Result$succes.analyse <- TRUE
    # Read results & sort by ascending Benjamini-adjusted p-value:
    Dummi <- read.delim(pathTEMP,stringsAsFactors=FALSE,header=TRUE)
    Dummi <- Dummi[sort(Dummi$Benjamini,index.return=TRUE)$ix,]
    Result$enr <- Dummi
  }
  return(Result)
}
### Defining a SubFunction for performing a DAVID enrichment analysis:
# net:    either a WGCNA network result (recognised by its characteristic
#         component names), from which module gene lists are extracted via
#         get.mod.members(), or a (list of) gene identifier vector(s).
# dat:    expression matrix (rownames = probe IDs) or a vector of probe
#         IDs; also uploaded as the background set.
# idType: DAVID identifier type of the supplied IDs.
# annot:  optional character vector of DAVID annotation category names.
# Returns a named list with, per gene set, the upload status combined with
# the enrichment results.
DAVID <- function(net,dat,idType="AFFYMETRIX_3PRIME_IVT_ID",annot=NULL){
  ## Network results in WGCNA are not returned in their own class :(
  ## Hence we need some hacking to recognise them:
  if(is.list(net)){
    req.list.names <- c("colors","unmergedColors","MEs","goodSamples","goodGenes","dendrograms","TOMFiles","blockGenes","blocks","MEsOK")
    if(all(req.list.names %in% names(net))){
      # These are probably network results, thus extract module names and genes:
      cat("| Input: network \n")
      geneList <- lapply(get.mod.members(net,dat),names)
    } else {
      # This is probably a list of genes:
      cat("| Input: list of genes \n")
      geneList <- net
    }
  }
  ## Remove possible NA's.  BUGFIX: lapply() instead of sapply(), so the
  ## result can never be silently simplified to a matrix when all gene
  ## sets happen to have equal length:
  geneList <- lapply(geneList,function(x) x[!is.na(x)])
  ## Initiate the DAVID web-service connection:
  david <- init.DAVID()
  ## Set Annotation categories:
  if(!is.null(annot)){
    NAMES <- getAllAnnotationCategoryNames(david)
    if(!all(annot %in% NAMES)){
      stop("Not all supplied annotation categories were recognized ...")
    }
  }
  # NOTE(review): also called with annot = NULL, as in the original code;
  # confirm against RDAVIDWebService whether NULL resets to the defaults.
  setAnnotationCategories(david,annot)
  ## Upload geneLists (a single vector is wrapped into a one-element list):
  if(!is.list(geneList)){
    geneList <- list(geneList=geneList)
  }
  cat("=== Uploading gene sets to DAVID ===\n")
  Result1 <- lapply(seq_along(geneList),function(x){
    add.foreground.list(con=david,geneIDs=geneList[[x]],listName=names(geneList)[x],idType=idType)
  })
  names(Result1) <- names(geneList)
  cat("=== DONE! ===\n")
  ## Upload background:
  cat("=== Uploading background set to DAVID ===\n")
  if(is.matrix(dat)){
    allGenes <- rownames(dat)
  } else {
    allGenes <- dat
  }
  Dummi <- add.background.list(con=david,geneIDs=allGenes,idType=idType)
  cat("=== DONE! ===\n")
  ## Analyse every list that was successfully uploaded:
  cat("=== Analyzing successfully uploaded lists ===\n")
  NAMES <- getGeneListNames(david)
  Result2 <- lapply(seq_along(NAMES),function(x){
    analyse.list(con=david,listName=NAMES[x])
  })
  ## BUGFIX: name the analysis results after the uploaded lists; the old
  ## `names(Result2) <- geneList` coerced whole ID vectors into names.
  names(Result2) <- NAMES
  ## Combine upload status and enrichment per input gene set.
  ## NOTE(review): assumes getGeneListNames() returns the lists in upload
  ## order -- confirm against RDAVIDWebService.
  Result <- lapply(seq_along(geneList),function(x) c(Result1[[x]],Result2[[x]]))
  names(Result) <- names(geneList)
  cat("=== DONE! ===\n")
  return(Result)
}
### Defining a SubFunction for writing DAVID output:
# Writes one tab-separated .txt file per element of `david_res` (the result
# of DAVID()) into `folderOUT`.  Each file pairs the successfully mapped
# gene IDs with the enrichment table, padding the shorter part with empty
# rows, under a fixed 16-column header.  Returns TRUE on completion.
write.DAVID <- function(david_res,folderOUT){
  ## Create the output folder if not already present.
  ## dir.exists() instead of file.exists(): a plain file with this name
  ## must not suppress directory creation.
  if(!dir.exists(folderOUT)){
    dir.create(folderOUT,recursive=TRUE)
  }
  ## Helper: build the text lines for result i.
  rewrite.result <- function(i){
    # IDs that DAVID actually mapped:
    mapped <- setdiff(david_res[[i]]$geneIDs,david_res[[i]]$unmappedIds)
    mapped <- data.frame(mapped,i,names(david_res)[i],stringsAsFactors=FALSE)
    result <- david_res[[i]]$enr
    ## Pad the shorter of the two blocks with empty rows so that they can
    ## be cbind-ed side by side:
    L <- nrow(mapped)-nrow(result)
    if(L>0){
      pad <- data.frame(matrix(data="",ncol=ncol(result),nrow=L),stringsAsFactors=FALSE)
      colnames(pad) <- colnames(result)
      result <- rbind(result,pad)
    } else if(L<0){
      pad <- data.frame(matrix(data="",ncol=ncol(mapped),nrow=-L),stringsAsFactors=FALSE)
      colnames(pad) <- colnames(mapped)
      mapped <- rbind(mapped,pad)
    }
    body <- apply(cbind(mapped,result),1,function(x) paste(x,collapse="\t"))
    header <- paste(c("Affy","Module","Color","Category","Term","Count","%","PValue","Genes","List Total",
                      "Pop Hits","Pop Total","Fold Enrichment","Bonferroni","Benjamini","FDR"),collapse="\t")
    c(header,body)
  }
  # One output file per gene set, named after the set.
  pathsOUT <- file.path(folderOUT,paste0(names(david_res),".txt"))
  for(x in seq_along(pathsOUT)){
    writeLines(rewrite.result(x),pathsOUT[x])
  }
  return(TRUE)
}
### Defining a SubFunction for getting a summary of DAVID results:
# Quick look at a DAVID() result: for every gene set, the first rows of its
# enrichment table restricted to the key columns.
sum.DAVID <- function(david_res){
  key.cols <- c("Term","Fold.Enrichment","PValue","Benjamini")
  lapply(david_res,function(res) head(res$enr)[,key.cols])
}
### DAVID Functions:
# Change-log:
# V1.0 20-06-2014 Erik Initiation of this file
# V2.0 17-07-2014 Erik also accepting gene lists in "DAVID"
# V2.1 18-07-2014 Erik debug "DAVID"
# V2.2 19-07-2014 Erik also accepting a vector of probe_names in "get.mod.members" for dat & adjust "DAVID"
# V2.3 08-05-2015 Erik Added "sum.DAVID" to have a quick look at the results
## Defining a SubFunction for extracting the probe names of modules:
get.mod.members <- function(net,dat){
require("WGCNA")
require("Biobase")
require("BiocGenerics")
require("parallel")
IND <- split(1:length(net$colors),as.factor(labels2colors(net$colors)))
for(i in 1:length(IND)){
if(is.matrix(dat)){
names(IND[[i]]) <- rownames(dat)[IND[[i]]]
} else {
names(IND[[i]]) <- dat[IND[[i]]]
}
}
return(IND)
}
### Defining a SubFunction for initiating my DAVID analysis:
init.DAVID <- function(){
library("RDAVIDWebService")
Dummi <- try(DAVIDWebService$new(email="e.b.van_den_akker@lumc.nl"),silent=TRUE) # This always gives an error the first time ...
david <- DAVIDWebService$new(email="e.b.van_den_akker@lumc.nl")
return(david)
}
### Defining a SubFunction for adding a gene list to DAVID WebService:
add.foreground.list <- function(con,geneIDs,listName,idType="AFFYMETRIX_3PRIME_IVT_ID"){
# Initiate Result:
Result <- list(succes.upload=FALSE,inDavid=NA,unmappedIds=NA,error.upload=NA,geneIDs=NA)
# Try to add a list:
cat(paste0("Trying to upload: \'",listName,"\' ... "))
Dummi <- try(addList(con,geneIDs,listName=listName,idType=idType,listType="Gene"),silent=TRUE)
# Check for error:
if(is(Dummi,"try-error")){
cat("FAILED! \n")
Result$error.upload <- Dummi
} else {
cat("OK! \n")
Result$succes.upload <- TRUE
Result$inDavid <- Dummi$inDavid
Result$unmappedIds <- Dummi$unmappedIds
Result$geneIDs <- geneIDs
}
return(Result)
}
### Defining a SubFunction for adding a list of background genes to DAVID WebService:
add.background.list <- function(con,geneIDs,listName="Background",idType="AFFYMETRIX_3PRIME_IVT_ID"){
# Initiate Result:
Result <- list(succes=FALSE,inDavid=NA,unMapped=NA,error=NA)
# Try to add a list:
cat(paste0("Trying to upload: \'",listName,"\' ... "))
Dummi <- try(addList(con,geneIDs,listName=listName,idType=idType,listType="Background"),silent=TRUE)
# Check for error:
if(is(Dummi,"try-error")){
cat("FAILED! \n")
Result$error <- Dummi
} else {
cat("OK! \n")
Result$succes <- TRUE
Result$inDavid <- Dummi$inDavid
Result$unmappedIds <- Dummi$unmappedIds
}
return(Result)
}
### Defining a SubFunction for analyzing set listName in DAVID:
analyse.list <- function(con,listName){
# Initiate Result:
Result <- list(succes.analyse=FALSE,error.analyse=NA,enr=NA)
# Check name:
NAMES <- getGeneListNames(con)
if(!(listName %in% NAMES)){
Result$error <- "Genelist ot correctly uploaded"
return(Result)
}
# Set list:
setCurrentGeneListPosition(con,which(NAMES %in% listName))
cat(paste0("Trying to analyse: \'",listName,"\' ... "))
pathTEMP <- tempfile()
Dummi <- try(getFunctionalAnnotationChartFile(con,fileName=pathTEMP),TRUE)
if(is(Dummi,"try-error")){
cat("FAILED! \n")
Result$error.analyse <- Dummi
} else {
cat("OK! \n")
Result$succes.analyse <- TRUE
# Read results & sort:
Dummi <- read.delim(pathTEMP,stringsAsFactors=FALSE,header=TRUE)
Dummi <- Dummi[sort(Dummi$Benjamini,index.return=TRUE)$ix,]
Result$enr <- Dummi
}
return(Result)
}
### Defining a SubFunction for performing a DAVID enrichment analysis:
# net:    either a WGCNA network result (recognised by its characteristic
#         component names), from which module gene lists are extracted via
#         get.mod.members(), or a (list of) gene identifier vector(s).
# dat:    expression matrix (rownames = probe IDs) or a vector of probe
#         IDs; also uploaded as the background set.
# idType: DAVID identifier type of the supplied IDs.
# annot:  optional character vector of DAVID annotation category names.
# Returns a named list with, per gene set, the upload status combined with
# the enrichment results.
DAVID <- function(net,dat,idType="AFFYMETRIX_3PRIME_IVT_ID",annot=NULL){
  ## Network results in WGCNA are not returned in their own class :(
  ## Hence we need some hacking to recognise them:
  if(is.list(net)){
    req.list.names <- c("colors","unmergedColors","MEs","goodSamples","goodGenes","dendrograms","TOMFiles","blockGenes","blocks","MEsOK")
    if(all(req.list.names %in% names(net))){
      # These are probably network results, thus extract module names and genes:
      cat("| Input: network \n")
      geneList <- lapply(get.mod.members(net,dat),names)
    } else {
      # This is probably a list of genes:
      cat("| Input: list of genes \n")
      geneList <- net
    }
  }
  ## Remove possible NA's.  BUGFIX: lapply() instead of sapply(), so the
  ## result can never be silently simplified to a matrix when all gene
  ## sets happen to have equal length:
  geneList <- lapply(geneList,function(x) x[!is.na(x)])
  ## Initiate the DAVID web-service connection:
  david <- init.DAVID()
  ## Set Annotation categories:
  if(!is.null(annot)){
    NAMES <- getAllAnnotationCategoryNames(david)
    if(!all(annot %in% NAMES)){
      stop("Not all supplied annotation categories were recognized ...")
    }
  }
  # NOTE(review): also called with annot = NULL, as in the original code;
  # confirm against RDAVIDWebService whether NULL resets to the defaults.
  setAnnotationCategories(david,annot)
  ## Upload geneLists (a single vector is wrapped into a one-element list):
  if(!is.list(geneList)){
    geneList <- list(geneList=geneList)
  }
  cat("=== Uploading gene sets to DAVID ===\n")
  Result1 <- lapply(seq_along(geneList),function(x){
    add.foreground.list(con=david,geneIDs=geneList[[x]],listName=names(geneList)[x],idType=idType)
  })
  names(Result1) <- names(geneList)
  cat("=== DONE! ===\n")
  ## Upload background:
  cat("=== Uploading background set to DAVID ===\n")
  if(is.matrix(dat)){
    allGenes <- rownames(dat)
  } else {
    allGenes <- dat
  }
  Dummi <- add.background.list(con=david,geneIDs=allGenes,idType=idType)
  cat("=== DONE! ===\n")
  ## Analyse every list that was successfully uploaded:
  cat("=== Analyzing successfully uploaded lists ===\n")
  NAMES <- getGeneListNames(david)
  Result2 <- lapply(seq_along(NAMES),function(x){
    analyse.list(con=david,listName=NAMES[x])
  })
  ## BUGFIX: name the analysis results after the uploaded lists; the old
  ## `names(Result2) <- geneList` coerced whole ID vectors into names.
  names(Result2) <- NAMES
  ## Combine upload status and enrichment per input gene set.
  ## NOTE(review): assumes getGeneListNames() returns the lists in upload
  ## order -- confirm against RDAVIDWebService.
  Result <- lapply(seq_along(geneList),function(x) c(Result1[[x]],Result2[[x]]))
  names(Result) <- names(geneList)
  cat("=== DONE! ===\n")
  return(Result)
}
### Defining a SubFunction for writing DAVID output:
write.DAVID <- function(david_res,folderOUT){
## Create folder if not already present:
if(!file.exists(folderOUT)){
Dummi <- dir.create(folderOUT)
}
## Plot a csv per module:
## Defining a helper function:
rewrite.result <- function(i){
mapped <- setdiff(david_res[[i]]$geneIDs,david_res[[i]]$unmappedIds)
mapped <- data.frame(mapped,i,names(david_res)[i],stringsAsFactors=FALSE)
result <- david_res[[i]]$enr
## Make columns even long:
L <- nrow(mapped)-nrow(result)
if(L>0){
Dummi <- data.frame(matrix(data="",ncol=ncol(result),nrow=abs(L)),stringsAsFactors=FALSE)
colnames(Dummi) <- colnames(result)
result <- rbind(result,Dummi)
}
if(L<0){
Dummi <- data.frame(matrix(data="",ncol=ncol(mapped),nrow=abs(L)),stringsAsFactors=FALSE)
colnames(Dummi) <- colnames(mapped)
mapped <- rbind(mapped,Dummi)
}
result <- apply(cbind(mapped,result),1,function(x) paste(x,collapse="\t"))
result <- c(paste(c("Affy","Module","Color","Category","Term","Count","%","PValue","Genes","List Total",
"Pop Hits","Pop Total","Fold Enrichment","Bonferroni","Benjamini","FDR"),collapse="\t"),result)
return(result)
}
pathsOUT <- paste0(folderOUT,"/",names(david_res),".txt")
Dummi <- sapply(1:length(pathsOUT),function(x) writeLines(rewrite.result(x),pathsOUT[x]))
return(TRUE)
}
### Defining a SubFunction for getting a summary of DAVID results:
sum.DAVID <- function(david_res){
return(lapply(david_res,function(x) head(x$enr)[,c("Term","Fold.Enrichment","PValue","Benjamini")]))
} |
# Running per-bar extremes: remembers the highest High and lowest Low seen
# across all bars passed to update().
EveryBarState = R6Class('EveryBarState',
  public = list(
    # Running maximum of bar highs (-Inf until the first update).
    curhigh = -Inf,
    # Running minimum of bar lows (Inf until the first update).
    curlow = Inf,
    #newhighflag = F
    # Fold one bar `d` (must expose $High and $Low) into the extremes.
    update = function(d)
    {
      high = as.numeric(d$High)
      low = as.numeric(d$Low)
      if(high > self$curhigh)
      {
        self$curhigh = high
      }
      if(low < self$curlow)
      {
        self$curlow = low
      }
    }
  )
)
# Consecutive-bar counter: counts successive "up" or "down" bars, resetting
# the opposite counter, and resetting both on a "cross" bar.  Bar
# classification is delegated to the project-level Judge class
# (is_up / is_down / is_cross semantics are defined there).
NbarState = R6Class('nbarstate',
  public=list(
    # NOTE(review): Judge$new() is evaluated once at class-definition time,
    # so all NbarState instances share this default Judge object.
    judge = Judge$new(),
    # Number of consecutive up bars (reset by a down/cross bar).
    upcount = 0,
    # Number of consecutive down bars (reset by an up/cross bar).
    downcount = 0,
    # Classify bar `d` and update the streak counters.
    update = function(d){
      if(self$judge$is_up(d))
      {
        self$upcount = self$upcount+1
        self$downcount = 0
      }
      else if(self$judge$is_down(d))
      {
        self$upcount = 0
        self$downcount = self$downcount+1
      }
      else if(self$judge$is_cross(d))
      {
        self$upcount = 0
        self$downcount = 0
      }
    }
  ))
# N-bar breakout strategy for a single bar `d` (a row with Open/High/Low
# plus indicator columns sma/atr).
# position:  position container; new Trade objects are $add()-ed to it.
# nbarstate: NbarState instance holding the current up/down streak counts.
# losspoint/winpoint: fixed profit/stop distances in points (losspoint is
#   currently unused; the stop below is based on the 30-point cap instead).
# n:         required streak length before a breakout entry.
# pred:      preceding bars used to derive the breakout/stop levels.
# Returns the (mutated) position container.
# NOTE(review): sma and atr are computed but never used below.
nbar_strategy = function(d,position,nbarstate,losspoint=10,winpoint=10,n=3,pred)
{
  time = as.character(index(d))
  open = as.numeric(d$Open)
  sma = as.numeric(d$sma)
  curpostion = position
  high = as.numeric(d$High)
  low = as.numeric(d$Low)
  atr = trunc(as.numeric(d$atr)) * 2
  len = nrow(pred)
  # Breakout triggers: one tick beyond the last predecessor bar's extremes.
  prehigh = as.numeric(pred[len,]$High)+1
  prelow = as.numeric(pred[len,]$Low)-1
  # Stop anchors: one tick beyond the extremes of the whole pred window.
  long_base = max(as.numeric(pred$High)) + 1
  short_base = min(as.numeric(pred$Low)) - 1
  #line = trunc((prehigh+prelow)/2)
  # line = 15
  # Long entry: n consecutive up bars and the current bar takes out prehigh.
  if(nbarstate$upcount == n && high > prehigh)
  {
    barstate = EveryBarState$new()
    movefixpoints = MoveFixPoints$new(barstate)
    #openbuy()
    # Fill at the open if it gapped above the trigger, else at the trigger.
    op = ifelse(open > prehigh,open,prehigh)
    #op = op + 1
    # Stop at short_base, capped at 30 points below when that is too far.
    stoploss = ifelse((open-short_base)>30,(op-30),short_base)#op - losspoint #op - losspoint##prelow# op - losspoint
    stopwin = op + winpoint
    r = data.frame(opentime=time,closetime=NA,open=op,close=NA,stopwin=stopwin,stoploss=stoploss,type='long',exittype='')
    # NOTE(review): defaultstoploss is a free variable resolved at call time.
    trade = Trade$new(r,stopwin=NULL,stoploss=defaultstoploss,movestop=movefixpoints)
    curpostion$add(trade)
  }
  # Short entry: mirror image of the long case.
  if(nbarstate$downcount == n && low < prelow )
  {
    #opensell()
    barstate = EveryBarState$new()
    movefixpoints = MoveFixPoints$new(barstate)
    op = ifelse(open < prelow,open,prelow)
    # op = op - 1
    stoploss = ifelse((long_base-open)>30,(op+30),long_base)#long_base#op + losspoint#+ losspoint#line+1#prehigh#op + losspoint
    stopwin = op - winpoint
    r = data.frame(opentime=time,closetime=NA,open=op,close=NA,stopwin=stopwin,stoploss=stoploss,type='short',exittype='')
    trade = Trade$new(r,stopwin=NULL,stoploss=defaultstoploss,movestop=movefixpoints)
    curpostion$add(trade)
  }
  return(curpostion)
}
| /src/strategy/shock/nbarclass.R | no_license | zhurui1351/alpha | R | false | false | 3,534 | r |
EveryBarState = R6Class('EveryBarState',
public = list(
curhigh = -Inf,
curlow = Inf,
#newhighflag = F
update = function(d)
{
high = as.numeric(d$High)
low = as.numeric(d$Low)
if(high > self$curhigh)
{
self$curhigh = high
}
if(low < self$curlow)
{
self$curlow = low
}
}
)
)
NbarState = R6Class('nbarstate',
public=list(
judge = Judge$new(),
upcount = 0,
downcount = 0,
update = function(d){
if(self$judge$is_up(d))
{
self$upcount = self$upcount+1
self$downcount = 0
}
else if(self$judge$is_down(d))
{
self$upcount = 0
self$downcount = self$downcount+1
}
else if(self$judge$is_cross(d))
{
self$upcount = 0
self$downcount = 0
}
}
))
nbar_strategy = function(d,position,nbarstate,losspoint=10,winpoint=10,n=3,pred)
{
time = as.character(index(d))
open = as.numeric(d$Open)
sma = as.numeric(d$sma)
curpostion = position
high = as.numeric(d$High)
low = as.numeric(d$Low)
atr = trunc(as.numeric(d$atr)) * 2
len = nrow(pred)
prehigh = as.numeric(pred[len,]$High)+1
prelow = as.numeric(pred[len,]$Low)-1
long_base = max(as.numeric(pred$High)) + 1
short_base = min(as.numeric(pred$Low)) - 1
#line = trunc((prehigh+prelow)/2)
# line = 15
if(nbarstate$upcount == n && high > prehigh)
{
barstate = EveryBarState$new()
movefixpoints = MoveFixPoints$new(barstate)
#openbuy()
op = ifelse(open > prehigh,open,prehigh)
#op = op + 1
stoploss = ifelse((open-short_base)>30,(op-30),short_base)#op - losspoint #op - losspoint##prelow# op - losspoint
stopwin = op + winpoint
r = data.frame(opentime=time,closetime=NA,open=op,close=NA,stopwin=stopwin,stoploss=stoploss,type='long',exittype='')
trade = Trade$new(r,stopwin=NULL,stoploss=defaultstoploss,movestop=movefixpoints)
curpostion$add(trade)
}
if(nbarstate$downcount == n && low < prelow )
{
#opensell()
barstate = EveryBarState$new()
movefixpoints = MoveFixPoints$new(barstate)
op = ifelse(open < prelow,open,prelow)
# op = op - 1
stoploss = ifelse((long_base-open)>30,(op+30),long_base)#long_base#op + losspoint#+ losspoint#line+1#prehigh#op + losspoint
stopwin = op - winpoint
r = data.frame(opentime=time,closetime=NA,open=op,close=NA,stopwin=stopwin,stoploss=stoploss,type='short',exittype='')
trade = Trade$new(r,stopwin=NULL,stoploss=defaultstoploss,movestop=movefixpoints)
curpostion$add(trade)
}
return(curpostion)
}
|
# Construct data.frame of simulation values to use for R simulations.
# The table is built programmatically but reproduces, row for row, the
# output of the original hand-written rbind() chain: downstream results are
# matched to rows by position (SimVal), so the order is load-bearing.
library(data.table)
def_wd <- "/Users/Scott/Documents/Dissertation/Paper1/SimulationCode/RSimulationCode"
# NOTE(review): setwd() in a script is fragile; kept for compatibility with
# the existing workflow.
setwd(def_wd)
# Base grid: every effect size crossed with every sample size
# (effect size major, sample size minor), all multipliers at 1.
effect.sizes <- c("c(1,1)", "c(1,1.025)", "c(1,1.05)", "c(1,1.10)",
                  "c(1,1.25)", "c(1,1.50)", "c(1,2.00)")
base.nsamp <- c(10, 100, 250)
base <- data.table(nsamp = rep(base.nsamp, times = length(effect.sizes)),
                   ncondlevels = 2,
                   condEffSizeStr = rep(effect.sizes, each = length(base.nsamp)),
                   GenWithMeasError = TRUE,
                   MeasErrorMultFactor = 1,
                   BetweenCovMultFactor = 1)
RSimVals <- copy(base)
# One extra copy of the base block per modified multiplier, appended in the
# original order: measurement-error factors above 1, then between-covariance
# factors above 1, then measurement-error factors below 1 (down to 0).
variants <- list(
  list(col = "MeasErrorMultFactor",  vals = c(1.50, 2, 4, 10, 20)),
  list(col = "BetweenCovMultFactor", vals = c(1.50, 2, 4, 10, 20)),
  list(col = "MeasErrorMultFactor",  vals = c(0.50, 0.25, 0.10, 0.05, 0.01, 0))
)
for (v in variants) {
  for (val in v$vals) {
    blk <- copy(base)
    blk[[v$col]] <- val
    RSimVals <- rbind(RSimVals, blk)
  }
}
# Add simulations for 26, 50 and 80 samples.
# Add these new nsamp values below the other simulation values to make sure
# the results saved for lower numbers correspond properly to how they were
# run.  (Every 3rd row, starting at 1, is the nsamp == 10 row of each
# scenario; each scenario is re-emitted once per new sample size.)
# Note: the cluster is unable to accept array values greater than 40000,
# hence this split layout.
sub_all_sims <- RSimVals[seq(1, nrow(RSimVals), by = 3), ]
for (new.n in c(26, 50, 80)) {
  blk <- copy(sub_all_sims)
  blk$nsamp <- new.n
  RSimVals <- rbind(RSimVals, blk)
}
# Now, add results for effect sizes c(1,1.375) and c(1,1.75): replicate
# every null-effect scenario constructed so far under the two new effect
# sizes (so the new sizes cover all sample sizes and multipliers above).
null.rows <- subset(RSimVals, RSimVals$condEffSizeStr == "c(1,1)")
for (new.es in c("c(1,1.375)", "c(1,1.75)")) {
  blk <- copy(null.rows)
  blk$condEffSizeStr <- new.es
  RSimVals <- rbind(RSimVals, blk)
}
# Human-readable key uniquely identifying each scenario.
RSimVals$UniqString <- paste0("nsamp", RSimVals$nsamp, "condEffSizeStr", RSimVals$condEffSizeStr,
                              "GenWithMeasError", RSimVals$GenWithMeasError, "MeasErrorMultFactor", RSimVals$MeasErrorMultFactor,
                              "BetweenCovMultFactor", RSimVals$BetweenCovMultFactor)
# Row number doubles as the simulation identifier; start seeds are spaced
# 1e6 apart so random streams of different simulations cannot overlap.
RSimVals$SimVal <- 1:nrow(RSimVals)
RSimVals$StartSeed <- RSimVals$SimVal * 1e6
# version = 2 keeps the file readable by R < 3.5.
save(RSimVals, file = "RSimVals.RData", version = 2)
#curr_nrow <- nrow(RSimVals)
#RSimVals <- rbind(RSimVals, RSimVals)
#RSimVals[((1):(curr_nrow)), "GenWithMeasError"] <- TRUE
| /RSimulationCode/ConstructRSimulationValues.R | no_license | skvanburen/CompDTUPaperCode | R | false | false | 8,393 | r | #Construct data.frame of simulation values to use for R simulations
library(data.table)
def_wd <- "/Users/Scott/Documents/Dissertation/Paper1/SimulationCode/RSimulationCode"
setwd(def_wd)
RSimVals <- data.table(nsamp = 10, ncondlevels = 2, condEffSizeStr = "c(1,1)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1)
RSimVals <- rbind(RSimVals, data.table(nsamp = 100, ncondlevels = 2, condEffSizeStr = "c(1,1)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 250, ncondlevels = 2, condEffSizeStr = "c(1,1)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 10, ncondlevels = 2, condEffSizeStr = "c(1,1.025)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 100, ncondlevels = 2, condEffSizeStr = "c(1,1.025)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 250, ncondlevels = 2, condEffSizeStr = "c(1,1.025)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 10, ncondlevels = 2, condEffSizeStr = "c(1,1.05)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 100, ncondlevels = 2, condEffSizeStr = "c(1,1.05)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 250, ncondlevels = 2, condEffSizeStr = "c(1,1.05)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 10, ncondlevels = 2, condEffSizeStr = "c(1,1.10)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 100, ncondlevels = 2, condEffSizeStr = "c(1,1.10)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 250, ncondlevels = 2, condEffSizeStr = "c(1,1.10)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 10, ncondlevels = 2, condEffSizeStr = "c(1,1.25)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 100, ncondlevels = 2, condEffSizeStr = "c(1,1.25)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 250, ncondlevels = 2, condEffSizeStr = "c(1,1.25)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 10, ncondlevels = 2, condEffSizeStr = "c(1,1.50)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 100, ncondlevels = 2, condEffSizeStr = "c(1,1.50)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 250, ncondlevels = 2, condEffSizeStr = "c(1,1.50)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 10, ncondlevels = 2, condEffSizeStr = "c(1,2.00)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 100, ncondlevels = 2, condEffSizeStr = "c(1,2.00)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
RSimVals <- rbind(RSimVals, data.table(nsamp = 250, ncondlevels = 2, condEffSizeStr = "c(1,2.00)", GenWithMeasError = TRUE, MeasErrorMultFactor = 1, BetweenCovMultFactor = 1))
curr_nrow <- nrow(RSimVals)
nsimblocks <- 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "MeasErrorMultFactor"] <- 1.50
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "MeasErrorMultFactor"] <- 2
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "MeasErrorMultFactor"] <- 4
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "MeasErrorMultFactor"] <- 10
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "MeasErrorMultFactor"] <- 20
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "BetweenCovMultFactor"] <- 1.50
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "BetweenCovMultFactor"] <- 2
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "BetweenCovMultFactor"] <- 4
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "BetweenCovMultFactor"] <- 10
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "BetweenCovMultFactor"] <- 20
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "MeasErrorMultFactor"] <- 0.50
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "MeasErrorMultFactor"] <- 0.25
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "MeasErrorMultFactor"] <- 0.10
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "MeasErrorMultFactor"] <- 0.05
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "MeasErrorMultFactor"] <- 0.01
nsimblocks <- nsimblocks + 1
RSimVals <- rbind(RSimVals, RSimVals[((1):(curr_nrow)),])
RSimVals[((nsimblocks*curr_nrow +1):((nsimblocks + 1)*curr_nrow)), "MeasErrorMultFactor"] <- 0
nsimblocks <- nsimblocks + 1
#Add simulations for 26 and 50 samples
#Add these new nsamp values below below the other simulation values to make sure the results saved for lower number correspond
#properly to how they were run
#Need to split the RSimsVals because the cluster is unable to accept array values greater than 40000
sub_all_sims <- RSimVals[seq(1, nrow(RSimVals), by = 3),]
sub_all_sims2 <- sub_all_sims
sub_all_sims3 <- sub_all_sims
sub_all_sims$nsamp <- 26
sub_all_sims2$nsamp <- 50
sub_all_sims3$nsamp <- 80
RSimVals <- rbind(RSimVals, sub_all_sims, sub_all_sims2, sub_all_sims3)
#Now, add results for effect sizes c(1,1.375) and c(1,1.75)
#Want to calculate these results for every scenario mentioned above, so subset to other unique combinations of this
t2 <- subset(RSimVals, RSimVals$condEffSizeStr=="c(1,1)")
t3 <- t2
t2$condEffSizeStr <- "c(1,1.375)"
t3$condEffSizeStr <- "c(1,1.75)"
RSimVals <- rbind(RSimVals, t2, t3)
RSimVals$UniqString <- paste0("nsamp", RSimVals$nsamp, "condEffSizeStr", RSimVals$condEffSizeStr,
"GenWithMeasError", RSimVals$GenWithMeasError, "MeasErrorMultFactor", RSimVals$MeasErrorMultFactor,
"BetweenCovMultFactor", RSimVals$BetweenCovMultFactor)
RSimVals$SimVal <- 1:nrow(RSimVals)
RSimVals$StartSeed <- RSimVals$SimVal * 1e6
save(RSimVals, file = "RSimVals.RData", version = 2)
#curr_nrow <- nrow(RSimVals)
#RSimVals <- rbind(RSimVals, RSimVals)
#RSimVals[((1):(curr_nrow)), "GenWithMeasError"] <- TRUE
|
test_that("prediction_spd Works", {
p <- diag(rep(1,3)) # 3x3 SPD matrix
V <- repmat(p, 5) # N=5 coefficients V
X <- matrix(c(1,2,3,4,5,6,7,8,9,1,2,3,4,5,6)/10, nrow=3) # rows = Xi components (3), cols = number of covariates (5)
expect_equal(class(prediction_spd(p = p,V = V, X = X)), "array")
})
| /tests/testthat/test-prediction_spd.R | permissive | zhangzjjjjjj/MGLMRiem | R | false | false | 308 | r | test_that("prediction_spd Works", {
p <- diag(rep(1,3)) # 3x3 SPD matrix
V <- repmat(p, 5) # N=5 coefficients V
X <- matrix(c(1,2,3,4,5,6,7,8,9,1,2,3,4,5,6)/10, nrow=3) # rows = Xi components (3), cols = number of covariates (5)
expect_equal(class(prediction_spd(p = p,V = V, X = X)), "array")
})
|
# Define UI for application that plots random distributions
#get About panel
tabPanelAbout <- source("about.r")$value
shinyUI(pageWithSidebar(
# Application title
headerPanel("NADA and Substitution Methods - Simulator"),
# Sidebar with a slider input for number of observations
sidebarPanel(
selectInput("distribution", "Distribution Type:", c("Log-Normal", "Gamma", "Normal")),
# This UI limits the reactivity to only this button and changing the distribution
actionButton("myValue1", "Select Distribution and RUN Sim"),
numericInput("obs",
"Number of observations:",
min = 0,
max = 100,
value = 50),
# IF LN this is the input values
conditionalPanel(
condition="input.distribution == 'Log-Normal'",
numericInput("log_sd",
"SD of Logs:",
min=0.1,
max=2.5,
value=1,
step=0.5
),
numericInput("log_mu",
"Mean of Logs:",
min=0.0,
max=10,
value=0.5,
step=0.5
)),
# IF GAMMA this is the input values
conditionalPanel(
condition="input.distribution == 'Gamma'",
numericInput("shape",
"Shape:",
min=0.1,
max=4,
value=1,
step=0.5
),
numericInput("scale",
"Scale:",
min=1,
max=200,
value=100,
step=10
)),
# IF NORMAL this is the input values
conditionalPanel(
condition="input.distribution == 'Normal'",
numericInput("sd",
"Std.Dev:",
min=0.1,
max=20,
value=1,
step=0.5
),
numericInput("mu",
"Mu:",
min=-10,
max=10,
value=10,
step=1
)),
# This ends the conditional input, we now input our censoring information
numericInput("cenRate1",
"Censoring Quantile 1:",
min=0,
max=1,
value=0.5,
step=0.1
),
# NOTE the weights are a relative weighting
sliderInput("cenWeight1",
"Weight of censoring value 1:",
min = 0,
max = 100,
value = 33
),
numericInput("cenRate2",
"Censoring quantile 2:",
min=0,
max=1,
value= 0.1,
step=0.1
),
sliderInput("cenWeight2",
"Weight of censoring quantile 2:",
min = 0,
max = 100,
value = 33
),
numericInput("cenRate3",
"Censoring quantile 3:",
min=0,
max=1,
value=0.1
),
sliderInput("cenWeight3",
"Weight of censoring value 3:",
min=0,
max=100,
value= 33
),
numericInput("simNum",
"Number of Simulated Samples:",
min=1,
max=1000,
value=100,
step=100
),
# this plots reactively the desnity plot for the proposed distribution and will be reactive to changes in theta values
plotOutput("distGraph")
),
# This is output of the simulation
mainPanel(
# we create tabs - plots, summary, RMSE, Bias, and Censored Statistical Summary
tabsetPanel(
tabPanel("Plots",
plotOutput("Meangraph"),
plotOutput("SDgraph")
),
tabPanel("Summary",
verbatimTextOutput("summary"),
h3("Mean Values"),
tableOutput("Meanview"),
h3("Standard Deviation Values"),
tableOutput("SDview")
),
tabPanel("RMSE Plot",
plotOutput("RMSEplot.mean"),
plotOutput("RMSEplot.sd")
),
tabPanel("Bias Plot",
plotOutput("Bplot.mean"),
plotOutput("Bplot.sd")
),
tabPanel("Censored Statistics Summary",
h2("Summary Statistics"),
tableOutput("censum.all"),
h2("Censoring Limits"),
tableOutput("limits.all")
),
tabPanelAbout(), id = "allPanels")
)
)
) | /ui.R | permissive | YoJimboDurant/shiny_NADA_test | R | false | false | 4,730 | r | # Define UI for application that plots random distributions
#get About panel
tabPanelAbout <- source("about.r")$value
shinyUI(pageWithSidebar(
# Application title
headerPanel("NADA and Substitution Methods - Simulator"),
# Sidebar with a slider input for number of observations
sidebarPanel(
selectInput("distribution", "Distribution Type:", c("Log-Normal", "Gamma", "Normal")),
# This UI limits the reactivity to only this button and changing the distribution
actionButton("myValue1", "Select Distribution and RUN Sim"),
numericInput("obs",
"Number of observations:",
min = 0,
max = 100,
value = 50),
# IF LN this is the input values
conditionalPanel(
condition="input.distribution == 'Log-Normal'",
numericInput("log_sd",
"SD of Logs:",
min=0.1,
max=2.5,
value=1,
step=0.5
),
numericInput("log_mu",
"Mean of Logs:",
min=0.0,
max=10,
value=0.5,
step=0.5
)),
# IF GAMMA this is the input values
conditionalPanel(
condition="input.distribution == 'Gamma'",
numericInput("shape",
"Shape:",
min=0.1,
max=4,
value=1,
step=0.5
),
numericInput("scale",
"Scale:",
min=1,
max=200,
value=100,
step=10
)),
# IF NORMAL this is the input values
conditionalPanel(
condition="input.distribution == 'Normal'",
numericInput("sd",
"Std.Dev:",
min=0.1,
max=20,
value=1,
step=0.5
),
numericInput("mu",
"Mu:",
min=-10,
max=10,
value=10,
step=1
)),
# This ends the conditional input, we now input our censoring information
numericInput("cenRate1",
"Censoring Quantile 1:",
min=0,
max=1,
value=0.5,
step=0.1
),
# NOTE the weights are a relative weighting
sliderInput("cenWeight1",
"Weight of censoring value 1:",
min = 0,
max = 100,
value = 33
),
numericInput("cenRate2",
"Censoring quantile 2:",
min=0,
max=1,
value= 0.1,
step=0.1
),
sliderInput("cenWeight2",
"Weight of censoring quantile 2:",
min = 0,
max = 100,
value = 33
),
numericInput("cenRate3",
"Censoring quantile 3:",
min=0,
max=1,
value=0.1
),
sliderInput("cenWeight3",
"Weight of censoring value 3:",
min=0,
max=100,
value= 33
),
numericInput("simNum",
"Number of Simulated Samples:",
min=1,
max=1000,
value=100,
step=100
),
# this plots reactively the desnity plot for the proposed distribution and will be reactive to changes in theta values
plotOutput("distGraph")
),
# This is output of the simulation
mainPanel(
# we create tabs - plots, summary, RMSE, Bias, and Censored Statistical Summary
tabsetPanel(
tabPanel("Plots",
plotOutput("Meangraph"),
plotOutput("SDgraph")
),
tabPanel("Summary",
verbatimTextOutput("summary"),
h3("Mean Values"),
tableOutput("Meanview"),
h3("Standard Deviation Values"),
tableOutput("SDview")
),
tabPanel("RMSE Plot",
plotOutput("RMSEplot.mean"),
plotOutput("RMSEplot.sd")
),
tabPanel("Bias Plot",
plotOutput("Bplot.mean"),
plotOutput("Bplot.sd")
),
tabPanel("Censored Statistics Summary",
h2("Summary Statistics"),
tableOutput("censum.all"),
h2("Censoring Limits"),
tableOutput("limits.all")
),
tabPanelAbout(), id = "allPanels")
)
)
) |
library(fRLR)
### Name: fRLR-package
### Title: A short title line describing what the package does
### Aliases: fRLR-package fRLR
### Keywords: package
### ** Examples
## Not run:
##D ## Optional simple examples of the most important functions
##D ## These can be in \dontrun{} and \donttest{} blocks.
##D
## End(Not run)
| /data/genthat_extracted_code/fRLR/examples/fRLR-package.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 349 | r | library(fRLR)
### Name: fRLR-package
### Title: A short title line describing what the package does
### Aliases: fRLR-package fRLR
### Keywords: package
### ** Examples
## Not run:
##D ## Optional simple examples of the most important functions
##D ## These can be in \dontrun{} and \donttest{} blocks.
##D
## End(Not run)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/utilities.R
\name{rnorm_tcont}
\alias{rnorm_tcont}
\title{Generate contaminated normally distributed data}
\usage{
rnorm_tcont(n, mean = 0, sd = 1, cprop = 0.1, csd = 3)
}
\arguments{
\item{n}{number of observations}
\item{mean}{mean value}
\item{sd}{standard deviation}
\item{cprop}{proportion of contaminated samples}
\item{csd}{standard deviation of contaminated samples}
}
\value{
vector with \code{n} pseudo random numbers
}
\description{
Simulate from a tail contaminated normal distribution
}
\author{
Florian Klinglmueller
}
| /man/rnorm_tcont.Rd | no_license | livioivil/resamplingMCP | R | false | false | 624 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/utilities.R
\name{rnorm_tcont}
\alias{rnorm_tcont}
\title{Generate contaminated normally distributed data}
\usage{
rnorm_tcont(n, mean = 0, sd = 1, cprop = 0.1, csd = 3)
}
\arguments{
\item{n}{number of observations}
\item{mean}{mean value}
\item{sd}{standard deviation}
\item{cprop}{proportion of contaminated samples}
\item{csd}{standard deviation of contaminated samples}
}
\value{
vector with \code{n} pseudo random numbers
}
\description{
Simulate from a tail contaminated normal distribution
}
\author{
Florian Klinglmueller
}
|
\name{dat.plourde}
\alias{dat.plourde}
\docType{data}
\title{
A Meta-Analysis for Comparing the Fluoroscopy Time in Percutaneous Coronary Intervention Between Radial and Femoral Accesses
}
\description{
This dataset serves as an example of meta-analysis of mean differences.
}
\usage{data("dat.plourde")}
\format{
A data frame with 19 studies with the following 5 variables within each study.
\describe{
\item{\code{y}}{point esimates of mean differences.}
\item{\code{s2}}{sample variances of mean differences.}
\item{\code{n1}}{sample sizes in treatment group 1 (radial).}
\item{\code{n2}}{sample sizes in treatment group 2 (femoral).}
\item{\code{n}}{total sample sizes.}
}
}
\source{
Plourde G, Pancholy SB, Nolan J, Jolly S, Rao SV, Amhed I, Bangalore S, Patel T, Dahm JB, Bertrand OF (2015). "Radiation exposure in relation to the arterial access site used for diagnostic coronary angiography and percutaneous coronary intervention: a systematic review and meta-analysis." \emph{Lancet}, \bold{386}(10009), 2192--2203. <\doi{10.1016/S0140-6736(15)00305-0}>
}
\keyword{dataset} | /man/dat.plourde.Rd | no_license | cran/altmeta | R | false | false | 1,131 | rd | \name{dat.plourde}
\alias{dat.plourde}
\docType{data}
\title{
A Meta-Analysis for Comparing the Fluoroscopy Time in Percutaneous Coronary Intervention Between Radial and Femoral Accesses
}
\description{
This dataset serves as an example of meta-analysis of mean differences.
}
\usage{data("dat.plourde")}
\format{
A data frame with 19 studies with the following 5 variables within each study.
\describe{
\item{\code{y}}{point esimates of mean differences.}
\item{\code{s2}}{sample variances of mean differences.}
\item{\code{n1}}{sample sizes in treatment group 1 (radial).}
\item{\code{n2}}{sample sizes in treatment group 2 (femoral).}
\item{\code{n}}{total sample sizes.}
}
}
\source{
Plourde G, Pancholy SB, Nolan J, Jolly S, Rao SV, Amhed I, Bangalore S, Patel T, Dahm JB, Bertrand OF (2015). "Radiation exposure in relation to the arterial access site used for diagnostic coronary angiography and percutaneous coronary intervention: a systematic review and meta-analysis." \emph{Lancet}, \bold{386}(10009), 2192--2203. <\doi{10.1016/S0140-6736(15)00305-0}>
}
\keyword{dataset} |
#'
generateLearningCurve = function(lrn, task, test.size = 0.3, test.inds = NULL,
n.seq = seq(0.1, 1, by = 0.1), measures, repls = 1L) {
l = length(lrn)
for (i in 1:l) {
lrn[[i]] = checkLearner(lrn[[i]])
}
# assertClass(task, "Task")
n = task$task.desc$size
# assertNumeric(n.seq, min.len = 2L, any.missing = FALSE)
if (is.null(test.inds)) {
test.inds = makeResampleInstance("Holdout", task = task, split = test.size)$train[[1L]]
} else {
test.inds = asInteger(test.inds)
}
measures = checkMeasures(measures, task)
# repls = asInt(repls, lower = 1L)
k = length(n.seq)
inds.all = setdiff(1:n, test.inds)
perfs = replicate(l, array(NA, dim = c(repls, k, length(measures))),
simplify = FALSE)
lrn.names = unlist(lapply(lrn, function(x) x$short.name))
measure.names = sapply(measures, measureAggrName)
names(perfs) = lrn.names
for (i in 1:l) {
dimnames(perfs[[i]]) = list(1:repls, n.seq, measure.names)
}
n.obs = numeric(length(n.seq))
for (repl in 1:repls) {
# inds = sample(n.seq[1L])
m.last = 0
rest = inds.all
inds.last = integer(0L)
for (j in 1:k) {
m = n.seq[j]
more = (m - m.last) * (n - length(test.inds))
inds.new = sample(rest, more)
inds.cur = c(inds.last, inds.new)
if (repl == 1) {
n.obs[j] = length(inds.cur)
}
for (i in 1:l) {
mod = train(lrn[[i]], task, subset = inds.cur)
pred = predict(mod, task, subset = test.inds)
perfs[[i]][repl, j, ] = performance(pred, task = task, measures = measures)
}
m.last = m
inds.last = inds.cur
}
}
res = data.frame()
for (j in 1:l) {
for (i in 1:length(measures)) {
new = data.frame(n.obs = n.obs, perfs = colMeans(perfs[[j]])[,i],
measure = measure.names[i],
learner = lrn.names[j])
res = rbind(res, new)
}
}
return(res)
}
plotLearningCurve = function(res) {
library(ggplot2)
ggplot(res, aes(x = n.obs, y = perfs, colour = learner)) + layer(geom = "point") +
layer(geom = "line") + facet_wrap(~measure)
}
# r1 = generateLearningCurve(lrn = list("classif.rpart", "classif.lda", "classif.knn"),
# task = iris.task, measures = list(mmce, acc), repls = 12L)
# plotLearningCurve(r1)
# r2 = generateLearningCurve(lrn = list("classif.rpart", "classif.knn", "classif.naiveBayes",
# "classif.svm", "classif.plr", "classif.randomForest"),
# task = sonar.task, test.size = 0.25, n.seq = seq(0.2, 1, by = 0.2),
# measures = list(tp, fp, tn, fn), repls = 6L)
# plotLearningCurve(r2)
| /todo-files/generateLearningCurve.R | no_license | narayana1208/mlr | R | false | false | 2,740 | r |
#'
generateLearningCurve = function(lrn, task, test.size = 0.3, test.inds = NULL,
n.seq = seq(0.1, 1, by = 0.1), measures, repls = 1L) {
l = length(lrn)
for (i in 1:l) {
lrn[[i]] = checkLearner(lrn[[i]])
}
# assertClass(task, "Task")
n = task$task.desc$size
# assertNumeric(n.seq, min.len = 2L, any.missing = FALSE)
if (is.null(test.inds)) {
test.inds = makeResampleInstance("Holdout", task = task, split = test.size)$train[[1L]]
} else {
test.inds = asInteger(test.inds)
}
measures = checkMeasures(measures, task)
# repls = asInt(repls, lower = 1L)
k = length(n.seq)
inds.all = setdiff(1:n, test.inds)
perfs = replicate(l, array(NA, dim = c(repls, k, length(measures))),
simplify = FALSE)
lrn.names = unlist(lapply(lrn, function(x) x$short.name))
measure.names = sapply(measures, measureAggrName)
names(perfs) = lrn.names
for (i in 1:l) {
dimnames(perfs[[i]]) = list(1:repls, n.seq, measure.names)
}
n.obs = numeric(length(n.seq))
for (repl in 1:repls) {
# inds = sample(n.seq[1L])
m.last = 0
rest = inds.all
inds.last = integer(0L)
for (j in 1:k) {
m = n.seq[j]
more = (m - m.last) * (n - length(test.inds))
inds.new = sample(rest, more)
inds.cur = c(inds.last, inds.new)
if (repl == 1) {
n.obs[j] = length(inds.cur)
}
for (i in 1:l) {
mod = train(lrn[[i]], task, subset = inds.cur)
pred = predict(mod, task, subset = test.inds)
perfs[[i]][repl, j, ] = performance(pred, task = task, measures = measures)
}
m.last = m
inds.last = inds.cur
}
}
res = data.frame()
for (j in 1:l) {
for (i in 1:length(measures)) {
new = data.frame(n.obs = n.obs, perfs = colMeans(perfs[[j]])[,i],
measure = measure.names[i],
learner = lrn.names[j])
res = rbind(res, new)
}
}
return(res)
}
plotLearningCurve = function(res) {
library(ggplot2)
ggplot(res, aes(x = n.obs, y = perfs, colour = learner)) + layer(geom = "point") +
layer(geom = "line") + facet_wrap(~measure)
}
# r1 = generateLearningCurve(lrn = list("classif.rpart", "classif.lda", "classif.knn"),
# task = iris.task, measures = list(mmce, acc), repls = 12L)
# plotLearningCurve(r1)
# r2 = generateLearningCurve(lrn = list("classif.rpart", "classif.knn", "classif.naiveBayes",
# "classif.svm", "classif.plr", "classif.randomForest"),
# task = sonar.task, test.size = 0.25, n.seq = seq(0.2, 1, by = 0.2),
# measures = list(tp, fp, tn, fn), repls = 6L)
# plotLearningCurve(r2)
|
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<link rel="dns-prefetch" href="https://assets-cdn.github.com">
<link rel="dns-prefetch" href="https://avatars0.githubusercontent.com">
<link rel="dns-prefetch" href="https://avatars1.githubusercontent.com">
<link rel="dns-prefetch" href="https://avatars2.githubusercontent.com">
<link rel="dns-prefetch" href="https://avatars3.githubusercontent.com">
<link rel="dns-prefetch" href="https://github-cloud.s3.amazonaws.com">
<link rel="dns-prefetch" href="https://user-images.githubusercontent.com/">
<link crossorigin="anonymous" href="https://assets-cdn.github.com/assets/frameworks-521cbf980c80.css" integrity="sha512-Uhy/mAyAx1HfsenmjQ85ASpOk5bjt2Ay03pNeixXIvkHlEm5S+N4u0HWfDGhvsGYx4bGyviXWGGPZeIffqYcNA==" media="all" rel="stylesheet" />
<link crossorigin="anonymous" href="https://assets-cdn.github.com/assets/github-bab09cdfa5e9.css" integrity="sha512-urCc36XpOB6NJpSUfwUO4198a84yfDnoKASZ+D+7pCjpTpQ3YrhkgX9SgIpI83PiKF87mXoMJHJ/nE0eXNeTqA==" media="all" rel="stylesheet" />
<link crossorigin="anonymous" href="https://assets-cdn.github.com/assets/site-e1e1bc98a53e.css" integrity="sha512-4eG8mKU+R9QAnMQwfSIgbo24hS+nUXxSuUs5G5LMQw+5wjC1TSKfgxJb2j61PXya94+wMwN1OT7r7MF5rbdUvw==" media="all" rel="stylesheet" />
<meta name="viewport" content="width=device-width">
<title>coursera-exploratory-data-analysis-course-project-2/plot1.R at master · TomLous/coursera-exploratory-data-analysis-course-project-2 · GitHub</title>
<link rel="search" type="application/opensearchdescription+xml" href="/opensearch.xml" title="GitHub">
<link rel="fluid-icon" href="https://github.com/fluidicon.png" title="GitHub">
<meta property="fb:app_id" content="1401488693436528">
<meta content="https://avatars1.githubusercontent.com/u/2259971?s=400&v=4" property="og:image" /><meta content="GitHub" property="og:site_name" /><meta content="object" property="og:type" /><meta content="TomLous/coursera-exploratory-data-analysis-course-project-2" property="og:title" /><meta content="https://github.com/TomLous/coursera-exploratory-data-analysis-course-project-2" property="og:url" /><meta content="Contribute to coursera-exploratory-data-analysis-course-project-2 development by creating an account on GitHub." property="og:description" />
<link rel="assets" href="https://assets-cdn.github.com/">
<meta name="pjax-timeout" content="1000">
<meta name="request-id" content="FE8C:57C0:21DA79D:38C3D41:5A76ACEB" data-pjax-transient>
<meta name="selected-link" value="repo_source" data-pjax-transient>
<meta name="google-site-verification" content="KT5gs8h0wvaagLKAVWq8bbeNwnZZK1r1XQysX3xurLU">
<meta name="google-site-verification" content="ZzhVyEFwb7w3e0-uOTltm8Jsck2F5StVihD0exw2fsA">
<meta name="google-site-verification" content="GXs5KoUUkNCoaAZn7wPN-t01Pywp9M3sEjnt_3_ZWPc">
<meta name="google-analytics" content="UA-3769691-2">
<meta content="collector.githubapp.com" name="octolytics-host" /><meta content="github" name="octolytics-app-id" /><meta content="https://collector.githubapp.com/github-external/browser_event" name="octolytics-event-url" /><meta content="FE8C:57C0:21DA79D:38C3D41:5A76ACEB" name="octolytics-dimension-request_id" /><meta content="iad" name="octolytics-dimension-region_edge" /><meta content="iad" name="octolytics-dimension-region_render" />
<meta content="https://github.com/hydro_browser_events" name="hydro-events-url" />
<meta content="/<user-name>/<repo-name>/blob/show" data-pjax-transient="true" name="analytics-location" />
<meta class="js-ga-set" name="dimension1" content="Logged Out">
<meta name="hostname" content="github.com">
<meta name="user-login" content="">
<meta name="expected-hostname" content="github.com">
<meta name="js-proxy-site-detection-payload" content="YTE4MWFlZDQzODA2NjIxNWZmYzQwNDQxODRiYTg4ODkwNmY2ZWUzZTJjM2I1MWQzOTY4MmJhMDEwNGYyNDY5N3x7InJlbW90ZV9hZGRyZXNzIjoiNzYuODQuNTQuNjMiLCJyZXF1ZXN0X2lkIjoiRkU4Qzo1N0MwOjIxREE3OUQ6MzhDM0Q0MTo1QTc2QUNFQiIsInRpbWVzdGFtcCI6MTUxNzcyNjk4NiwiaG9zdCI6ImdpdGh1Yi5jb20ifQ==">
<meta name="enabled-features" content="UNIVERSE_BANNER,FREE_TRIALS,MARKETPLACE_HERO_CARD_UPLOADER">
<meta name="html-safe-nonce" content="cd1c041f21fbbde1e93e0b11d81eabf1ab946fc8">
<meta http-equiv="x-pjax-version" content="2bcdfdf37090b189cdf7cd792d535310">
<link href="https://github.com/TomLous/coursera-exploratory-data-analysis-course-project-2/commits/master.atom" rel="alternate" title="Recent Commits to coursera-exploratory-data-analysis-course-project-2:master" type="application/atom+xml">
<meta name="description" content="Contribute to coursera-exploratory-data-analysis-course-project-2 development by creating an account on GitHub.">
<meta name="go-import" content="github.com/TomLous/coursera-exploratory-data-analysis-course-project-2 git https://github.com/TomLous/coursera-exploratory-data-analysis-course-project-2.git">
<meta content="2259971" name="octolytics-dimension-user_id" /><meta content="TomLous" name="octolytics-dimension-user_login" /><meta content="22311040" name="octolytics-dimension-repository_id" /><meta content="TomLous/coursera-exploratory-data-analysis-course-project-2" name="octolytics-dimension-repository_nwo" /><meta content="true" name="octolytics-dimension-repository_public" /><meta content="false" name="octolytics-dimension-repository_is_fork" /><meta content="22311040" name="octolytics-dimension-repository_network_root_id" /><meta content="TomLous/coursera-exploratory-data-analysis-course-project-2" name="octolytics-dimension-repository_network_root_nwo" /><meta content="false" name="octolytics-dimension-repository_explore_github_marketplace_ci_cta_shown" />
<link rel="canonical" href="https://github.com/TomLous/coursera-exploratory-data-analysis-course-project-2/blob/master/plot1.R" data-pjax-transient>
<meta name="browser-stats-url" content="https://api.github.com/_private/browser/stats">
<meta name="browser-errors-url" content="https://api.github.com/_private/browser/errors">
<link rel="mask-icon" href="https://assets-cdn.github.com/pinned-octocat.svg" color="#000000">
<link rel="icon" type="image/x-icon" class="js-site-favicon" href="https://assets-cdn.github.com/favicon.ico">
<meta name="theme-color" content="#1e2327">
<meta name="u2f-support" content="true">
</head>
<body class="logged-out env-production page-blob">
<div class="position-relative js-header-wrapper ">
<a href="#start-of-content" tabindex="1" class="px-2 py-4 show-on-focus js-skip-to-content">Skip to content</a>
<div id="js-pjax-loader-bar" class="pjax-loader-bar"><div class="progress"></div></div>
<header class="Header header-logged-out position-relative f4 py-3" role="banner">
<div class="container-lg d-flex px-3">
<div class="d-flex flex-justify-between flex-items-center">
<a class="header-logo-invertocat my-0" href="https://github.com/" aria-label="Homepage" data-ga-click="(Logged out) Header, go to homepage, icon:logo-wordmark">
<svg aria-hidden="true" class="octicon octicon-mark-github" height="32" version="1.1" viewBox="0 0 16 16" width="32"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"/></svg>
</a>
</div>
<div class="HeaderMenu HeaderMenu--bright d-flex flex-justify-between flex-auto">
<nav class="mt-0">
<ul class="d-flex list-style-none">
<li class="ml-2">
<a href="/features" class="js-selected-navigation-item HeaderNavlink px-0 py-2 m-0" data-ga-click="Header, click, Nav menu - item:features" data-selected-links="/features /features/project-management /features/code-review /features/project-management /features/integrations /features">
Features
</a> </li>
<li class="ml-4">
<a href="/business" class="js-selected-navigation-item HeaderNavlink px-0 py-2 m-0" data-ga-click="Header, click, Nav menu - item:business" data-selected-links="/business /business/security /business/customers /business">
Business
</a> </li>
<li class="ml-4">
<a href="/explore" class="js-selected-navigation-item HeaderNavlink px-0 py-2 m-0" data-ga-click="Header, click, Nav menu - item:explore" data-selected-links="/explore /trending /trending/developers /integrations /integrations/feature/code /integrations/feature/collaborate /integrations/feature/ship showcases showcases_search showcases_landing /explore">
Explore
</a> </li>
<li class="ml-4">
<a href="/marketplace" class="js-selected-navigation-item HeaderNavlink px-0 py-2 m-0" data-ga-click="Header, click, Nav menu - item:marketplace" data-selected-links=" /marketplace">
Marketplace
</a> </li>
<li class="ml-4">
<a href="/pricing" class="js-selected-navigation-item HeaderNavlink px-0 py-2 m-0" data-ga-click="Header, click, Nav menu - item:pricing" data-selected-links="/pricing /pricing/developer /pricing/team /pricing/business-hosted /pricing/business-enterprise /pricing">
Pricing
</a> </li>
</ul>
</nav>
<div class="d-flex">
<div class="d-lg-flex flex-items-center mr-3">
<div class="header-search scoped-search site-scoped-search js-site-search" role="search">
<!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="/TomLous/coursera-exploratory-data-analysis-course-project-2/search" class="js-site-search-form" data-scoped-search-url="/TomLous/coursera-exploratory-data-analysis-course-project-2/search" data-unscoped-search-url="/search" method="get"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /></div>
<label class="form-control header-search-wrapper js-chromeless-input-container">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/blob/master/plot1.R" class="header-search-scope no-underline">This repository</a>
<input type="text"
class="form-control header-search-input js-site-search-focus js-site-search-field is-clearable"
data-hotkey="s"
name="q"
value=""
placeholder="Search"
aria-label="Search this repository"
data-unscoped-placeholder="Search GitHub"
data-scoped-placeholder="Search"
autocapitalize="off">
<input type="hidden" class="js-site-search-type-field" name="type" >
</label>
</form></div>
</div>
<span class="d-inline-block">
<div class="HeaderNavlink px-0 py-2 m-0">
<a class="text-bold text-white no-underline" href="/login?return_to=%2FTomLous%2Fcoursera-exploratory-data-analysis-course-project-2%2Fblob%2Fmaster%2Fplot1.R" data-ga-click="(Logged out) Header, clicked Sign in, text:sign-in">Sign in</a>
<span class="text-gray">or</span>
<a class="text-bold text-white no-underline" href="/join?source=header-repo" data-ga-click="(Logged out) Header, clicked Sign up, text:sign-up">Sign up</a>
</div>
</span>
</div>
</div>
</div>
</header>
</div>
<div id="start-of-content" class="show-on-focus"></div>
<div id="js-flash-container">
</div>
<div role="main" class="application-main ">
<div itemscope itemtype="http://schema.org/SoftwareSourceCode" class="">
<div id="js-repo-pjax-container" data-pjax-container >
<div class="pagehead repohead instapaper_ignore readability-menu experiment-repo-nav ">
<div class="repohead-details-container clearfix container">
<ul class="pagehead-actions">
<li>
<a href="/login?return_to=%2FTomLous%2Fcoursera-exploratory-data-analysis-course-project-2"
class="btn btn-sm btn-with-count tooltipped tooltipped-n"
aria-label="You must be signed in to watch a repository" rel="nofollow">
<svg aria-hidden="true" class="octicon octicon-eye" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M8.06 2C3 2 0 8 0 8s3 6 8.06 6C13 14 16 8 16 8s-3-6-7.94-6zM8 12c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4zm2-4c0 1.11-.89 2-2 2-1.11 0-2-.89-2-2 0-1.11.89-2 2-2 1.11 0 2 .89 2 2z"/></svg>
Watch
</a>
<a class="social-count" href="/TomLous/coursera-exploratory-data-analysis-course-project-2/watchers"
aria-label="2 users are watching this repository">
2
</a>
</li>
<li>
<a href="/login?return_to=%2FTomLous%2Fcoursera-exploratory-data-analysis-course-project-2"
class="btn btn-sm btn-with-count tooltipped tooltipped-n"
aria-label="You must be signed in to star a repository" rel="nofollow">
<svg aria-hidden="true" class="octicon octicon-star" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M14 6l-4.9-.64L7 1 4.9 5.36 0 6l3.6 3.26L2.67 14 7 11.67 11.33 14l-.93-4.74z"/></svg>
Star
</a>
<a class="social-count js-social-count" href="/TomLous/coursera-exploratory-data-analysis-course-project-2/stargazers"
aria-label="1 user starred this repository">
1
</a>
</li>
<li>
<a href="/login?return_to=%2FTomLous%2Fcoursera-exploratory-data-analysis-course-project-2"
class="btn btn-sm btn-with-count tooltipped tooltipped-n"
aria-label="You must be signed in to fork a repository" rel="nofollow">
<svg aria-hidden="true" class="octicon octicon-repo-forked" height="16" version="1.1" viewBox="0 0 10 16" width="10"><path fill-rule="evenodd" d="M8 1a1.993 1.993 0 0 0-1 3.72V6L5 8 3 6V4.72A1.993 1.993 0 0 0 2 1a1.993 1.993 0 0 0-1 3.72V6.5l3 3v1.78A1.993 1.993 0 0 0 5 15a1.993 1.993 0 0 0 1-3.72V9.5l3-3V4.72A1.993 1.993 0 0 0 8 1zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3 10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3-10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"/></svg>
Fork
</a>
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/network" class="social-count"
aria-label="109 users forked this repository">
109
</a>
</li>
</ul>
<h1 class="public ">
<svg aria-hidden="true" class="octicon octicon-repo" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M4 9H3V8h1v1zm0-3H3v1h1V6zm0-2H3v1h1V4zm0-2H3v1h1V2zm8-1v12c0 .55-.45 1-1 1H6v2l-1.5-1.5L3 16v-2H1c-.55 0-1-.45-1-1V1c0-.55.45-1 1-1h10c.55 0 1 .45 1 1zm-1 10H1v2h2v-1h3v1h5v-2zm0-10H2v9h9V1z"/></svg>
<span class="author" itemprop="author"><a href="/TomLous" class="url fn" rel="author">TomLous</a></span><!--
--><span class="path-divider">/</span><!--
--><strong itemprop="name"><a href="/TomLous/coursera-exploratory-data-analysis-course-project-2" data-pjax="#js-repo-pjax-container">coursera-exploratory-data-analysis-course-project-2</a></strong>
</h1>
</div>
<nav class="reponav js-repo-nav js-sidenav-container-pjax container"
itemscope
itemtype="http://schema.org/BreadcrumbList"
role="navigation"
data-pjax="#js-repo-pjax-container">
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2" class="js-selected-navigation-item selected reponav-item" data-hotkey="g c" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches repo_packages /TomLous/coursera-exploratory-data-analysis-course-project-2" itemprop="url">
<svg aria-hidden="true" class="octicon octicon-code" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M9.5 3L8 4.5 11.5 8 8 11.5 9.5 13 14 8 9.5 3zm-5 0L0 8l4.5 5L6 11.5 2.5 8 6 4.5 4.5 3z"/></svg>
<span itemprop="name">Code</span>
<meta itemprop="position" content="1">
</a> </span>
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/issues" class="js-selected-navigation-item reponav-item" data-hotkey="g i" data-selected-links="repo_issues repo_labels repo_milestones /TomLous/coursera-exploratory-data-analysis-course-project-2/issues" itemprop="url">
<svg aria-hidden="true" class="octicon octicon-issue-opened" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 0 1 1.3 8c0-3.14 2.56-5.7 5.7-5.7zM7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7zm1 3H6v5h2V4zm0 6H6v2h2v-2z"/></svg>
<span itemprop="name">Issues</span>
<span class="Counter">0</span>
<meta itemprop="position" content="2">
</a> </span>
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/pulls" class="js-selected-navigation-item reponav-item" data-hotkey="g p" data-selected-links="repo_pulls /TomLous/coursera-exploratory-data-analysis-course-project-2/pulls" itemprop="url">
<svg aria-hidden="true" class="octicon octicon-git-pull-request" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M11 11.28V5c-.03-.78-.34-1.47-.94-2.06C9.46 2.35 8.78 2.03 8 2H7V0L4 3l3 3V4h1c.27.02.48.11.69.31.21.2.3.42.31.69v6.28A1.993 1.993 0 0 0 10 15a1.993 1.993 0 0 0 1-3.72zm-1 2.92c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zM4 3c0-1.11-.89-2-2-2a1.993 1.993 0 0 0-1 3.72v6.56A1.993 1.993 0 0 0 2 15a1.993 1.993 0 0 0 1-3.72V4.72c.59-.34 1-.98 1-1.72zm-.8 10c0 .66-.55 1.2-1.2 1.2-.65 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"/></svg>
<span itemprop="name">Pull requests</span>
<span class="Counter">0</span>
<meta itemprop="position" content="3">
</a> </span>
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/projects" class="js-selected-navigation-item reponav-item" data-hotkey="g b" data-selected-links="repo_projects new_repo_project repo_project /TomLous/coursera-exploratory-data-analysis-course-project-2/projects">
<svg aria-hidden="true" class="octicon octicon-project" height="16" version="1.1" viewBox="0 0 15 16" width="15"><path fill-rule="evenodd" d="M10 12h3V2h-3v10zm-4-2h3V2H6v8zm-4 4h3V2H2v12zm-1 1h13V1H1v14zM14 0H1a1 1 0 0 0-1 1v14a1 1 0 0 0 1 1h13a1 1 0 0 0 1-1V1a1 1 0 0 0-1-1z"/></svg>
Projects
<span class="Counter" >0</span>
</a>
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/pulse" class="js-selected-navigation-item reponav-item" data-selected-links="repo_graphs repo_contributors dependency_graph pulse /TomLous/coursera-exploratory-data-analysis-course-project-2/pulse">
<svg aria-hidden="true" class="octicon octicon-graph" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M16 14v1H0V0h1v14h15zM5 13H3V8h2v5zm4 0H7V3h2v10zm4 0h-2V6h2v7z"/></svg>
Insights
</a>
</nav>
</div>
<div class="container new-discussion-timeline experiment-repo-nav ">
<div class="repository-content ">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/blob/1e065c8529a9f143621378c3748eac6a4f74afc5/plot1.R" class="d-none js-permalink-shortcut" data-hotkey="y">Permalink</a>
<!-- blob contrib key: blob_contributors:v21:6f1b29b259ff64a69d0e995bc1ef9096 -->
<div class="file-navigation js-zeroclipboard-container">
<div class="select-menu branch-select-menu js-menu-container js-select-menu float-left">
<button class=" btn btn-sm select-menu-button js-menu-target css-truncate" data-hotkey="w"
type="button" aria-label="Switch branches or tags" aria-expanded="false" aria-haspopup="true">
<i>Branch:</i>
<span class="js-select-button css-truncate-target">master</span>
</button>
<div class="select-menu-modal-holder js-menu-content js-navigation-container" data-pjax>
<div class="select-menu-modal">
<div class="select-menu-header">
<svg aria-label="Close" class="octicon octicon-x js-menu-close" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48z"/></svg>
<span class="select-menu-title">Switch branches/tags</span>
</div>
<div class="select-menu-filters">
<div class="select-menu-text-filter">
<input type="text" aria-label="Filter branches/tags" id="context-commitish-filter-field" class="form-control js-filterable-field js-navigation-enable" placeholder="Filter branches/tags">
</div>
<div class="select-menu-tabs">
<ul>
<li class="select-menu-tab">
<a href="#" data-tab-filter="branches" data-filter-placeholder="Filter branches/tags" class="js-select-menu-tab" role="tab">Branches</a>
</li>
<li class="select-menu-tab">
<a href="#" data-tab-filter="tags" data-filter-placeholder="Find a tag…" class="js-select-menu-tab" role="tab">Tags</a>
</li>
</ul>
</div>
</div>
<div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="branches" role="menu">
<div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
<a class="select-menu-item js-navigation-item js-navigation-open selected"
href="/TomLous/coursera-exploratory-data-analysis-course-project-2/blob/master/plot1.R"
data-name="master"
data-skip-pjax="true"
rel="nofollow">
<svg aria-hidden="true" class="octicon octicon-check select-menu-item-icon" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M12 5l-8 8-4-4 1.5-1.5L4 10l6.5-6.5z"/></svg>
<span class="select-menu-item-text css-truncate-target js-select-menu-filter-text">
master
</span>
</a>
</div>
<div class="select-menu-no-results">Nothing to show</div>
</div>
<div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="tags">
<div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
</div>
<div class="select-menu-no-results">Nothing to show</div>
</div>
</div>
</div>
</div>
<div class="BtnGroup float-right">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/find/master"
class="js-pjax-capture-input btn btn-sm BtnGroup-item"
data-pjax
data-hotkey="t">
Find file
</a>
<button aria-label="Copy file path to clipboard" class="js-zeroclipboard btn btn-sm BtnGroup-item tooltipped tooltipped-s" data-copied-hint="Copied!" type="button">Copy path</button>
</div>
<div class="breadcrumb js-zeroclipboard-target">
<span class="repo-root js-repo-root"><span class="js-path-segment"><a href="/TomLous/coursera-exploratory-data-analysis-course-project-2" data-pjax="true"><span>coursera-exploratory-data-analysis-course-project-2</span></a></span></span><span class="separator">/</span><strong class="final-path">plot1.R</strong>
</div>
</div>
<div class="commit-tease">
<span class="float-right">
<a class="commit-tease-sha" href="/TomLous/coursera-exploratory-data-analysis-course-project-2/commit/1949a42fad4e79022eab30e82ad6cba319935ea3" data-pjax>
1949a42
</a>
<relative-time datetime="2014-07-27T13:30:31Z">Jul 27, 2014</relative-time>
</span>
<div>
<img alt="@TomLous" class="avatar" height="20" src="https://avatars3.githubusercontent.com/u/2259971?s=40&v=4" width="20" />
<a href="/TomLous" class="user-mention" rel="author">TomLous</a>
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/commit/1949a42fad4e79022eab30e82ad6cba319935ea3" class="message" data-pjax="true" title="Plot1 & 2">Plot1 & 2</a>
</div>
<div class="commit-tease-contributors">
<button type="button" class="btn-link muted-link contributors-toggle" data-facebox="#blob_contributors_box">
<strong>1</strong>
contributor
</button>
</div>
<div id="blob_contributors_box" style="display:none">
<h2 class="facebox-header" data-facebox-id="facebox-header">Users who have contributed to this file</h2>
<ul class="facebox-user-list" data-facebox-id="facebox-description">
<li class="facebox-user-list-item">
<img alt="@TomLous" height="24" src="https://avatars2.githubusercontent.com/u/2259971?s=48&v=4" width="24" />
<a href="/TomLous">TomLous</a>
</li>
</ul>
</div>
</div>
<div class="file">
<div class="file-header">
<div class="file-actions">
<div class="BtnGroup">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/raw/master/plot1.R" class="btn btn-sm BtnGroup-item" id="raw-url">Raw</a>
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/blame/master/plot1.R" class="btn btn-sm js-update-url-with-hash BtnGroup-item" data-hotkey="b">Blame</a>
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/commits/master/plot1.R" class="btn btn-sm BtnGroup-item" rel="nofollow">History</a>
</div>
<a class="btn-octicon tooltipped tooltipped-nw"
href="https://desktop.github.com"
aria-label="Open this file in GitHub Desktop"
data-ga-click="Repository, open with desktop, type:windows">
<svg aria-hidden="true" class="octicon octicon-device-desktop" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M15 2H1c-.55 0-1 .45-1 1v9c0 .55.45 1 1 1h5.34c-.25.61-.86 1.39-2.34 2h8c-1.48-.61-2.09-1.39-2.34-2H15c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm0 9H1V3h14v8z"/></svg>
</a>
<button type="button" class="btn-octicon disabled tooltipped tooltipped-nw"
aria-label="You must be signed in to make or propose changes">
<svg aria-hidden="true" class="octicon octicon-pencil" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M0 12v3h3l8-8-3-3-8 8zm3 2H1v-2h1v1h1v1zm10.3-9.3L12 6 9 3l1.3-1.3a.996.996 0 0 1 1.41 0l1.59 1.59c.39.39.39 1.02 0 1.41z"/></svg>
</button>
<button type="button" class="btn-octicon btn-octicon-danger disabled tooltipped tooltipped-nw"
aria-label="You must be signed in to make or propose changes">
<svg aria-hidden="true" class="octicon octicon-trashcan" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M11 2H9c0-.55-.45-1-1-1H5c-.55 0-1 .45-1 1H2c-.55 0-1 .45-1 1v1c0 .55.45 1 1 1v9c0 .55.45 1 1 1h7c.55 0 1-.45 1-1V5c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm-1 12H3V5h1v8h1V5h1v8h1V5h1v8h1V5h1v9zm1-10H2V3h9v1z"/></svg>
</button>
</div>
<div class="file-info">
16 lines (14 sloc)
<span class="file-info-divider"></span>
746 Bytes
</div>
</div>
<div itemprop="text" class="blob-wrapper data type-r">
<table class="highlight tab-size js-file-line-container" data-tab-size="8">
<tr>
<td id="L1" class="blob-num js-line-number" data-line-number="1"></td>
<td id="LC1" class="blob-code blob-code-inner js-file-line"><span class="pl-c"><span class="pl-c">#</span># This first line will likely take a few seconds. Be patient!</span></td>
</tr>
<tr>
<td id="L2" class="blob-num js-line-number" data-line-number="2"></td>
<td id="LC2" class="blob-code blob-code-inner js-file-line"><span class="pl-k">if</span>(<span class="pl-k">!</span>exists(<span class="pl-s"><span class="pl-pds">"</span>NEI<span class="pl-pds">"</span></span>)){</td>
</tr>
<tr>
<td id="L3" class="blob-num js-line-number" data-line-number="3"></td>
<td id="LC3" class="blob-code blob-code-inner js-file-line"> <span class="pl-smi">NEI</span> <span class="pl-k"><-</span> readRDS(<span class="pl-s"><span class="pl-pds">"</span>./data/summarySCC_PM25.rds<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L4" class="blob-num js-line-number" data-line-number="4"></td>
<td id="LC4" class="blob-code blob-code-inner js-file-line">}</td>
</tr>
<tr>
<td id="L5" class="blob-num js-line-number" data-line-number="5"></td>
<td id="LC5" class="blob-code blob-code-inner js-file-line"><span class="pl-k">if</span>(<span class="pl-k">!</span>exists(<span class="pl-s"><span class="pl-pds">"</span>SCC<span class="pl-pds">"</span></span>)){</td>
</tr>
<tr>
<td id="L6" class="blob-num js-line-number" data-line-number="6"></td>
<td id="LC6" class="blob-code blob-code-inner js-file-line"> <span class="pl-smi">SCC</span> <span class="pl-k"><-</span> readRDS(<span class="pl-s"><span class="pl-pds">"</span>./data/Source_Classification_Code.rds<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L7" class="blob-num js-line-number" data-line-number="7"></td>
<td id="LC7" class="blob-code blob-code-inner js-file-line">}</td>
</tr>
<tr>
<td id="L8" class="blob-num js-line-number" data-line-number="8"></td>
<td id="LC8" class="blob-code blob-code-inner js-file-line"><span class="pl-c"><span class="pl-c">#</span> Have total emissions from PM2.5 decreased in the United States from 1999 to 2008? </span></td>
</tr>
<tr>
<td id="L9" class="blob-num js-line-number" data-line-number="9"></td>
<td id="LC9" class="blob-code blob-code-inner js-file-line"><span class="pl-c"><span class="pl-c">#</span> Using the base plotting system, make a plot showing the total PM2.5 emission from all sources </span></td>
</tr>
<tr>
<td id="L10" class="blob-num js-line-number" data-line-number="10"></td>
<td id="LC10" class="blob-code blob-code-inner js-file-line"><span class="pl-c"><span class="pl-c">#</span> for each of the years 1999, 2002, 2005, and 2008.</span></td>
</tr>
<tr>
<td id="L11" class="blob-num js-line-number" data-line-number="11"></td>
<td id="LC11" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L12" class="blob-num js-line-number" data-line-number="12"></td>
<td id="LC12" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">aggregatedTotalByYear</span> <span class="pl-k"><-</span> aggregate(<span class="pl-smi">Emissions</span> <span class="pl-k">~</span> <span class="pl-smi">year</span>, <span class="pl-smi">NEI</span>, <span class="pl-smi">sum</span>)</td>
</tr>
<tr>
<td id="L13" class="blob-num js-line-number" data-line-number="13"></td>
<td id="LC13" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L14" class="blob-num js-line-number" data-line-number="14"></td>
<td id="LC14" class="blob-code blob-code-inner js-file-line">png(<span class="pl-s"><span class="pl-pds">'</span>plot1.png<span class="pl-pds">'</span></span>)</td>
</tr>
<tr>
<td id="L15" class="blob-num js-line-number" data-line-number="15"></td>
<td id="LC15" class="blob-code blob-code-inner js-file-line">barplot(<span class="pl-v">height</span><span class="pl-k">=</span><span class="pl-smi">aggregatedTotalByYear</span><span class="pl-k">$</span><span class="pl-smi">Emissions</span>, <span class="pl-v">names.arg</span><span class="pl-k">=</span><span class="pl-smi">aggregatedTotalByYear</span><span class="pl-k">$</span><span class="pl-smi">year</span>, <span class="pl-v">xlab</span><span class="pl-k">=</span><span class="pl-s"><span class="pl-pds">"</span>years<span class="pl-pds">"</span></span>, <span class="pl-v">ylab</span><span class="pl-k">=</span>expression(<span class="pl-s"><span class="pl-pds">'</span>total PM<span class="pl-pds">'</span></span>[<span class="pl-c1">2.5</span>]<span class="pl-k">*</span><span class="pl-s"><span class="pl-pds">'</span> emission<span class="pl-pds">'</span></span>),<span class="pl-v">main</span><span class="pl-k">=</span>expression(<span class="pl-s"><span class="pl-pds">'</span>Total PM<span class="pl-pds">'</span></span>[<span class="pl-c1">2.5</span>]<span class="pl-k">*</span><span class="pl-s"><span class="pl-pds">'</span> emissions at various years<span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L16" class="blob-num js-line-number" data-line-number="16"></td>
<td id="LC16" class="blob-code blob-code-inner js-file-line">dev.off()</td>
</tr>
</table>
<div class="BlobToolbar position-absolute js-file-line-actions dropdown js-menu-container js-select-menu d-none" aria-hidden="true">
<button class="btn-octicon ml-0 px-2 p-0 bg-white border border-gray-dark rounded-1 dropdown-toggle js-menu-target" id="js-file-line-action-button" type="button" aria-expanded="false" aria-haspopup="true" aria-label="Inline file action toolbar" aria-controls="inline-file-actions">
<svg aria-hidden="true" class="octicon octicon-kebab-horizontal" height="16" version="1.1" viewBox="0 0 13 16" width="13"><path fill-rule="evenodd" d="M1.5 9a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3zm5 0a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3zm5 0a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3z"/></svg>
</button>
<div class="dropdown-menu-content js-menu-content" id="inline-file-actions">
<ul class="BlobToolbar-dropdown dropdown-menu dropdown-menu-se mt-2">
<li><a class="js-zeroclipboard dropdown-item" style="cursor:pointer;" id="js-copy-lines" data-original-text="Copy lines">Copy lines</a></li>
<li><a class="js-zeroclipboard dropdown-item" id= "js-copy-permalink" style="cursor:pointer;" data-original-text="Copy permalink">Copy permalink</a></li>
<li><a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/blame/1e065c8529a9f143621378c3748eac6a4f74afc5/plot1.R" class="dropdown-item js-update-url-with-hash" id="js-view-git-blame">View git blame</a></li>
<li><a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/issues/new" class="dropdown-item" id="js-new-issue">Open new issue</a></li>
</ul>
</div>
</div>
</div>
</div>
<button type="button" data-facebox="#jump-to-line" data-facebox-class="linejump" data-hotkey="l" class="d-none">Jump to Line</button>
<div id="jump-to-line" style="display:none">
<!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="" class="js-jump-to-line-form" method="get"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /></div>
<input class="form-control linejump-input js-jump-to-line-field" type="text" placeholder="Jump to line…" aria-label="Jump to line" autofocus>
<button type="submit" class="btn">Go</button>
</form> </div>
</div>
<div class="modal-backdrop js-touch-events"></div>
</div>
</div>
</div>
</div>
<div class="footer container-lg px-3" role="contentinfo">
<div class="position-relative d-flex flex-justify-between py-6 mt-6 f6 text-gray border-top border-gray-light ">
<ul class="list-style-none d-flex flex-wrap ">
<li class="mr-3">© 2018 <span title="0.13419s from unicorn-1768575-94zlz">GitHub</span>, Inc.</li>
<li class="mr-3"><a href="https://github.com/site/terms" data-ga-click="Footer, go to terms, text:terms">Terms</a></li>
<li class="mr-3"><a href="https://github.com/site/privacy" data-ga-click="Footer, go to privacy, text:privacy">Privacy</a></li>
<li class="mr-3"><a href="https://github.com/security" data-ga-click="Footer, go to security, text:security">Security</a></li>
<li class="mr-3"><a href="https://status.github.com/" data-ga-click="Footer, go to status, text:status">Status</a></li>
<li><a href="https://help.github.com" data-ga-click="Footer, go to help, text:help">Help</a></li>
</ul>
<a href="https://github.com" aria-label="Homepage" class="footer-octicon" title="GitHub">
<svg aria-hidden="true" class="octicon octicon-mark-github" height="24" version="1.1" viewBox="0 0 16 16" width="24"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"/></svg>
</a>
<ul class="list-style-none d-flex flex-wrap ">
<li class="mr-3"><a href="https://github.com/contact" data-ga-click="Footer, go to contact, text:contact">Contact GitHub</a></li>
<li class="mr-3"><a href="https://developer.github.com" data-ga-click="Footer, go to api, text:api">API</a></li>
<li class="mr-3"><a href="https://training.github.com" data-ga-click="Footer, go to training, text:training">Training</a></li>
<li class="mr-3"><a href="https://shop.github.com" data-ga-click="Footer, go to shop, text:shop">Shop</a></li>
<li class="mr-3"><a href="https://github.com/blog" data-ga-click="Footer, go to blog, text:blog">Blog</a></li>
<li><a href="https://github.com/about" data-ga-click="Footer, go to about, text:about">About</a></li>
</ul>
</div>
</div>
<div id="ajax-error-message" class="ajax-error-message flash flash-error">
<svg aria-hidden="true" class="octicon octicon-alert" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M8.865 1.52c-.18-.31-.51-.5-.87-.5s-.69.19-.87.5L.275 13.5c-.18.31-.18.69 0 1 .19.31.52.5.87.5h13.7c.36 0 .69-.19.86-.5.17-.31.18-.69.01-1L8.865 1.52zM8.995 13h-2v-2h2v2zm0-3h-2V6h2v4z"/></svg>
<button type="button" class="flash-close js-ajax-error-dismiss" aria-label="Dismiss error">
<svg aria-hidden="true" class="octicon octicon-x" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48z"/></svg>
</button>
You can't perform that action at this time.
</div>
<script crossorigin="anonymous" integrity="sha512-XMGJvyy1pIQdZi6FwfzPUDXHfItIkA7EL3jK0uSro6JSF0Tp76YxJNtflJlhbeQxOHaIj144gWd+J2ZmFUgFiQ==" src="https://assets-cdn.github.com/assets/frameworks-5cc189bf2cb5.js" type="application/javascript"></script>
<script async="async" crossorigin="anonymous" integrity="sha512-rLONI7YsyG4/uJsXApnqQsgsr0HKCVks4R8350jML9lNwjs9QLMu1O0aAAGCY0/i23WmiMEFB/xMBfvzEfKx8A==" src="https://assets-cdn.github.com/assets/github-acb38d23b62c.js" type="application/javascript"></script>
<div class="js-stale-session-flash stale-session-flash flash flash-warn flash-banner d-none">
<svg aria-hidden="true" class="octicon octicon-alert" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M8.865 1.52c-.18-.31-.51-.5-.87-.5s-.69.19-.87.5L.275 13.5c-.18.31-.18.69 0 1 .19.31.52.5.87.5h13.7c.36 0 .69-.19.86-.5.17-.31.18-.69.01-1L8.865 1.52zM8.995 13h-2v-2h2v2zm0-3h-2V6h2v4z"/></svg>
<span class="signed-in-tab-flash">You signed in with another tab or window. <a href="">Reload</a> to refresh your session.</span>
<span class="signed-out-tab-flash">You signed out in another tab or window. <a href="">Reload</a> to refresh your session.</span>
</div>
<div class="facebox" id="facebox" style="display:none;">
<div class="facebox-popup">
<div class="facebox-content" role="dialog" aria-labelledby="facebox-header" aria-describedby="facebox-description">
</div>
<button type="button" class="facebox-close js-facebox-close" aria-label="Close modal">
<svg aria-hidden="true" class="octicon octicon-x" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48z"/></svg>
</button>
</div>
</div>
</body>
</html>
| /plot4.R | no_license | asingh71DS/explor2 | R | false | false | 41,836 | r |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<link rel="dns-prefetch" href="https://assets-cdn.github.com">
<link rel="dns-prefetch" href="https://avatars0.githubusercontent.com">
<link rel="dns-prefetch" href="https://avatars1.githubusercontent.com">
<link rel="dns-prefetch" href="https://avatars2.githubusercontent.com">
<link rel="dns-prefetch" href="https://avatars3.githubusercontent.com">
<link rel="dns-prefetch" href="https://github-cloud.s3.amazonaws.com">
<link rel="dns-prefetch" href="https://user-images.githubusercontent.com/">
<link crossorigin="anonymous" href="https://assets-cdn.github.com/assets/frameworks-521cbf980c80.css" integrity="sha512-Uhy/mAyAx1HfsenmjQ85ASpOk5bjt2Ay03pNeixXIvkHlEm5S+N4u0HWfDGhvsGYx4bGyviXWGGPZeIffqYcNA==" media="all" rel="stylesheet" />
<link crossorigin="anonymous" href="https://assets-cdn.github.com/assets/github-bab09cdfa5e9.css" integrity="sha512-urCc36XpOB6NJpSUfwUO4198a84yfDnoKASZ+D+7pCjpTpQ3YrhkgX9SgIpI83PiKF87mXoMJHJ/nE0eXNeTqA==" media="all" rel="stylesheet" />
<link crossorigin="anonymous" href="https://assets-cdn.github.com/assets/site-e1e1bc98a53e.css" integrity="sha512-4eG8mKU+R9QAnMQwfSIgbo24hS+nUXxSuUs5G5LMQw+5wjC1TSKfgxJb2j61PXya94+wMwN1OT7r7MF5rbdUvw==" media="all" rel="stylesheet" />
<meta name="viewport" content="width=device-width">
<title>coursera-exploratory-data-analysis-course-project-2/plot1.R at master · TomLous/coursera-exploratory-data-analysis-course-project-2 · GitHub</title>
<link rel="search" type="application/opensearchdescription+xml" href="/opensearch.xml" title="GitHub">
<link rel="fluid-icon" href="https://github.com/fluidicon.png" title="GitHub">
<meta property="fb:app_id" content="1401488693436528">
<meta content="https://avatars1.githubusercontent.com/u/2259971?s=400&v=4" property="og:image" /><meta content="GitHub" property="og:site_name" /><meta content="object" property="og:type" /><meta content="TomLous/coursera-exploratory-data-analysis-course-project-2" property="og:title" /><meta content="https://github.com/TomLous/coursera-exploratory-data-analysis-course-project-2" property="og:url" /><meta content="Contribute to coursera-exploratory-data-analysis-course-project-2 development by creating an account on GitHub." property="og:description" />
<link rel="assets" href="https://assets-cdn.github.com/">
<meta name="pjax-timeout" content="1000">
<meta name="request-id" content="FE8C:57C0:21DA79D:38C3D41:5A76ACEB" data-pjax-transient>
<meta name="selected-link" value="repo_source" data-pjax-transient>
<meta name="google-site-verification" content="KT5gs8h0wvaagLKAVWq8bbeNwnZZK1r1XQysX3xurLU">
<meta name="google-site-verification" content="ZzhVyEFwb7w3e0-uOTltm8Jsck2F5StVihD0exw2fsA">
<meta name="google-site-verification" content="GXs5KoUUkNCoaAZn7wPN-t01Pywp9M3sEjnt_3_ZWPc">
<meta name="google-analytics" content="UA-3769691-2">
<meta content="collector.githubapp.com" name="octolytics-host" /><meta content="github" name="octolytics-app-id" /><meta content="https://collector.githubapp.com/github-external/browser_event" name="octolytics-event-url" /><meta content="FE8C:57C0:21DA79D:38C3D41:5A76ACEB" name="octolytics-dimension-request_id" /><meta content="iad" name="octolytics-dimension-region_edge" /><meta content="iad" name="octolytics-dimension-region_render" />
<meta content="https://github.com/hydro_browser_events" name="hydro-events-url" />
<meta content="/<user-name>/<repo-name>/blob/show" data-pjax-transient="true" name="analytics-location" />
<meta class="js-ga-set" name="dimension1" content="Logged Out">
<meta name="hostname" content="github.com">
<meta name="user-login" content="">
<meta name="expected-hostname" content="github.com">
<meta name="js-proxy-site-detection-payload" content="YTE4MWFlZDQzODA2NjIxNWZmYzQwNDQxODRiYTg4ODkwNmY2ZWUzZTJjM2I1MWQzOTY4MmJhMDEwNGYyNDY5N3x7InJlbW90ZV9hZGRyZXNzIjoiNzYuODQuNTQuNjMiLCJyZXF1ZXN0X2lkIjoiRkU4Qzo1N0MwOjIxREE3OUQ6MzhDM0Q0MTo1QTc2QUNFQiIsInRpbWVzdGFtcCI6MTUxNzcyNjk4NiwiaG9zdCI6ImdpdGh1Yi5jb20ifQ==">
<meta name="enabled-features" content="UNIVERSE_BANNER,FREE_TRIALS,MARKETPLACE_HERO_CARD_UPLOADER">
<meta name="html-safe-nonce" content="cd1c041f21fbbde1e93e0b11d81eabf1ab946fc8">
<meta http-equiv="x-pjax-version" content="2bcdfdf37090b189cdf7cd792d535310">
<link href="https://github.com/TomLous/coursera-exploratory-data-analysis-course-project-2/commits/master.atom" rel="alternate" title="Recent Commits to coursera-exploratory-data-analysis-course-project-2:master" type="application/atom+xml">
<meta name="description" content="Contribute to coursera-exploratory-data-analysis-course-project-2 development by creating an account on GitHub.">
<meta name="go-import" content="github.com/TomLous/coursera-exploratory-data-analysis-course-project-2 git https://github.com/TomLous/coursera-exploratory-data-analysis-course-project-2.git">
<meta content="2259971" name="octolytics-dimension-user_id" /><meta content="TomLous" name="octolytics-dimension-user_login" /><meta content="22311040" name="octolytics-dimension-repository_id" /><meta content="TomLous/coursera-exploratory-data-analysis-course-project-2" name="octolytics-dimension-repository_nwo" /><meta content="true" name="octolytics-dimension-repository_public" /><meta content="false" name="octolytics-dimension-repository_is_fork" /><meta content="22311040" name="octolytics-dimension-repository_network_root_id" /><meta content="TomLous/coursera-exploratory-data-analysis-course-project-2" name="octolytics-dimension-repository_network_root_nwo" /><meta content="false" name="octolytics-dimension-repository_explore_github_marketplace_ci_cta_shown" />
<link rel="canonical" href="https://github.com/TomLous/coursera-exploratory-data-analysis-course-project-2/blob/master/plot1.R" data-pjax-transient>
<meta name="browser-stats-url" content="https://api.github.com/_private/browser/stats">
<meta name="browser-errors-url" content="https://api.github.com/_private/browser/errors">
<link rel="mask-icon" href="https://assets-cdn.github.com/pinned-octocat.svg" color="#000000">
<link rel="icon" type="image/x-icon" class="js-site-favicon" href="https://assets-cdn.github.com/favicon.ico">
<meta name="theme-color" content="#1e2327">
<meta name="u2f-support" content="true">
</head>
<body class="logged-out env-production page-blob">
<div class="position-relative js-header-wrapper ">
<a href="#start-of-content" tabindex="1" class="px-2 py-4 show-on-focus js-skip-to-content">Skip to content</a>
<div id="js-pjax-loader-bar" class="pjax-loader-bar"><div class="progress"></div></div>
<header class="Header header-logged-out position-relative f4 py-3" role="banner">
<div class="container-lg d-flex px-3">
<div class="d-flex flex-justify-between flex-items-center">
<a class="header-logo-invertocat my-0" href="https://github.com/" aria-label="Homepage" data-ga-click="(Logged out) Header, go to homepage, icon:logo-wordmark">
<svg aria-hidden="true" class="octicon octicon-mark-github" height="32" version="1.1" viewBox="0 0 16 16" width="32"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"/></svg>
</a>
</div>
<div class="HeaderMenu HeaderMenu--bright d-flex flex-justify-between flex-auto">
<nav class="mt-0">
<ul class="d-flex list-style-none">
<li class="ml-2">
<a href="/features" class="js-selected-navigation-item HeaderNavlink px-0 py-2 m-0" data-ga-click="Header, click, Nav menu - item:features" data-selected-links="/features /features/project-management /features/code-review /features/project-management /features/integrations /features">
Features
</a> </li>
<li class="ml-4">
<a href="/business" class="js-selected-navigation-item HeaderNavlink px-0 py-2 m-0" data-ga-click="Header, click, Nav menu - item:business" data-selected-links="/business /business/security /business/customers /business">
Business
</a> </li>
<li class="ml-4">
<a href="/explore" class="js-selected-navigation-item HeaderNavlink px-0 py-2 m-0" data-ga-click="Header, click, Nav menu - item:explore" data-selected-links="/explore /trending /trending/developers /integrations /integrations/feature/code /integrations/feature/collaborate /integrations/feature/ship showcases showcases_search showcases_landing /explore">
Explore
</a> </li>
<li class="ml-4">
<a href="/marketplace" class="js-selected-navigation-item HeaderNavlink px-0 py-2 m-0" data-ga-click="Header, click, Nav menu - item:marketplace" data-selected-links=" /marketplace">
Marketplace
</a> </li>
<li class="ml-4">
<a href="/pricing" class="js-selected-navigation-item HeaderNavlink px-0 py-2 m-0" data-ga-click="Header, click, Nav menu - item:pricing" data-selected-links="/pricing /pricing/developer /pricing/team /pricing/business-hosted /pricing/business-enterprise /pricing">
Pricing
</a> </li>
</ul>
</nav>
<div class="d-flex">
<div class="d-lg-flex flex-items-center mr-3">
<div class="header-search scoped-search site-scoped-search js-site-search" role="search">
<!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="/TomLous/coursera-exploratory-data-analysis-course-project-2/search" class="js-site-search-form" data-scoped-search-url="/TomLous/coursera-exploratory-data-analysis-course-project-2/search" data-unscoped-search-url="/search" method="get"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /></div>
<label class="form-control header-search-wrapper js-chromeless-input-container">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/blob/master/plot1.R" class="header-search-scope no-underline">This repository</a>
<input type="text"
class="form-control header-search-input js-site-search-focus js-site-search-field is-clearable"
data-hotkey="s"
name="q"
value=""
placeholder="Search"
aria-label="Search this repository"
data-unscoped-placeholder="Search GitHub"
data-scoped-placeholder="Search"
autocapitalize="off">
<input type="hidden" class="js-site-search-type-field" name="type" >
</label>
</form></div>
</div>
<span class="d-inline-block">
<div class="HeaderNavlink px-0 py-2 m-0">
<a class="text-bold text-white no-underline" href="/login?return_to=%2FTomLous%2Fcoursera-exploratory-data-analysis-course-project-2%2Fblob%2Fmaster%2Fplot1.R" data-ga-click="(Logged out) Header, clicked Sign in, text:sign-in">Sign in</a>
<span class="text-gray">or</span>
<a class="text-bold text-white no-underline" href="/join?source=header-repo" data-ga-click="(Logged out) Header, clicked Sign up, text:sign-up">Sign up</a>
</div>
</span>
</div>
</div>
</div>
</header>
</div>
<div id="start-of-content" class="show-on-focus"></div>
<div id="js-flash-container">
</div>
<div role="main" class="application-main ">
<div itemscope itemtype="http://schema.org/SoftwareSourceCode" class="">
<div id="js-repo-pjax-container" data-pjax-container >
<div class="pagehead repohead instapaper_ignore readability-menu experiment-repo-nav ">
<div class="repohead-details-container clearfix container">
<ul class="pagehead-actions">
<li>
<a href="/login?return_to=%2FTomLous%2Fcoursera-exploratory-data-analysis-course-project-2"
class="btn btn-sm btn-with-count tooltipped tooltipped-n"
aria-label="You must be signed in to watch a repository" rel="nofollow">
<svg aria-hidden="true" class="octicon octicon-eye" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M8.06 2C3 2 0 8 0 8s3 6 8.06 6C13 14 16 8 16 8s-3-6-7.94-6zM8 12c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4zm2-4c0 1.11-.89 2-2 2-1.11 0-2-.89-2-2 0-1.11.89-2 2-2 1.11 0 2 .89 2 2z"/></svg>
Watch
</a>
<a class="social-count" href="/TomLous/coursera-exploratory-data-analysis-course-project-2/watchers"
aria-label="2 users are watching this repository">
2
</a>
</li>
<li>
<a href="/login?return_to=%2FTomLous%2Fcoursera-exploratory-data-analysis-course-project-2"
class="btn btn-sm btn-with-count tooltipped tooltipped-n"
aria-label="You must be signed in to star a repository" rel="nofollow">
<svg aria-hidden="true" class="octicon octicon-star" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M14 6l-4.9-.64L7 1 4.9 5.36 0 6l3.6 3.26L2.67 14 7 11.67 11.33 14l-.93-4.74z"/></svg>
Star
</a>
<a class="social-count js-social-count" href="/TomLous/coursera-exploratory-data-analysis-course-project-2/stargazers"
aria-label="1 user starred this repository">
1
</a>
</li>
<li>
<a href="/login?return_to=%2FTomLous%2Fcoursera-exploratory-data-analysis-course-project-2"
class="btn btn-sm btn-with-count tooltipped tooltipped-n"
aria-label="You must be signed in to fork a repository" rel="nofollow">
<svg aria-hidden="true" class="octicon octicon-repo-forked" height="16" version="1.1" viewBox="0 0 10 16" width="10"><path fill-rule="evenodd" d="M8 1a1.993 1.993 0 0 0-1 3.72V6L5 8 3 6V4.72A1.993 1.993 0 0 0 2 1a1.993 1.993 0 0 0-1 3.72V6.5l3 3v1.78A1.993 1.993 0 0 0 5 15a1.993 1.993 0 0 0 1-3.72V9.5l3-3V4.72A1.993 1.993 0 0 0 8 1zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3 10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3-10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"/></svg>
Fork
</a>
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/network" class="social-count"
aria-label="109 users forked this repository">
109
</a>
</li>
</ul>
<h1 class="public ">
<svg aria-hidden="true" class="octicon octicon-repo" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M4 9H3V8h1v1zm0-3H3v1h1V6zm0-2H3v1h1V4zm0-2H3v1h1V2zm8-1v12c0 .55-.45 1-1 1H6v2l-1.5-1.5L3 16v-2H1c-.55 0-1-.45-1-1V1c0-.55.45-1 1-1h10c.55 0 1 .45 1 1zm-1 10H1v2h2v-1h3v1h5v-2zm0-10H2v9h9V1z"/></svg>
<span class="author" itemprop="author"><a href="/TomLous" class="url fn" rel="author">TomLous</a></span><!--
--><span class="path-divider">/</span><!--
--><strong itemprop="name"><a href="/TomLous/coursera-exploratory-data-analysis-course-project-2" data-pjax="#js-repo-pjax-container">coursera-exploratory-data-analysis-course-project-2</a></strong>
</h1>
</div>
<nav class="reponav js-repo-nav js-sidenav-container-pjax container"
itemscope
itemtype="http://schema.org/BreadcrumbList"
role="navigation"
data-pjax="#js-repo-pjax-container">
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2" class="js-selected-navigation-item selected reponav-item" data-hotkey="g c" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches repo_packages /TomLous/coursera-exploratory-data-analysis-course-project-2" itemprop="url">
<svg aria-hidden="true" class="octicon octicon-code" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M9.5 3L8 4.5 11.5 8 8 11.5 9.5 13 14 8 9.5 3zm-5 0L0 8l4.5 5L6 11.5 2.5 8 6 4.5 4.5 3z"/></svg>
<span itemprop="name">Code</span>
<meta itemprop="position" content="1">
</a> </span>
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/issues" class="js-selected-navigation-item reponav-item" data-hotkey="g i" data-selected-links="repo_issues repo_labels repo_milestones /TomLous/coursera-exploratory-data-analysis-course-project-2/issues" itemprop="url">
<svg aria-hidden="true" class="octicon octicon-issue-opened" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 0 1 1.3 8c0-3.14 2.56-5.7 5.7-5.7zM7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7zm1 3H6v5h2V4zm0 6H6v2h2v-2z"/></svg>
<span itemprop="name">Issues</span>
<span class="Counter">0</span>
<meta itemprop="position" content="2">
</a> </span>
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/pulls" class="js-selected-navigation-item reponav-item" data-hotkey="g p" data-selected-links="repo_pulls /TomLous/coursera-exploratory-data-analysis-course-project-2/pulls" itemprop="url">
<svg aria-hidden="true" class="octicon octicon-git-pull-request" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M11 11.28V5c-.03-.78-.34-1.47-.94-2.06C9.46 2.35 8.78 2.03 8 2H7V0L4 3l3 3V4h1c.27.02.48.11.69.31.21.2.3.42.31.69v6.28A1.993 1.993 0 0 0 10 15a1.993 1.993 0 0 0 1-3.72zm-1 2.92c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zM4 3c0-1.11-.89-2-2-2a1.993 1.993 0 0 0-1 3.72v6.56A1.993 1.993 0 0 0 2 15a1.993 1.993 0 0 0 1-3.72V4.72c.59-.34 1-.98 1-1.72zm-.8 10c0 .66-.55 1.2-1.2 1.2-.65 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"/></svg>
<span itemprop="name">Pull requests</span>
<span class="Counter">0</span>
<meta itemprop="position" content="3">
</a> </span>
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/projects" class="js-selected-navigation-item reponav-item" data-hotkey="g b" data-selected-links="repo_projects new_repo_project repo_project /TomLous/coursera-exploratory-data-analysis-course-project-2/projects">
<svg aria-hidden="true" class="octicon octicon-project" height="16" version="1.1" viewBox="0 0 15 16" width="15"><path fill-rule="evenodd" d="M10 12h3V2h-3v10zm-4-2h3V2H6v8zm-4 4h3V2H2v12zm-1 1h13V1H1v14zM14 0H1a1 1 0 0 0-1 1v14a1 1 0 0 0 1 1h13a1 1 0 0 0 1-1V1a1 1 0 0 0-1-1z"/></svg>
Projects
<span class="Counter" >0</span>
</a>
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/pulse" class="js-selected-navigation-item reponav-item" data-selected-links="repo_graphs repo_contributors dependency_graph pulse /TomLous/coursera-exploratory-data-analysis-course-project-2/pulse">
<svg aria-hidden="true" class="octicon octicon-graph" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M16 14v1H0V0h1v14h15zM5 13H3V8h2v5zm4 0H7V3h2v10zm4 0h-2V6h2v7z"/></svg>
Insights
</a>
</nav>
</div>
<div class="container new-discussion-timeline experiment-repo-nav ">
<div class="repository-content ">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/blob/1e065c8529a9f143621378c3748eac6a4f74afc5/plot1.R" class="d-none js-permalink-shortcut" data-hotkey="y">Permalink</a>
<!-- blob contrib key: blob_contributors:v21:6f1b29b259ff64a69d0e995bc1ef9096 -->
<div class="file-navigation js-zeroclipboard-container">
<div class="select-menu branch-select-menu js-menu-container js-select-menu float-left">
<button class=" btn btn-sm select-menu-button js-menu-target css-truncate" data-hotkey="w"
type="button" aria-label="Switch branches or tags" aria-expanded="false" aria-haspopup="true">
<i>Branch:</i>
<span class="js-select-button css-truncate-target">master</span>
</button>
<div class="select-menu-modal-holder js-menu-content js-navigation-container" data-pjax>
<div class="select-menu-modal">
<div class="select-menu-header">
<svg aria-label="Close" class="octicon octicon-x js-menu-close" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48z"/></svg>
<span class="select-menu-title">Switch branches/tags</span>
</div>
<div class="select-menu-filters">
<div class="select-menu-text-filter">
<input type="text" aria-label="Filter branches/tags" id="context-commitish-filter-field" class="form-control js-filterable-field js-navigation-enable" placeholder="Filter branches/tags">
</div>
<div class="select-menu-tabs">
<ul>
<li class="select-menu-tab">
<a href="#" data-tab-filter="branches" data-filter-placeholder="Filter branches/tags" class="js-select-menu-tab" role="tab">Branches</a>
</li>
<li class="select-menu-tab">
<a href="#" data-tab-filter="tags" data-filter-placeholder="Find a tag…" class="js-select-menu-tab" role="tab">Tags</a>
</li>
</ul>
</div>
</div>
<div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="branches" role="menu">
<div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
<a class="select-menu-item js-navigation-item js-navigation-open selected"
href="/TomLous/coursera-exploratory-data-analysis-course-project-2/blob/master/plot1.R"
data-name="master"
data-skip-pjax="true"
rel="nofollow">
<svg aria-hidden="true" class="octicon octicon-check select-menu-item-icon" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M12 5l-8 8-4-4 1.5-1.5L4 10l6.5-6.5z"/></svg>
<span class="select-menu-item-text css-truncate-target js-select-menu-filter-text">
master
</span>
</a>
</div>
<div class="select-menu-no-results">Nothing to show</div>
</div>
<div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="tags">
<div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
</div>
<div class="select-menu-no-results">Nothing to show</div>
</div>
</div>
</div>
</div>
<div class="BtnGroup float-right">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/find/master"
class="js-pjax-capture-input btn btn-sm BtnGroup-item"
data-pjax
data-hotkey="t">
Find file
</a>
<button aria-label="Copy file path to clipboard" class="js-zeroclipboard btn btn-sm BtnGroup-item tooltipped tooltipped-s" data-copied-hint="Copied!" type="button">Copy path</button>
</div>
<div class="breadcrumb js-zeroclipboard-target">
<span class="repo-root js-repo-root"><span class="js-path-segment"><a href="/TomLous/coursera-exploratory-data-analysis-course-project-2" data-pjax="true"><span>coursera-exploratory-data-analysis-course-project-2</span></a></span></span><span class="separator">/</span><strong class="final-path">plot1.R</strong>
</div>
</div>
<div class="commit-tease">
<span class="float-right">
<a class="commit-tease-sha" href="/TomLous/coursera-exploratory-data-analysis-course-project-2/commit/1949a42fad4e79022eab30e82ad6cba319935ea3" data-pjax>
1949a42
</a>
<relative-time datetime="2014-07-27T13:30:31Z">Jul 27, 2014</relative-time>
</span>
<div>
<img alt="@TomLous" class="avatar" height="20" src="https://avatars3.githubusercontent.com/u/2259971?s=40&v=4" width="20" />
<a href="/TomLous" class="user-mention" rel="author">TomLous</a>
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/commit/1949a42fad4e79022eab30e82ad6cba319935ea3" class="message" data-pjax="true" title="Plot1 & 2">Plot1 & 2</a>
</div>
<div class="commit-tease-contributors">
<button type="button" class="btn-link muted-link contributors-toggle" data-facebox="#blob_contributors_box">
<strong>1</strong>
contributor
</button>
</div>
<div id="blob_contributors_box" style="display:none">
<h2 class="facebox-header" data-facebox-id="facebox-header">Users who have contributed to this file</h2>
<ul class="facebox-user-list" data-facebox-id="facebox-description">
<li class="facebox-user-list-item">
<img alt="@TomLous" height="24" src="https://avatars2.githubusercontent.com/u/2259971?s=48&v=4" width="24" />
<a href="/TomLous">TomLous</a>
</li>
</ul>
</div>
</div>
<div class="file">
<div class="file-header">
<div class="file-actions">
<div class="BtnGroup">
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/raw/master/plot1.R" class="btn btn-sm BtnGroup-item" id="raw-url">Raw</a>
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/blame/master/plot1.R" class="btn btn-sm js-update-url-with-hash BtnGroup-item" data-hotkey="b">Blame</a>
<a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/commits/master/plot1.R" class="btn btn-sm BtnGroup-item" rel="nofollow">History</a>
</div>
<a class="btn-octicon tooltipped tooltipped-nw"
href="https://desktop.github.com"
aria-label="Open this file in GitHub Desktop"
data-ga-click="Repository, open with desktop, type:windows">
<svg aria-hidden="true" class="octicon octicon-device-desktop" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M15 2H1c-.55 0-1 .45-1 1v9c0 .55.45 1 1 1h5.34c-.25.61-.86 1.39-2.34 2h8c-1.48-.61-2.09-1.39-2.34-2H15c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm0 9H1V3h14v8z"/></svg>
</a>
<button type="button" class="btn-octicon disabled tooltipped tooltipped-nw"
aria-label="You must be signed in to make or propose changes">
<svg aria-hidden="true" class="octicon octicon-pencil" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M0 12v3h3l8-8-3-3-8 8zm3 2H1v-2h1v1h1v1zm10.3-9.3L12 6 9 3l1.3-1.3a.996.996 0 0 1 1.41 0l1.59 1.59c.39.39.39 1.02 0 1.41z"/></svg>
</button>
<button type="button" class="btn-octicon btn-octicon-danger disabled tooltipped tooltipped-nw"
aria-label="You must be signed in to make or propose changes">
<svg aria-hidden="true" class="octicon octicon-trashcan" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M11 2H9c0-.55-.45-1-1-1H5c-.55 0-1 .45-1 1H2c-.55 0-1 .45-1 1v1c0 .55.45 1 1 1v9c0 .55.45 1 1 1h7c.55 0 1-.45 1-1V5c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm-1 12H3V5h1v8h1V5h1v8h1V5h1v8h1V5h1v9zm1-10H2V3h9v1z"/></svg>
</button>
</div>
<div class="file-info">
16 lines (14 sloc)
<span class="file-info-divider"></span>
746 Bytes
</div>
</div>
<div itemprop="text" class="blob-wrapper data type-r">
<table class="highlight tab-size js-file-line-container" data-tab-size="8">
<tr>
<td id="L1" class="blob-num js-line-number" data-line-number="1"></td>
<td id="LC1" class="blob-code blob-code-inner js-file-line"><span class="pl-c"><span class="pl-c">#</span># This first line will likely take a few seconds. Be patient!</span></td>
</tr>
<tr>
<td id="L2" class="blob-num js-line-number" data-line-number="2"></td>
<td id="LC2" class="blob-code blob-code-inner js-file-line"><span class="pl-k">if</span>(<span class="pl-k">!</span>exists(<span class="pl-s"><span class="pl-pds">"</span>NEI<span class="pl-pds">"</span></span>)){</td>
</tr>
<tr>
<td id="L3" class="blob-num js-line-number" data-line-number="3"></td>
<td id="LC3" class="blob-code blob-code-inner js-file-line"> <span class="pl-smi">NEI</span> <span class="pl-k"><-</span> readRDS(<span class="pl-s"><span class="pl-pds">"</span>./data/summarySCC_PM25.rds<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L4" class="blob-num js-line-number" data-line-number="4"></td>
<td id="LC4" class="blob-code blob-code-inner js-file-line">}</td>
</tr>
<tr>
<td id="L5" class="blob-num js-line-number" data-line-number="5"></td>
<td id="LC5" class="blob-code blob-code-inner js-file-line"><span class="pl-k">if</span>(<span class="pl-k">!</span>exists(<span class="pl-s"><span class="pl-pds">"</span>SCC<span class="pl-pds">"</span></span>)){</td>
</tr>
<tr>
<td id="L6" class="blob-num js-line-number" data-line-number="6"></td>
<td id="LC6" class="blob-code blob-code-inner js-file-line"> <span class="pl-smi">SCC</span> <span class="pl-k"><-</span> readRDS(<span class="pl-s"><span class="pl-pds">"</span>./data/Source_Classification_Code.rds<span class="pl-pds">"</span></span>)</td>
</tr>
<tr>
<td id="L7" class="blob-num js-line-number" data-line-number="7"></td>
<td id="LC7" class="blob-code blob-code-inner js-file-line">}</td>
</tr>
<tr>
<td id="L8" class="blob-num js-line-number" data-line-number="8"></td>
<td id="LC8" class="blob-code blob-code-inner js-file-line"><span class="pl-c"><span class="pl-c">#</span> Have total emissions from PM2.5 decreased in the United States from 1999 to 2008? </span></td>
</tr>
<tr>
<td id="L9" class="blob-num js-line-number" data-line-number="9"></td>
<td id="LC9" class="blob-code blob-code-inner js-file-line"><span class="pl-c"><span class="pl-c">#</span> Using the base plotting system, make a plot showing the total PM2.5 emission from all sources </span></td>
</tr>
<tr>
<td id="L10" class="blob-num js-line-number" data-line-number="10"></td>
<td id="LC10" class="blob-code blob-code-inner js-file-line"><span class="pl-c"><span class="pl-c">#</span> for each of the years 1999, 2002, 2005, and 2008.</span></td>
</tr>
<tr>
<td id="L11" class="blob-num js-line-number" data-line-number="11"></td>
<td id="LC11" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L12" class="blob-num js-line-number" data-line-number="12"></td>
<td id="LC12" class="blob-code blob-code-inner js-file-line"><span class="pl-smi">aggregatedTotalByYear</span> <span class="pl-k"><-</span> aggregate(<span class="pl-smi">Emissions</span> <span class="pl-k">~</span> <span class="pl-smi">year</span>, <span class="pl-smi">NEI</span>, <span class="pl-smi">sum</span>)</td>
</tr>
<tr>
<td id="L13" class="blob-num js-line-number" data-line-number="13"></td>
<td id="LC13" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L14" class="blob-num js-line-number" data-line-number="14"></td>
<td id="LC14" class="blob-code blob-code-inner js-file-line">png(<span class="pl-s"><span class="pl-pds">'</span>plot1.png<span class="pl-pds">'</span></span>)</td>
</tr>
<tr>
<td id="L15" class="blob-num js-line-number" data-line-number="15"></td>
<td id="LC15" class="blob-code blob-code-inner js-file-line">barplot(<span class="pl-v">height</span><span class="pl-k">=</span><span class="pl-smi">aggregatedTotalByYear</span><span class="pl-k">$</span><span class="pl-smi">Emissions</span>, <span class="pl-v">names.arg</span><span class="pl-k">=</span><span class="pl-smi">aggregatedTotalByYear</span><span class="pl-k">$</span><span class="pl-smi">year</span>, <span class="pl-v">xlab</span><span class="pl-k">=</span><span class="pl-s"><span class="pl-pds">"</span>years<span class="pl-pds">"</span></span>, <span class="pl-v">ylab</span><span class="pl-k">=</span>expression(<span class="pl-s"><span class="pl-pds">'</span>total PM<span class="pl-pds">'</span></span>[<span class="pl-c1">2.5</span>]<span class="pl-k">*</span><span class="pl-s"><span class="pl-pds">'</span> emission<span class="pl-pds">'</span></span>),<span class="pl-v">main</span><span class="pl-k">=</span>expression(<span class="pl-s"><span class="pl-pds">'</span>Total PM<span class="pl-pds">'</span></span>[<span class="pl-c1">2.5</span>]<span class="pl-k">*</span><span class="pl-s"><span class="pl-pds">'</span> emissions at various years<span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L16" class="blob-num js-line-number" data-line-number="16"></td>
<td id="LC16" class="blob-code blob-code-inner js-file-line">dev.off()</td>
</tr>
</table>
<div class="BlobToolbar position-absolute js-file-line-actions dropdown js-menu-container js-select-menu d-none" aria-hidden="true">
<button class="btn-octicon ml-0 px-2 p-0 bg-white border border-gray-dark rounded-1 dropdown-toggle js-menu-target" id="js-file-line-action-button" type="button" aria-expanded="false" aria-haspopup="true" aria-label="Inline file action toolbar" aria-controls="inline-file-actions">
<svg aria-hidden="true" class="octicon octicon-kebab-horizontal" height="16" version="1.1" viewBox="0 0 13 16" width="13"><path fill-rule="evenodd" d="M1.5 9a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3zm5 0a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3zm5 0a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3z"/></svg>
</button>
<div class="dropdown-menu-content js-menu-content" id="inline-file-actions">
<ul class="BlobToolbar-dropdown dropdown-menu dropdown-menu-se mt-2">
<li><a class="js-zeroclipboard dropdown-item" style="cursor:pointer;" id="js-copy-lines" data-original-text="Copy lines">Copy lines</a></li>
<li><a class="js-zeroclipboard dropdown-item" id= "js-copy-permalink" style="cursor:pointer;" data-original-text="Copy permalink">Copy permalink</a></li>
<li><a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/blame/1e065c8529a9f143621378c3748eac6a4f74afc5/plot1.R" class="dropdown-item js-update-url-with-hash" id="js-view-git-blame">View git blame</a></li>
<li><a href="/TomLous/coursera-exploratory-data-analysis-course-project-2/issues/new" class="dropdown-item" id="js-new-issue">Open new issue</a></li>
</ul>
</div>
</div>
</div>
</div>
<button type="button" data-facebox="#jump-to-line" data-facebox-class="linejump" data-hotkey="l" class="d-none">Jump to Line</button>
<div id="jump-to-line" style="display:none">
<!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="" class="js-jump-to-line-form" method="get"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /></div>
<input class="form-control linejump-input js-jump-to-line-field" type="text" placeholder="Jump to line…" aria-label="Jump to line" autofocus>
<button type="submit" class="btn">Go</button>
</form> </div>
</div>
<div class="modal-backdrop js-touch-events"></div>
</div>
</div>
</div>
</div>
<div class="footer container-lg px-3" role="contentinfo">
<div class="position-relative d-flex flex-justify-between py-6 mt-6 f6 text-gray border-top border-gray-light ">
<ul class="list-style-none d-flex flex-wrap ">
<li class="mr-3">© 2018 <span title="0.13419s from unicorn-1768575-94zlz">GitHub</span>, Inc.</li>
<li class="mr-3"><a href="https://github.com/site/terms" data-ga-click="Footer, go to terms, text:terms">Terms</a></li>
<li class="mr-3"><a href="https://github.com/site/privacy" data-ga-click="Footer, go to privacy, text:privacy">Privacy</a></li>
<li class="mr-3"><a href="https://github.com/security" data-ga-click="Footer, go to security, text:security">Security</a></li>
<li class="mr-3"><a href="https://status.github.com/" data-ga-click="Footer, go to status, text:status">Status</a></li>
<li><a href="https://help.github.com" data-ga-click="Footer, go to help, text:help">Help</a></li>
</ul>
<a href="https://github.com" aria-label="Homepage" class="footer-octicon" title="GitHub">
<svg aria-hidden="true" class="octicon octicon-mark-github" height="24" version="1.1" viewBox="0 0 16 16" width="24"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"/></svg>
</a>
<ul class="list-style-none d-flex flex-wrap ">
<li class="mr-3"><a href="https://github.com/contact" data-ga-click="Footer, go to contact, text:contact">Contact GitHub</a></li>
<li class="mr-3"><a href="https://developer.github.com" data-ga-click="Footer, go to api, text:api">API</a></li>
<li class="mr-3"><a href="https://training.github.com" data-ga-click="Footer, go to training, text:training">Training</a></li>
<li class="mr-3"><a href="https://shop.github.com" data-ga-click="Footer, go to shop, text:shop">Shop</a></li>
<li class="mr-3"><a href="https://github.com/blog" data-ga-click="Footer, go to blog, text:blog">Blog</a></li>
<li><a href="https://github.com/about" data-ga-click="Footer, go to about, text:about">About</a></li>
</ul>
</div>
</div>
<div id="ajax-error-message" class="ajax-error-message flash flash-error">
<svg aria-hidden="true" class="octicon octicon-alert" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M8.865 1.52c-.18-.31-.51-.5-.87-.5s-.69.19-.87.5L.275 13.5c-.18.31-.18.69 0 1 .19.31.52.5.87.5h13.7c.36 0 .69-.19.86-.5.17-.31.18-.69.01-1L8.865 1.52zM8.995 13h-2v-2h2v2zm0-3h-2V6h2v4z"/></svg>
<button type="button" class="flash-close js-ajax-error-dismiss" aria-label="Dismiss error">
<svg aria-hidden="true" class="octicon octicon-x" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48z"/></svg>
</button>
You can't perform that action at this time.
</div>
<script crossorigin="anonymous" integrity="sha512-XMGJvyy1pIQdZi6FwfzPUDXHfItIkA7EL3jK0uSro6JSF0Tp76YxJNtflJlhbeQxOHaIj144gWd+J2ZmFUgFiQ==" src="https://assets-cdn.github.com/assets/frameworks-5cc189bf2cb5.js" type="application/javascript"></script>
<script async="async" crossorigin="anonymous" integrity="sha512-rLONI7YsyG4/uJsXApnqQsgsr0HKCVks4R8350jML9lNwjs9QLMu1O0aAAGCY0/i23WmiMEFB/xMBfvzEfKx8A==" src="https://assets-cdn.github.com/assets/github-acb38d23b62c.js" type="application/javascript"></script>
<div class="js-stale-session-flash stale-session-flash flash flash-warn flash-banner d-none">
<svg aria-hidden="true" class="octicon octicon-alert" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M8.865 1.52c-.18-.31-.51-.5-.87-.5s-.69.19-.87.5L.275 13.5c-.18.31-.18.69 0 1 .19.31.52.5.87.5h13.7c.36 0 .69-.19.86-.5.17-.31.18-.69.01-1L8.865 1.52zM8.995 13h-2v-2h2v2zm0-3h-2V6h2v4z"/></svg>
<span class="signed-in-tab-flash">You signed in with another tab or window. <a href="">Reload</a> to refresh your session.</span>
<span class="signed-out-tab-flash">You signed out in another tab or window. <a href="">Reload</a> to refresh your session.</span>
</div>
<div class="facebox" id="facebox" style="display:none;">
<div class="facebox-popup">
<div class="facebox-content" role="dialog" aria-labelledby="facebox-header" aria-describedby="facebox-description">
</div>
<button type="button" class="facebox-close js-facebox-close" aria-label="Close modal">
<svg aria-hidden="true" class="octicon octicon-x" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48z"/></svg>
</button>
</div>
</div>
</body>
</html>
|
# NJ Median Income Mapping (2015 ACS, table DP03).
# Maps median household income (variable HC01_VC85) for New Jersey
# at the census-tract and county level using tmap.
library(rgdal)
library(rgeos)
library(tidyverse)
library(tmap)
library(leaflet)
library(sf)
library(RColorBrewer)
# Set working directory; all reads below are relative to this folder.
setwd("~/Desktop/UChicago Harris/Spring 2019/GIS & Spatial Analysis /Assignment 2")
# Load 2015 ACS economic characteristics (DP03) for tracts and counties
census.data <- read.csv("NJ Tract ACS_15_5YR_DP03/ACS_15_5YR_DP03.csv")
county.census.data <- read.csv("NJ County ACS_15_5YR_DP03/ACS_15_5YR_DP03.csv")
# Load tract shapefile
output.area <- readOGR(".", "NJ_Tract")
# Load county shapefile.
# BUG FIX: the original chained assignment
#   county.output.area <- output.area <- readOGR(".", "NJ_County")
# overwrote the tract polygons, so every "tract" map below was
# actually drawn from county geometry.
county.output.area <- readOGR(".", "NJ_County")
plot(output.area)
# Merge ACS attributes onto the geometries (GEO.id2 is the FIPS code)
NJ <- merge(output.area, census.data, by.x = "GEOID", by.y = "GEO.id2")
NJ_County <- merge(county.output.area, county.census.data, by.x = "FIPSSTCO", by.y = "GEO.id2")
head(NJ_County@data)
# Quick choropleth maps of median household income
qtm(NJ, fill = "HC01_VC85")
qtm(NJ_County, fill = "HC01_VC85")
# Quantile map of tract median income.
# BUG FIX: tm_fill()'s argument is `palette`; the original spelled it
# `pallette`, so the Greens palette was silently ignored.
MedIncome2015_quant <-
  tm_shape(NJ) +
  tm_fill("HC01_VC85",
          palette = "Greens",
          style = "quantile",
          title = "Tract Median Income") +
  tm_borders(alpha = .3) +
  tm_compass() +
  tm_layout(legend.text.size = 0.9,
            legend.title.size = 1.0,
            legend.outside = TRUE,
            legend.position = c("right", "top"),
            frame = FALSE)
MedIncome2015_quant
# Natural-breaks (Jenks) map of tract median income
MedIncome2015_tracts <-
  tm_shape(NJ) +
  tm_fill("HC01_VC85",
          palette = "Greens",
          style = "jenks",
          title = "Tract Median Income") +
  tm_borders(alpha = .3) +
  tm_layout(legend.text.size = 0.9,
            legend.title.size = 1.0,
            legend.outside = TRUE,
            legend.position = c("right", "top"),
            frame = FALSE)
MedIncome2015_tracts
# Natural-breaks (Jenks) map of county median income
MedIncome2015_county <-
  tm_shape(NJ_County) +
  tm_fill("HC01_VC85",
          palette = "Greens",
          style = "jenks",
          title = "County Median Income") +
  tm_borders(alpha = .3) +
  tm_layout(legend.text.size = 0.9,
            legend.title.size = 1.0,
            legend.outside = TRUE,
            legend.position = c("right", "top"),
            frame = FALSE)
MedIncome2015_county
# View interactive maps side-by-side
current.mode <- tmap_mode("view")
tmap_arrange(MedIncome2015_county, MedIncome2015_tracts)
| /Assignment2 - NJ Median Income.R | no_license | spaykin/NJ-Income-Mapping | R | false | false | 2,487 | r | library(rgdal)
library(rgeos)
library(tidyverse)
library(tmap)
library(leaflet)
library(sf)
library(RColorBrewer)
#set working directory
setwd("~/Desktop/UChicago Harris/Spring 2019/GIS & Spatial Analysis /Assignment 2")
#load tract data - 2015
census.data <- read.csv("NJ Tract ACS_15_5YR_DP03/ACS_15_5YR_DP03.csv")
#load county data - 2015
county.census.data <- read.csv("NJ County ACS_15_5YR_DP03/ACS_15_5YR_DP03.csv")
#load tract shapefile
output.area <- readOGR(".", "NJ_Tract")
#load county shapefile
county.output.area <- output.area <- readOGR(".", "NJ_County")
plot(output.area)
#merge tract data
NJ <- merge(output.area, census.data, by.x="GEOID", by.y="GEO.id2")
#merge county data
NJ_County <- merge(county.output.area, county.census.data, by.x="FIPSSTCO", by.y="GEO.id2")
head(NJ_County@data)
#simple chloropleth map by tract median household income
qtm(NJ, fill = "HC01_VC85")
#simple chloropleth map by county median household income
qtm(NJ_County, fill = "HC01_VC85")
# quantilemap of tract median income
MedIncome2015_quant <-
tm_shape(NJ) +
tm_fill("HC01_VC85",
pallette = "Greens",
style = "quantile",
title = "Tract Median Income") +
tm_borders(alpha=.3) +
tm_compass() +
tm_layout(legend.text.size = 0.9,
legend.title.size = 1.0,
legend.outside = TRUE,
legend.position = c("right", "top"),
frame = FALSE)
MedIncome2015_quant
# natural breaks map of tract median income
MedIncome2015_tracts <-
tm_shape(NJ) +
tm_fill("HC01_VC85",
pallette = "Greens",
style = "jenks",
title = "Tract Median Income") +
tm_borders(alpha=.3) +
tm_layout(legend.text.size = 0.9,
legend.title.size = 1.0,
legend.outside = TRUE,
legend.position = c("right", "top"),
frame = FALSE)
MedIncome2015_tracts
# natural breaks map of county median income
MedIncome2015_county <-
tm_shape(NJ_County) +
tm_fill("HC01_VC85",
pallette = "Greens",
style = "jenks",
title = "County Median Income") +
tm_borders(alpha=.3) +
tm_layout(legend.text.size = 0.9,
legend.title.size = 1.0,
legend.outside = TRUE,
legend.position = c("right", "top"),
frame = FALSE)
MedIncome2015_county
# view interactive maps side-by-side
current.mode <- tmap_mode("view")
tmap_arrange(MedIncome2015_county, MedIncome2015_tracts)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/table.Distributions.R
\name{table.Distributions}
\alias{table.Distributions}
\title{Distributions Summary: Statistics and Stylized Facts}
\usage{
table.Distributions(R, scale = NA, digits = 4)
}
\arguments{
\item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of
asset returns}
\item{scale}{number of periods in a year (daily scale = 252, monthly scale =
12, quarterly scale = 4)}
\item{digits}{number of digits to round results to}
}
\description{
Table of Monthly standard deviation, Skewness, Sample standard deviation,
Kurtosis, Excess kurtosis, Sample skewness and Sample excess kurtosis
}
\examples{
data(managers)
table.Distributions(managers[,1:8])
require("Hmisc")
result = t(table.Distributions(managers[,1:8]))
textplot(format.df(result, na.blank=TRUE, numeric.dollar=FALSE, cdec=c(3,3,1)),
rmar = 0.8, cmar = 2, max.cex=.9, halign = "center", valign = "top",
row.valign="center", wrap.rownames=20, wrap.colnames=10,
col.rownames=c("red", rep("darkgray",5), rep("orange",2)), mar = c(0,0,3,0)+0.1)
title(main="Portfolio Distributions statistics")
}
\author{
Matthieu Lestel
}
\references{
Carl Bacon, \emph{Practical portfolio performance measurement
and attribution}, second edition 2008 p.87
}
\seealso{
\code{\link{StdDev.annualized}} \cr \code{\link{skewness}} \cr
\code{\link{kurtosis}}
}
| /man/table.Distributions.Rd | no_license | tsbattman/PerformanceAnalytics | R | false | false | 1,414 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/table.Distributions.R
\name{table.Distributions}
\alias{table.Distributions}
\title{Distributions Summary: Statistics and Stylized Facts}
\usage{
table.Distributions(R, scale = NA, digits = 4)
}
\arguments{
\item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of
asset returns}
\item{scale}{number of periods in a year (daily scale = 252, monthly scale =
12, quarterly scale = 4)}
\item{digits}{number of digits to round results to}
}
\description{
Table of Monthly standard deviation, Skewness, Sample standard deviation,
Kurtosis, Excess kurtosis, Sample skewness and Sample excess kurtosis
}
\examples{
data(managers)
table.Distributions(managers[,1:8])
require("Hmisc")
result = t(table.Distributions(managers[,1:8]))
textplot(format.df(result, na.blank=TRUE, numeric.dollar=FALSE, cdec=c(3,3,1)),
rmar = 0.8, cmar = 2, max.cex=.9, halign = "center", valign = "top",
row.valign="center", wrap.rownames=20, wrap.colnames=10,
col.rownames=c("red", rep("darkgray",5), rep("orange",2)), mar = c(0,0,3,0)+0.1)
title(main="Portfolio Distributions statistics")
}
\author{
Matthieu Lestel
}
\references{
Carl Bacon, \emph{Practical portfolio performance measurement
and attribution}, second edition 2008 p.87
}
\seealso{
\code{\link{StdDev.annualized}} \cr \code{\link{skewness}} \cr
\code{\link{kurtosis}}
}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinythemes)
# Define UI for application that draws a histogram
# UI definition: navbar layout with five tabs (maps, correlations,
# regressions, discussion, about).
# BUG FIX: removed the trailing comma after mainPanel(...) in the first
# tabPanel() -- it created an empty argument, which older htmltools/shiny
# versions reject with "argument is missing, with no default".
# Also fixed typos in the displayed text ("relatinoships", "statistially",
# "characterisics", "sstudents", "instituion").
# NOTE(review): the "Linear Relationships" tab reads input "plot_type" and
# renders output "line_plot", but the server never defines output$line_plot
# or uses input$plot_type -- confirm whether that tab is still under
# construction.
ui <- navbarPage(theme = shinytheme("superhero"),
                 "Innovation Rates and College Characteristics",
                 tabPanel("Mapping Characteristics",
                          # Application title
                          titlePanel("Heat Map of Different College Characteristics"),
                          # Pre-rendered heat-map image (see server: output$plot3)
                          mainPanel(
                              plotOutput("plot3")
                          )
                 ),
                 tabPanel("Linear Relationships",
                          fluidPage(
                              titlePanel("Correlations"),
                              sidebarLayout(
                                  sidebarPanel(
                                      selectInput(
                                          "plot_type",
                                          "College Characteristic",
                                          c("Admissions Rate" = "a", "Average SAT Score" = "b",
                                            "Size of Class" = "c", "Average Total Cost of Attendance" = "d",
                                            "Average Faculty Salary" = "e")
                                      )),
                                  mainPanel(plotOutput("line_plot")))
                          )),
                 tabPanel("Regression and Prediction",
                          h1("Selectiveness and Inventiveness"),
                          fixedRow(
                              column(4,
                                     p("The regression on the right shows that, quite intuitively,
                                       the less selective a college is (marked by an increase in admissions
                                       rate), the fewer share of inventors it has among its undergraduate population.
                                       However, there could be many confounding factors that could influence
                                       the results of this analysis.")),
                              column(5,
                                     plotOutput("plot1", height = "100%"))
                          ),
                          br(),
                          h1("Faculty Salary and Inventiveness"),
                          fixedRow(
                              column(4,
                                     p("The regression on the right shows that quite clearly, as faculty salary
                                       increases, the share of inventors increases as well - this is also quite intuitive.
                                       Like before, however, we aren't sure if there are confounding factors that could
                                       have affected this relationship.")),
                              column(5,
                                     plotOutput("plot2", height = "100%"))
                          ),
                          br(),
                          h1("Different SAT Subjects"),
                          fixedRow(
                              column(4,
                                     p("This maps (in 3 colors), different SAT midpoint scores in reading, writing, and math.
                                       The goal was to identify whether there was a difference in strength in subject area on
                                       inventors. As is quite evident in the graph, there is too much noise for us to make any
                                       conclusive judgments.")),
                              column(5,
                                     plotOutput("plot4", height = "100%"))
                          )),
                 tabPanel("Discussion",
                          h2("Modeling"),
                          p("For my graphics, I chose to include several types: one showing trends between inventor
                            rates and various college characteristics (there is a dropdown menu in the Modeling panel
                            that allows someone to choose which characteristic they want to examine - example characteristics
                            for people to choose from include admission rates, average faculty salary, undergraduate
                            population size, average SAT score, and many more), one showing the regression of admissions
                            rates and inventor/patent rates, and one that shows datapoints of colleges around the US
                            that are covered in my dataset"),
                          h2("Map of Colleges"),
                          p("In these graphics, I display a map of the US, with different regions highlighted (filled) depending
                            on various characteristics - for example, total patents, inventor rates, etc. The goal is so
                            that prospective students can see various college characteristics by region. As expected, areas
                            near Silicon Valley and Boston are higher than the average in terms of total patents and
                            inventor rates, but the maps also depict several surprising results as well in terms of what
                            regions are hotspots for inventiveness."),
                          h2("Relationships between College Characteristics and Patent Rates"),
                          p("These graphics depict any linear relationships (if applicable) between different college characteristics
                            and patent rates, as calculated by Opportunity Insights. An individual is defined as an inventor if he or
                            she is listed on a patent application between 2001 and 2012 or grant between 1996 and 2014
                            (see Section II.B of the paper - “Mobility Report Cards: The Role of Colleges in Intergenerational Mobility.”
                            The vast majority of characteristics seem to not really have any statistically significant correlations with
                            the share of inventors per college, although some characteristics do display rather counter-intuitive results."),
                          h2("Regression"),
                          p("Given the above, I now seek to actually test if there are causal relationships (using linear models and
                            logistic regressions) between specific college characteristics that I hypothesize could actually affect the
                            share of inventors that come out of a college. These results will give colleges insight into what
                            areas they should focus research on to improve the innovation and desire to build within their students."),
                          h2("Conclusion"),
                          p("Discussion of Final Results")),
                 tabPanel("About",
                          titlePanel("About This Project"),
                          h3("Project Background and Motivations"),
                          p("The goal of this project is to conduct a data-driven analysis on
                            the relationship between innovation rates by college (obtained from
                            Raj Chetty's Opportunity Insights) and institution level data obtained
                            from College Scorecard. With so much scrutiny going into whether college
                            is really worth it or not, the key factor that most researchers have
                            been examining is the earnings of students right out of college - this
                            is the most common barometer of success. However, having read Andrew Yang's
                            book 'Smart People Should Build Things', my main concern with the 'outcome'
                            of college is whether people are creating something of value - I define value
                            as higher innovation rates (which is measured through number of patents registered).
                            Throughout the project, I will run regressions on various data for innovation
                            rates (total number of patents granted to students, share of inventors among
                            students, total number of patent citations obtained by students, etc.) and
                            various college characteristics (percentage of high income students, average
                            SAT/ACT score, percentage of degrees awarded in various fields)."),
                          h3("Dataset 1 Used"),
                          p("The first data source comes from Raj Chetty's Opportunity Insights data, which harnesses
                            the power of big data to create scalable policy solutions to restore the American Dream.
                            In the data source, the table presents estimates of students' patent rates by the college
                            they attended, where college is what place each child attends as the institution the child
                            attended for the greatest amount of time during the 4 calendar years in which the child
                            turned 19-22. The most important variable is called 'inventor' and represents the share of
                            inventors among students at each college. For more information about the data source and
                            other data on upward mobility, race and economic opportunity in the US, impact of neighborhoods,
                            and impacts of teacher, please visit this",
                            a("link.", href = "https://opportunityinsights.org/data/")),
                          h3("Dataset 2 Used"),
                          p("The other dataset I use is from the College Scorecard project - this is designed to increase
                            transparency and provide data to help students and families compare college costs and outcomes
                            as they weigh the tradeoffs of different colleges. The data I choose to focus on are data files
                            with data about institutions as a whole and include variables that map each college's name,
                            location, degree type, institution revenue, academic areas offered, admissions rates,
                            SAT/ACT scores, and much more. The goal is to compare this dataset with innovation rates
                            measured at each college and see if there are correlations between characteristics of each
                            college and innovation rates. For more information about the College Scorecard project and
                            its data sources, please visit this",
                            a("link.", href = "https://collegescorecard.ed.gov/data/")),
                          h3("About Me"),
                          p("My name is Michael Chen and I'm a current sophomore at Harvard studying Applied Math and
                            Economics with a secondary in Government. As a co-founder of a biotech startup, I'm quite
                            intrigued by the innovation rates at colleges and how to increase this 'builder'
                            mentality among our students. My Github repo for this project can be accessed",
                            a("here.", href = "https://github.com/michaelzchen/finalproject.git"),
                            "You can reach me at: ",
                            a("chen_michael@college.harvard.edu", href = "chen_michael@college.harvard.edu"),
                            "or on",
                            a("LinkedIn.", href = "https://www.linkedin.com/in/michael-c-134086135/"))))
# Define server logic required to draw a histogram
# Server: serves four pre-rendered PNG images shipped alongside the app.
# deleteFile = FALSE keeps the static files on disk after each request.
# BUG FIX: the original renderImage() lists used `length = 800`; the
# attribute renderImage() recognizes for vertical size is `height`, so
# the images were rendered without an explicit height.
# NOTE(review): input$plot_type / output$line_plot from the
# "Linear Relationships" tab are not handled here, so that tab's plot
# never renders -- confirm whether this is intentional.
server <- function(input, output) {

    # Helper: build a renderImage() for one static image file,
    # replacing four copy-pasted blocks in the original.
    render_png <- function(file) {
        renderImage({
            img_path <- normalizePath(file.path(file))
            # Return the attributes for the <img> tag.
            list(src = img_path, width = 700, height = 800)
        }, deleteFile = FALSE)
    }

    output$plot1 <- render_png("Reg_Adm_Rates_Inventors.png")
    output$plot2 <- render_png("Reg_AvgFacSal_Inventors.png")
    output$plot3 <- render_png("MapsOriginal.png")
    output$plot4 <- render_png("Reg_SatAvgDiffSubj_Inventors.png")
}

# Run the application
shinyApp(ui = ui, server = server)
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinythemes)
# Define UI for application that draws a histogram
ui <- navbarPage(theme = shinytheme("superhero"),
"Innovation Rates and College Characteristics",
tabPanel("Mapping Characteristics",
# Application title
titlePanel("Heat Map of Different College Characteristics"),
# Show a plot of the generated distribution
mainPanel(
plotOutput("plot3")
),
),
tabPanel("Linear Relationships",
fluidPage(
titlePanel("Correlations"),
sidebarLayout(
sidebarPanel(
selectInput(
"plot_type",
"College Characteristic",
c("Admissions Rate" = "a", "Average SAT Score" = "b",
"Size of Class" = "c", "Average Total Cost of Attendance" = "d",
"Average Faculty Salary" = "e")
)),
mainPanel(plotOutput("line_plot")))
)),
tabPanel("Regression and Prediction",
h1("Selectiveness and Inventiveness"),
fixedRow(
column(4,
p("The regression on the right shows that, quite intuitively,
the less selective a college is (marked by an increase in admissions
rate), the fewer share of inventors it has among its undergraduate population.
However, there could be many confounding factors that could influence
the results of this analysis.")),
column(5,
plotOutput("plot1", height = "100%"))
),
br(),
h1("Faculty Salary and Inventiveness"),
fixedRow(
column(4,
p("The regression on the right shows that quite clearly, as faculty salary
increases, the share of inventors increases as well - this is also quite intuitive.
Like before, however, we aren't sure if there are confounding factors that could
have affected this relationship.")),
column(5,
plotOutput("plot2", height = "100%"))
),
br(),
h1("Different SAT Subjects"),
fixedRow(
column(4,
p("This maps (in 3 colors), different SAT midpoint scores in reading, writing, and math.
The goal was to identify whether there was a difference in strength in subject area on
inventors. As is quite evident in the graph, there is too much noise for us to make any
conclusive judgments.")),
column(5,
plotOutput("plot4", height = "100%"))
)),
tabPanel("Discussion",
h2("Modeling"),
p("For my graphics, I chose to include several types: one showing trends between inventor
rates and various college characteristics (there is a dropdown menu in the Modeling panel
that allows someone to choose which characteristic they want to examine - example characteristics
for people to choose from include admission rates, average faculty salary, undergraduate
population size, average SAT score, and many more), one showing the regression of admissions
rates and inventor/patent rates, and one that shows datapoints of colleges around the US
that are covered in my dataset"),
h2("Map of Colleges"),
p("In these graphics, I display a map of the US, with different regions highlighted (filled) depending
on various characteristics - for example, total patents, inventor rates, etc. The goal is so
that prospective students can see various college characteristics by region. As expected, areas
near Silicon Valley and Boston are higher than the average in terms of total patents and
inventor rates, but the maps also depict several surprising results as well in terms of what
regions are hotspots for inventiveness."),
h2("Relationships between College Characteristics and Patent Rates"),
p("These graphics depict any linear relatinoships (if applicable) between different college characteristics
and patent rates, as calculated by Opportunity Insights. An individual is defined as an inventor if he or
she is listed on a patent application between 2001 and 2012 or grant between 1996 and 2014
(see Section II.B of the paper - “Mobility Report Cards: The Role of Colleges in Intergenerational Mobility.
The vast majority of characteristics seem to not really have any statistially significant correlations with
the share of inventors per college, although some characterisics do display rather counter-intuitive results."),
h2("Regression"),
p("Given the above, I now seek to actually test if there are causal relationships (using linear models and
logistic regressions) between specific college characteristics that I hypothesize could actually affect the
share of inventors that come out of a college. These results will give colleges insight into what
areas they should focus research on to improve the innovation and desire to build within their students."),
h2("Conclusion"),
p("Discussion of Final Results")),
tabPanel("About",
titlePanel("About This Project"),
h3("Project Background and Motivations"),
p("The goal of this project is to conduct a data-driven analysis on
the relationship between innovation rates by college (obtained from
Raj Chetty's Opportunity Insights) and institution level data obtained
from College Scorecard. With so much scrutiny going into whether college
is really worth it or not, the key factor that most researchers have
been examining is the earnings of students right out of college - this
is the most common barometer of success. However, having read Andrew Yang's
book 'Smart People Should Build Things', my main concern with the 'outcome'
of college is whether people are creating something of value - I define value
as higher innovation rates (which is measured through number of patents registered).
Throughout the project, I will run regressions on various data for innovation
rates (total number of patents granted to sstudents, share of inventors among
students, total number of patent citations obtained by students, etc.) and
various college characteristics (percentage of high income students, average
SAT/ACT score, percentage of degrees awarded in various fields)."),
h3("Dataset 1 Used"),
p("The first data source comes from Raj Chetty's Opportunity Insights data, which harnesses
the power of big data to create scalable policy solutions to restore the American Dream.
In the data source, the table presents estimates of students' patent rates by the college
they attended, where college is what place each child attends as the institution the child
attended for the greatest amount of time during the 4 calendar years in which the child
turned 19-22. The most important variable is called 'inventor' and represents the share of
inventors among students at each college. For more information about the data source and
other data on upward mobility, race and economic opportunity in the US, impact of neighborhoods,
and impacts of teacher, please visit this",
a("link.", href = "https://opportunityinsights.org/data/")),
h3("Dataset 2 Used"),
p("The other dataset I use is from the College Scorecard project - this is designed to increase
transparency and provide data to help students and families compare college costs and outcomes
as they weigh the tradeoffs of different colleges. The data I choose to focus on are data files
with data about institutions as a whole and include variables that map each college's name,
location, degree type, instituion revenue, academic areas offered, admissions rates,
SAT/ACT scores, and much more. The goal is to compare this dataset with innovation rates
measured at each college and see if there are correlations between characteristics of each
college and innovation rates. For more information about the College Scorecard project and
its data sources, please visit this",
a("link.", href = "https://collegescorecard.ed.gov/data/")),
h3("About Me"),
p("My name is Michael Chen and I'm a current sophomore at Harvard studying Applied Math and
Economics with a secondary in Government. As a co-founder of a biotech startup, I'm quite
intrigued by the innovation rates at colleges and how to increase this 'builder'
mentality among our students. My Github repo for this project can be accessed",
a("here.", href = "https://github.com/michaelzchen/finalproject.git"),
"You can reach me at: ",
a("chen_michael@college.harvard.edu", href = "chen_michael@college.harvard.edu"),
"or on",
a("LinkedIn.", href = "https://www.linkedin.com/in/michael-c-134086135/"))))
# Define server logic required to draw a histogram
# Define server logic: serve four pre-rendered PNG figures to the UI.
server <- function(input, output) {

    # Build a renderImage handler for a static, pre-rendered PNG that lives
    # next to the app. deleteFile = FALSE because the file is not a
    # temporary image and must survive being served repeatedly.
    render_png <- function(png_name) {
        renderImage({
            filename <- normalizePath(file.path(png_name))
            # BUG FIX: the original returned `length = 800`, which is not an
            # <img> attribute shiny recognises; the intended key is `height`.
            list(src = filename, width = 700, height = 800)
        }, deleteFile = FALSE)
    }

    output$plot1 <- render_png("Reg_Adm_Rates_Inventors.png")
    output$plot2 <- render_png("Reg_AvgFacSal_Inventors.png")
    output$plot3 <- render_png("MapsOriginal.png")
    output$plot4 <- render_png("Reg_SatAvgDiffSubj_Inventors.png")
}

# Run the application
shinyApp(ui = ui, server = server)
## -------------------------------------------------------------------------- ##
## Penalized Multivariate Analysis
## http://cran.r-project.org/web/packages/PMA/index.html
## -------------------------------------------------------------------------- ##
| /models/Penalized Multivariate Analysis.R | no_license | data-science-competitions/Kaggle-Africa-Soil-Property-Prediction-Challenge | R | false | false | 257 | r | ## -------------------------------------------------------------------------- ##
## Penalized Multivariate Analysis
## http://cran.r-project.org/web/packages/PMA/index.html
## -------------------------------------------------------------------------- ##
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HigherOrderRiskPreferences.R
\name{compute_function}
\alias{compute_function}
\title{Computes a continuous and smooth utility function from the given utility points}
\usage{
compute_function(
x,
y,
ids = NULL,
mode = 1,
penalty_order = 4,
lambda_max = 10000,
current_lambda = 1,
ndx = 20,
deg = 6,
verbose = 0
)
}
\arguments{
\item{x}{a matrix or dataframe containing the certainty equivalents (x-values of utility points) for a given participant in each use case.}
\item{y}{can be a vector or a matrix representing the corresponding utility values (y-values of utility points).}
\item{ids}{a list containing the IDs of the participants. If not given, a list with IDs from 1 to n_observations will be created.}
\item{mode}{an integer between 0, 1, 2 representing the three possible modes: multiple imputation, optimal classification or 'weak' classification. Default is optimal classification (1).}
\item{penalty_order}{highest dimension (i.e., derivative) to penalize. Must be lower than deg.}
\item{lambda_max}{maximum lambda used for computing the optimal lambda. It is used only in multiple imputation (mode = 0) and optimal (mode = 1). The default value is 10000.}
\item{current_lambda}{lambda considered in the current iteration. Only used in multiple imputation (mode = 0) to create the combinations and as actual lambda value in 'weak' classification mode (mode = 2). The default value is 1.}
\item{ndx}{number of intervals to partition the distance between the lowest and highest x-values of the utility points.}
\item{deg}{degree of the B-spline basis. Determines the degree of the function to be estimated. If deg = 2, the estimated utility function will consist of quadratic functions.}
\item{verbose}{shows some information while the program is running.}
}
\value{
A smooth and continuous utility function.
}
\description{
Computes a continuous and smooth utility function from the given utility points
}
\examples{
\donttest{
x <- matrix(c(24.60938,34.76074,78.75,81.86035,128.5156,
7.109375,80.4248,113.75,115.083,135.0781,
3.828125,7.211914,8.75,124.1064,131.7969,
1.640625,2.084961,8.75,36.94824,98.98438), nrow = 4, ncol = 5, byrow = TRUE)
y <- c(0.25, 0.375, 0.5, 0.625, 0.75)
compute_function(x, y, verbose = 1)
}
}
| /man/compute_function.Rd | no_license | cran/utilityFunctionTools | R | false | true | 2,444 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HigherOrderRiskPreferences.R
\name{compute_function}
\alias{compute_function}
\title{Computes a continuous and smooth utility function from the given utility points}
\usage{
compute_function(
x,
y,
ids = NULL,
mode = 1,
penalty_order = 4,
lambda_max = 10000,
current_lambda = 1,
ndx = 20,
deg = 6,
verbose = 0
)
}
\arguments{
\item{x}{a matrix or dataframe containing the certainty equivalents (x-values of utility points) for a given participant in each use case.}
\item{y}{can be a vector or a matrix representing the corresponding utility values (y-values of utility points).}
\item{ids}{a list containing the IDs of the participants. If not given, a list with IDs from 1 to n_observations will be created.}
\item{mode}{an integer between 0, 1, 2 representing the three possible modes: multiple imputation, optimal classification or 'weak' classification. Default is optimal classification (1).}
\item{penalty_order}{highest dimension (i.e., derivative) to penalize. Must be lower than deg.}
\item{lambda_max}{maximum lambda used for computing the optimal lambda. It is used only in multiple imputation (mode = 0) and optimal (mode = 1). The default value is 10000.}
\item{current_lambda}{lambda considered in the current iteration. Only used in multiple imputation (mode = 0) to create the combinations and as actual lambda value in 'weak' classification mode (mode = 2). The default value is 1.}
\item{ndx}{number of intervals to partition the distance between the lowest and highest x-values of the utility points.}
\item{deg}{degree of the B-spline basis. Determines the degree of the function to be estimated. If deg = 2, the estimated utility function will consist of quadratic functions.}
\item{verbose}{shows some information while the program is running.}
}
\value{
A smooth and continuous utility function.
}
\description{
Computes a continuous and smooth utility function from the given utility points
}
\examples{
\donttest{
x <- matrix(c(24.60938,34.76074,78.75,81.86035,128.5156,
7.109375,80.4248,113.75,115.083,135.0781,
3.828125,7.211914,8.75,124.1064,131.7969,
1.640625,2.084961,8.75,36.94824,98.98438), nrow = 4, ncol = 5, byrow = TRUE)
y <- c(0.25, 0.375, 0.5, 0.625, 0.75)
compute_function(x, y, verbose = 1)
}
}
|
# Clean raw SONA web-experiment data: export participant demographics,
# per-subject trial counts, and trial-phase data for subjects who
# completed at least 22 rows.
setwd("/Users/loey/Desktop/Research/FakeNews/LyingKids/Expt1_web/analysis/SONA")

library(tidyverse)

raw <- read_csv("raw.csv")

# Number of unique participants in the raw file (prints when run)
length(unique(raw$subjID))

# One row of demographic info per participant
demograph <- distinct(select(raw, subjID, stillimages, comments))
write_csv(demograph, "demographic.csv")

# Trial-level data without the demographic columns
cleaned <- select(raw, -c(stillimages, comments))

# Rows completed by each subject, fewest first
trialsCompl <- cleaned %>%
  group_by(subjID) %>%
  summarise(n = n()) %>%
  arrange(n)
write_csv(trialsCompl, "subjByTrialsCompl.csv")

# Drop subjects with fewer than 22 completed rows
incomplete_ids <- trialsCompl %>%
  filter(n < 22) %>%
  pull(subjID)
cleaned <- cleaned %>%
  filter(!subjID %in% incomplete_ids)

allTrials <- cleaned

# Keep only rows from the main trial phase (exptPart == "trial")
df <- cleaned %>%
  filter(exptPart == "trial")
write_csv(df, "data.csv")
| /Expt1_web/analysis/SONA/cleanData.R | no_license | la-oey/LyingKids | R | false | false | 645 | r | setwd("/Users/loey/Desktop/Research/FakeNews/LyingKids/Expt1_web/analysis/SONA")
library(tidyverse)
raw <- read_csv("raw.csv")
length(unique(raw$subjID))
demograph <- raw %>%
select(subjID, stillimages, comments) %>%
distinct()
write_csv(demograph, "demographic.csv")
cleaned <- raw %>%
select(-c(stillimages, comments))
trialsCompl <- cleaned %>%
group_by(subjID) %>%
summarise(n = n()) %>%
arrange(n)
write_csv(trialsCompl, "subjByTrialsCompl.csv")
cleaned <- cleaned %>%
filter(!subjID %in% filter(trialsCompl, n<22)$subjID)
allTrials <- cleaned
df <- cleaned %>%
filter(exptPart == "trial")
write_csv(df, "data.csv")
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{LLcollapse_perm}
\alias{LLcollapse_perm}
\title{Permutation distribution of Li and Leal's collapsed variant statistic, v_c}
\usage{
LLcollapse_perm(ccdata, ccstatus, nperms, maf, maf_controls = FALSE)
}
\arguments{
\item{ccdata}{A matrix of markers (columns) and individuals (rows). Data are coded as the number of copies of the minor allele.}
\item{ccstatus}{A vector of binary phenotype labels. 0 = control, 1 = case.}
\item{nperms}{The number of permutations to perform}
\item{maf}{Only consider variants whose minor allele frequencies are <= maf}
\item{maf_controls}{If true, calculate mafs from controls only. Otherwise, use all individuals}
}
\value{
The permutation distribution of the collapsed variant statistic: the statistic recomputed under \code{nperms} random permutations of the case/control labels.
}
\description{
Permutation distribution of Li and Leal's collapsed variant statistic, v_c
}
\examples{
data(rec.ccdata)
status = c(rep(0,rec.ccdata$ncontrols),rep(1,rec.ccdata$ncases))
LL.perm = LLcollapse_perm(rec.ccdata$genos,status,10,0.01)
}
\references{
Li, B., & Leal, S. (2008). Methods for detecting associations with rare variants for common diseases: application to analysis of sequence data. The American Journal of Human Genetics, 83(3), 311-321.
}
| /man/LLcollapse_perm.Rd | permissive | jsanjak/buRden | R | false | false | 1,374 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{LLcollapse_perm}
\alias{LLcollapse_perm}
\title{Permutation distribution of Li and Leal's collapsed variant statistic, v_c}
\usage{
LLcollapse_perm(ccdata, ccstatus, nperms, maf, maf_controls = FALSE)
}
\arguments{
\item{ccdata}{A matrix of markers (columns) and individuals (rows). Data are coded as the number of copies of the minor allele.}
\item{ccstatus}{A vector of binary phenotype labels. 0 = control, 1 = case.}
\item{nperms}{The number of permutations to perform}
\item{maf}{Only consider variants whose minor allele frequencies are <= maf}
\item{maf_controls}{If true, calculate mafs from controls only. Otherwise, use all individuals}
}
\value{
The non-centrality parameter of a chi-squared distribution. This is obtained using the proportion of controls and cases with rare variants.
}
\description{
Permutation distribution of Li and Leal's collapsed variant statistic, v_c
}
\examples{
data(rec.ccdata)
status = c(rep(0,rec.ccdata$ncontrols),rep(1,rec.ccdata$ncases))
LL.perm = LLcollapse_perm(rec.ccdata$genos,status,10,0.01)
}
\references{
Li, B., & Leal, S. (2008). Methods for detecting associations with rare variants for common diseases: application to analysis of sequence data. The American Journal of Human Genetics, 83(3), 311-321.
}
|
library(org.Rn.eg.db)
library(org.Hs.eg.db)
library(xlsxjars)
library(rJava)
library(xlsx)
library(DOSE)
library(clusterProfiler)
library(psych)
library("readr")
library(caret)
library(randomForest)
library(e1071)
library(gbm)
library(mlbench)
library(caretEnsemble)
library(topGO)
library(drc)
library(parallel)
source('./function.R')

# Expression data exported from Toxygates and the Open TG-GATEs dose/attribute
# table. NOTE(review): the exact column layout is whatever dose_select()
# expects -- defined in function.R, not visible here.
data1 <- read.csv("./toxygates-2019-4-9-1477.csv", header = TRUE)
dose  <- read.csv("./open_tggates_cel_file_attribute.csv", header = TRUE)

# Split rat in-vitro measurements by dose level (Low / Middle / High),
# drop infinite values, then transpose.
rat_invitro_1        <- dose_select(data1)
rat_invitro_Low_1    <- doseSplit(rat_invitro_1, 'L')
rat_invitro_Middle_1 <- doseSplit(rat_invitro_1, 'M')
rat_invitro_High_1   <- doseSplit(rat_invitro_1, 'H')

rat_invitro_Low_2    <- omitInf(rat_invitro_Low_1)
rat_invitro_Middle_2 <- omitInf(rat_invitro_Middle_1)
rat_invitro_High_2   <- omitInf(rat_invitro_High_1)

rat_invitro_Low_3    <- tData(rat_invitro_Low_2)
rat_invitro_Middle_3 <- tData(rat_invitro_Middle_2)
rat_invitro_High_3   <- tData(rat_invitro_High_2)

# Match each drug to its administered dose at every dose level.
drugs1      <- colnames(rat_invitro_Low_2)
dose_low    <- doseMatch(dose, drugs1, 'Low',    'Rat', 'in vitro', 'Single', 'Liver')
dose_Middle <- doseMatch(dose, drugs1, 'Middle', 'Rat', 'in vitro', 'Single', 'Liver')
dose_High   <- doseMatch(dose, drugs1, 'High',   'Rat', 'in vitro', 'Single', 'Liver')

# Attach the dose column to each transposed expression table.
# BUG FIX: the Low branch previously did cbind(rat_invitro_Low_4, dose_low),
# referencing rat_invitro_Low_4 before it existed; it must start from
# rat_invitro_Low_3, mirroring the Middle and High branches.
rat_invitro_Low_4    <- as.data.frame(cbind(rat_invitro_Low_3, dose_low))
rat_invitro_Middle_4 <- as.data.frame(cbind(rat_invitro_Middle_3, dose_Middle))
rat_invitro_High_4   <- as.data.frame(cbind(rat_invitro_High_3, dose_High))

# Combine the three dose levels per gene and extract the 'Rinf' (plateau)
# parameter for each gene.
totalGene  <- geneCo(rat_invitro_Low_4, rat_invitro_Middle_4, rat_invitro_High_4)
totalGene1 <- toList(totalGene)
Rmax       <- paramExtr(totalGene, 'Rinf')

data_rmax_1 <- nameOmit(Rmax)
#data_rmax_2 <- geneMerge(data_rmax_1)
data_rmax_3 <- geneMean2(data_rmax_1)
# Drop the first column (equivalent to the original 2:length(data_rmax_3[1, ])).
data_rmax_4 <- data_rmax_3[, 2:ncol(data_rmax_3)]

# Differentially expressed genes and enrichment-based gene counts.
difGene_TG <- findDiffGene(data_rmax_4)
tgCount    <- egoSel(data_rmax_4, difGene_TG, colnames(data_rmax_4))

# Batch feature selection with several aggregation statistics.
# BUG FIX: the original mixed tgCount / tg_count (tg_count was never
# defined) and the sum/ path lacked its leading "./" unlike its siblings.
batchFS(data_rmax_4, tgCount, 0, 20, "./tg_invitro/mean/", 'mean')
batchFS(data_rmax_4, tgCount, 0, 20, "./tg_invitro/sum/",  'sum')
batchFS(data_rmax_4, tgCount, 0, 20, "./tg_invitro/sd/",   'sd')
batchFS(data_rmax_4, tgCount, 0, 20, "./tg_invitro/max/",  'max')

selData <- countSel(data_rmax_4, tgCount, 8)
#selData_TG <- countSel(data_rmax_4, tgCount, 5)
#selData_DM <- mergeDM[, colnames(selData_TG)]

# Duplicate the first column -- presumably a label column that the picture
# generator expects twice. TODO(review): confirm against function.R.
selData <- cbind(selData[, 1], selData)

# Generate per-sample matrices, min-max normalise them (small offset on the
# max to avoid division by zero), and write them out.
pic_score  <- picGener_score(selData, 'ego')
pic        <- picGener(selData, pic_score)
min_matrix <- getMost(pic, 'min')
max_matrix <- getMost(pic, 'max')
max_matrix <- max_matrix + 0.00001
pic_normal <- lapply(pic, MatrixNormal, max_matrix, min_matrix)
listWrite(pic_normal, './tg_rat_inviro/pic/')
| /deal_with_data_tg_invitro.R | no_license | wuhuichen/Hepatotoxicity | R | false | false | 2,691 | r | library(org.Rn.eg.db)
library(org.Hs.eg.db)
library(xlsxjars)
library(rJava)
library(xlsx)
library(DOSE)
library(clusterProfiler)
library(psych)
library("readr")
library(caret)
library(randomForest)
library(e1071)
library(gbm)
library(mlbench)
library(caretEnsemble)
library(topGO)
library(drc)
library(parallel)
source('./function.R')
data1<-read.csv("./toxygates-2019-4-9-1477.csv",header = TRUE)
dose<-read.csv("./open_tggates_cel_file_attribute.csv",header = TRUE)
rat_invitro_1<-dose_select(data1)
rat_invitro_Low_1<-doseSplit(rat_invitro_1,'L')
rat_invitro_Middle_1<-doseSplit(rat_invitro_1,'M')
rat_invitro_High_1<-doseSplit(rat_invitro_1,'H')
rat_invitro_Low_2<-omitInf(rat_invitro_Low_1)
rat_invitro_Middle_2<-omitInf(rat_invitro_Middle_1)
rat_invitro_High_2<-omitInf(rat_invitro_High_1)
rat_invitro_Low_3<-tData(rat_invitro_Low_2)
rat_invitro_Middle_3<-tData(rat_invitro_Middle_2)
rat_invitro_High_3<-tData(rat_invitro_High_2)
drugs1=colnames(rat_invitro_Low_2)
dose_low=doseMatch(dose,drugs1,'Low','Rat','in vitro','Single','Liver')
dose_Middle=doseMatch(dose,drugs1,'Middle','Rat','in vitro','Single','Liver')
dose_High=doseMatch(dose,drugs1,'High','Rat','in vitro','Single','Liver')
rat_invitro_Low_4<-cbind(rat_invitro_Low_4,dose_low)
rat_invitro_Low_4<-as.data.frame(rat_invitro_Low_4)
rat_invitro_Middle_4<-cbind(rat_invitro_Middle_3,dose_Middle)
rat_invitro_Middle_4<-as.data.frame(rat_invitro_Middle_4)
rat_invitro_High_4<-cbind(rat_invitro_High_3,dose_High)
rat_invitro_High_4<-as.data.frame(rat_invitro_High_4)
totalGene<-geneCo(rat_invitro_Low_4,rat_invitro_Middle_4,rat_invitro_High_4)
totalGene1<-toList(totalGene)
Rmax<-paramExtr(totalGene,'Rinf')
data_rmax_1<-nameOmit(Rmax)
#data_rmax_2<-geneMerge(data_rmax_1)
data_rmax_3<-geneMean2(data_rmax_1)
data_rmax_4=data_rmax_3[,2:length(data_rmax_3[1,])]
difGene_TG=findDiffGene(data_rmax_4)
tgCount=egoSel(data_rmax_4,difGene_TG,colnames(data_rmax_4))
batchFS(data_rmax_4,tgCount,0,20,"./tg_invitro/mean/",'mean')
batchFS(data_rmax_4,tg_count,0,20,"/tg_invitro/sum/",'sum')
batchFS(data_rmax_4,tg_count,0,20,"./tg_invitro/sd/",'sd')
batchFS(data_rmax_4,tg_count,0,20,"./tg_invitro/max/",'max')
selData=countSel(data_rmax_4,tg_count,8)
#selData_TG=countSel(data_rmax_4,tgCount,5)
#selData_DM=mergeDM[,colnames(selData_TG)]
selData=cbind(selData[,1],selData)
pic_score<-picGener_score(selData,'ego')
pic<-picGener(selData,pic_score)
min_matrix<-getMost(pic,'min')
max_matrix<-getMost(pic,'max')
max_matrix=max_matrix+0.00001
pic_normal<-lapply(pic,MatrixNormal,max_matrix,min_matrix)
listWrite(pic_normal,'./tg_rat_inviro/pic/')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/negative-hypergeometric-distribution.R
\name{NegHyper}
\alias{NegHyper}
\alias{dnhyper}
\alias{pnhyper}
\alias{qnhyper}
\alias{rnhyper}
\title{Negative hypergeometric distribution}
\usage{
dnhyper(x, n, m, r, log = FALSE)
pnhyper(q, n, m, r, lower.tail = TRUE, log.p = FALSE)
qnhyper(p, n, m, r, lower.tail = TRUE, log.p = FALSE)
rnhyper(nn, n, m, r)
}
\arguments{
\item{x, q}{vector of quantiles representing the number of white balls drawn without
replacement from an urn which contains both black and white balls.}
\item{n}{the number of black balls in the urn.}
\item{m}{the number of white balls in the urn.}
\item{r}{the number of white balls that needs to be drawn for the sampling
to be stopped.}
\item{log, log.p}{logical; if TRUE, probabilities p are given as log(p).}
\item{lower.tail}{logical; if TRUE (default), probabilities are \eqn{P[X \le x]}
otherwise, \eqn{P[X > x]}.}
\item{p}{vector of probabilities.}
\item{nn}{number of observations. If \code{length(n) > 1},
the length is taken to be the number required.}
}
\description{
Probability mass function, distribution function, quantile function and random generation
for the negative hypergeometric distribution.
}
\details{
Negative hypergeometric distribution describes number of balls \eqn{x} observed
until drawing without replacement to obtain \eqn{r} white balls
from the urn containing \eqn{m} white balls and \eqn{n} black balls,
and is defined as
\deqn{
f(x) = \frac{{x-1 \choose r-1}{m+n-x \choose m-r}}{{m+n \choose n}}
}{
f(x) = choose(x-1, r-1)*choose(m+n-x, m-r)/choose(m+n, n)
}
The algorithm used for calculating probability mass function,
cumulative distribution function and quantile function is based
on Fortran program NHYPERG created by Berry and Mielke (1996, 1998).
Random generation is done by inverse transform sampling.
}
\examples{
x <- rnhyper(1e5, 60, 35, 15)
xx <- 15:95
plot(prop.table(table(x)))
lines(xx, dnhyper(xx, 60, 35, 15), col = "red")
hist(pnhyper(x, 60, 35, 15))
xx <- seq(0, 100, by = 0.01)
plot(ecdf(x))
lines(xx, pnhyper(xx, 60, 35, 15), col = "red", lwd = 2)
}
\references{
Berry, K. J., & Mielke, P. W. (1998).
The negative hypergeometric probability distribution:
Sampling without replacement from a finite population.
Perceptual and motor skills, 86(1), 207-210.
\url{http://pms.sagepub.com/content/86/1/207.full.pdf}
Berry, K. J., & Mielke, P. W. (1996).
Exact confidence limits for population proportions based on the negative
hypergeometric probability distribution.
Perceptual and motor skills, 83(3 suppl), 1216-1218.
\url{http://pms.sagepub.com/content/83/3_suppl/1216.full.pdf}
Schuster, E. F., & Sype, W. R. (1987).
On the negative hypergeometric distribution.
International Journal of Mathematical Education in Science and Technology, 18(3), 453-459.
Chae, K. C. (1993).
Presenting the negative hypergeometric distribution to the introductory statistics courses.
International Journal of Mathematical Education in Science and Technology, 24(4), 523-526.
Jones, S.N. (2013). A Gaming Application of the Negative Hypergeometric Distribution.
UNLV Theses, Dissertations, Professional Papers, and Capstones. Paper 1846.
\url{http://digitalscholarship.unlv.edu/cgi/viewcontent.cgi?article=2847&context=thesesdissertations}
}
\seealso{
\code{\link[stats]{Hypergeometric}}
}
\concept{
Univariate
Discrete
}
\keyword{distribution}
| /man/NegHyper.Rd | no_license | alexfun/extraDistr | R | false | true | 3,448 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/negative-hypergeometric-distribution.R
\name{NegHyper}
\alias{NegHyper}
\alias{dnhyper}
\alias{pnhyper}
\alias{qnhyper}
\alias{rnhyper}
\title{Negative hypergeometric distribution}
\usage{
dnhyper(x, n, m, r, log = FALSE)
pnhyper(q, n, m, r, lower.tail = TRUE, log.p = FALSE)
qnhyper(p, n, m, r, lower.tail = TRUE, log.p = FALSE)
rnhyper(nn, n, m, r)
}
\arguments{
\item{x, q}{vector of quantiles representing the number of white balls drawn without
replacement from an urn which contains both black and white balls.}
\item{n}{the number of black balls in the urn.}
\item{m}{the number of white balls in the urn.}
\item{r}{the number of white balls that needs to be drawn for the sampling
to be stopped.}
\item{log, log.p}{logical; if TRUE, probabilities p are given as log(p).}
\item{lower.tail}{logical; if TRUE (default), probabilities are \eqn{P[X \le x]}
otherwise, \eqn{P[X > x]}.}
\item{p}{vector of probabilities.}
\item{nn}{number of observations. If \code{length(n) > 1},
the length is taken to be the number required.}
}
\description{
Probability mass function, distribution function, quantile function and random generation
for the negative hypergeometric distribution.
}
\details{
Negative hypergeometric distribution describes number of balls \eqn{x} observed
until drawing without replacement to obtain \eqn{r} white balls
from the urn containing \eqn{m} white balls and \eqn{n} black balls,
and is defined as
\deqn{
f(x) = \frac{{x-1 \choose r-1}{m+n-x \choose m-r}}{{m+n \choose n}}
}{
f(x) = choose(x-1, r-1)*choose(m+n-x, m-r)/choose(m+n, n)
}
The algorithm used for calculating probability mass function,
cumulative distribution function and quantile function is based
on Fortran program NHYPERG created by Berry and Mielke (1996, 1998).
Random generation is done by inverse transform sampling.
}
\examples{
x <- rnhyper(1e5, 60, 35, 15)
xx <- 15:95
plot(prop.table(table(x)))
lines(xx, dnhyper(xx, 60, 35, 15), col = "red")
hist(pnhyper(x, 60, 35, 15))
xx <- seq(0, 100, by = 0.01)
plot(ecdf(x))
lines(xx, pnhyper(xx, 60, 35, 15), col = "red", lwd = 2)
}
\references{
Berry, K. J., & Mielke, P. W. (1998).
The negative hypergeometric probability distribution:
Sampling without replacement from a finite population.
Perceptual and motor skills, 86(1), 207-210.
\url{http://pms.sagepub.com/content/86/1/207.full.pdf}
Berry, K. J., & Mielke, P. W. (1996).
Exact confidence limits for population proportions based on the negative
hypergeometric probability distribution.
Perceptual and motor skills, 83(3 suppl), 1216-1218.
\url{http://pms.sagepub.com/content/83/3_suppl/1216.full.pdf}
Schuster, E. F., & Sype, W. R. (1987).
On the negative hypergeometric distribution.
International Journal of Mathematical Education in Science and Technology, 18(3), 453-459.
Chae, K. C. (1993).
Presenting the negative hypergeometric distribution to the introductory statistics courses.
International Journal of Mathematical Education in Science and Technology, 24(4), 523-526.
Jones, S.N. (2013). A Gaming Application of the Negative Hypergeometric Distribution.
UNLV Theses, Dissertations, Professional Papers, and Capstones. Paper 1846.
\url{http://digitalscholarship.unlv.edu/cgi/viewcontent.cgi?article=2847&context=thesesdissertations}
}
\seealso{
\code{\link[stats]{Hypergeometric}}
}
\concept{
Univariate
Discrete
}
\keyword{distribution}
|
## Teaching script (live walkthrough): demonstrates RStudio projects, the
## working directory, workspace restoration, history, and saving outputs.
## check top of console and here that wd has changed
getwd()
## notice where file browser is now
a <- 2
b <- 3
siqSq <- 0.5 ## this typo was good actually
## NOTE: the typo above is deliberate teaching material -- the next line
## errors because 'sigSq' does not exist yet; it is then defined and retried.
x <- runif(40)
y <- a + b * x + rnorm(40, sd = sqrt(sigSq))
sigSq <- 0.5
y <- a + b * x + rnorm(40, sd = sqrt(sigSq))
## typing a bare name prints it -- inspect the simulated data
x
y
## wrapping an assignment in () both assigns and prints the value
(avgX <- mean(x))
write(avgX, "avgX.txt")
plot(x, y)
plot(x, y)
abline(a, b, col = "orange")
## save the current plot device to a PDF in the project directory
dev.print(pdf, "niftyPlot.pdf")
## notice that avgX.txt and niftyPlot.pdf have appeared in swc
## this is a good start to analysis ... moving towards saving the script
## visit History pane, select keeper commands, click to source, tidy if needed
## save it as toy.R, notice default location is swc and see it appear in swc
## quit!
ls()
## notice how much gets restored: workspace, files open for editing, history, etc.
## improve code by introducing n, change a or b or line color, etc.
## experiment with ways to re-run
## command enter to walk through
## mouse-y Run to walk through
## click on Source and the items in it mini-menu
## explore the w/ and w/o echo stuff
## visit PDF in external viewer to verify that it's changing
## one small step in your workflow, giant step towards reproducibility
## search for "niftyPlot"
## wean yourself from the mouse for things like loading data and writing anything to file
## end of this block
| /lessons/misc-r/code/block01_postProject.R | permissive | karthik/bc | R | false | false | 1,312 | r | ## check top of console and here that wd has changed
getwd()
## notice where file browser is now
a <- 2
b <- 3
siqSq <- 0.5 ## this typo was good actually
x <- runif(40)
y <- a + b * x + rnorm(40, sd = sqrt(sigSq))
sigSq <- 0.5
y <- a + b * x + rnorm(40, sd = sqrt(sigSq))
x
y
(avgX <- mean(x))
write(avgX, "avgX.txt")
plot(x, y)
plot(x, y)
abline(a, b, col = "orange")
dev.print(pdf, "niftyPlot.pdf")
## notice that avgX.txt and niftyPlot.pdf have appeared in swc
## this is a good start to analysis ... moving towards saving the script
## visit History pane, select keeper commands, click to source, tidy if needed
## save it as toy.R, notice default location is swc and see it appear in swc
## quit!
ls()
## notice how much gets restored: workspace, files open for editing, history, etc.
## improve code by introducing n, change a or b or line color, etc.
## experiment with ways to re-run
## command enter to walk through
## mouse-y Run to walk through
## click on Source and the items in it mini-menu
## explore the w/ and w/o echo stuff
## visit PDF in external viewer to verify that it's changing
## one small step in your workflow, giant step towards reproducibility
## search for "niftyPlot"
## wean yourself from the mouse for things like loading data and writing anything to file
## end of this block
|
# Human readable details of formatting done
# Shiny module UI: returns a tabPanel intended to hold a human-readable
# record of the formatting steps applied. Currently a placeholder -- the
# tab has a title but no content yet.
readableOutputUI <- function(id) {
  ns <- NS(id)  # module namespacing helper; not used yet (no namespaced inputs/outputs)
  tabPanel("Readable formatting record")
} | /Shiny Modules/readableOutput.R | no_license | zannah-rain/Feature-EngineeR | R | false | false | 140 | r | # Human readable details of formatting done
readableOutputUI <- function(id) {
ns <- NS(id)
tabPanel("Readable formatting record")
} |
#' Assemble a data frame of incident hospitalizations due to
#' COVID-19 or influenza as they were available as of a specified issue date.
#'
#' @param issue_date character issue date (i.e. report date) to use for
#' constructing truths in format 'yyyy-mm-dd'
#' @param location_code character vector of location codes. Default to NULL
#' This should be a list of state FIPS code and/or 'US'.
#' @param spatial_resolution character vector specifying spatial unit types to
#' include: state' and/or 'national'
#' This parameter will be ignored if location_code is provided.
#' @param temporal_resolution character vector specifying temporal resolution
#' to include: 'daily' or 'weekly'
#' @param measure character vector specifying measure of disease prevalence:
#' either 'hospitalizations' for COVID hospitalizations or
#' 'flu hospitalizations' for hospitalizations with influenza
#' @param replace_negatives boolean to replace negative incs with imputed data
#' Currently only FALSE is supported
#' @param adjustment_cases character vector specifying times and locations with
#' reporting anomalies to adjust. Only the value "none" is currently supported
#' @param adjustment_method string specifying how anomalies are adjusted.
#' Only the value "none" is currently supported.
#' @param geography character, which data to read. Only "US" is supported.
#' @param drop_last_date boolean indicating whether to drop the last 1 day of
#' data for the influenza and COVID hospitalization signals. The last day of
#' data from the HHS data source is unreliable, so it is recommended to set this
#' to `TRUE`. However, the default is `FALSE` so that the function maintains
#' fidelity to the authoritative data source.
#'
#' @return data frame with columns location (fips code), date, inc, and cum
#' all values of cum will currently be NA
#'
#' @export
load_healthdata_data <- function(
issue_date = NULL,
as_of = NULL,
location_code = NULL,
spatial_resolution = "state",
temporal_resolution = "weekly",
measure = c("hospitalizations", "flu hospitalizations"),
replace_negatives = FALSE,
adjustment_cases = "none",
adjustment_method = "none",
geography = "US",
drop_last_date = FALSE) {
# validate measure and pull in correct data set
measure <- match.arg(measure,
choices = c("hospitalizations", "flu hospitalizations"))
# retrieve data update history
healthdata_timeseries_history <- healthdata_timeseries_history()
healthdata_dailyrevision_history <- healthdata_dailyrevision_history()
all_avail_issue_date <- unique(c(healthdata_timeseries_history$issue_date,
healthdata_dailyrevision_history$issue_date))
# a vector of date objects
all_avail_issue_date <- unique(c(all_avail_issue_date,
covidData::healthdata_hosp_early_data$issue_date))
# validate issue_date and as_of
# and convert as_of and issue_date to date objects
if (!is.null(issue_date) && !is.null(as_of)) {
stop("Cannot provide both arguments issue_date and as_of to load_healthcare_data.")
} else if (is.null(issue_date) && is.null(as_of)) {
issue_date <- max(all_avail_issue_date)
} else if (!is.null(as_of)) {
avail_issues <- all_avail_issue_date[
all_avail_issue_date <= as.Date(as_of)
]
if (length(avail_issues) == 0) {
stop("Provided as_of date is earlier than all available issue dates.")
} else {
issue_date <- max(avail_issues)
}
} else {
issue_date <- lubridate::ymd(issue_date)
}
if (!(issue_date %in% all_avail_issue_date)) {
stop(paste0(
'Invalid issue date; must be one of: ',
paste0(all_avail_issue_date, collapse = ', ')
))
}
# validate spatial_resolution
spatial_resolution <- match.arg(
spatial_resolution,
choices = c("state", "national"),
several.ok = TRUE
)
# validate temporal_resolution
temporal_resolution <- match.arg(
temporal_resolution,
choices = c("daily", "weekly"),
several.ok = FALSE
)
# validate replace_negatives
if (replace_negatives) {
stop("Currently, only replace_negatives = FALSE is supported")
}
# validate adjustment_cases and adjustment_method
adjustment_cases <- match.arg(
adjustment_cases,
choices = "none",
several.ok = FALSE
)
adjustment_method <- match.arg(
adjustment_method,
choices = "none",
several.ok = FALSE
)
#download and preprocess data based on issue_date
raw_healthdata_data <- build_healthdata_data(
issue_date,
healthdata_timeseries_history,
healthdata_dailyrevision_history)
if (issue_date > as.Date("2021-03-12")) {
raw_healthdata_data <- preprocess_healthdata_data(
raw_healthdata_data,
covidData::fips_codes,
measure = measure)
} else if (measure == "flu hospitalizations") {
stop("Flu hospitalizations not available for specified issue date.")
}
# data with as_of/issue_date before/on "2021-03-12"
# is already in the correct format
healthdata_data <- raw_healthdata_data %>%
dplyr::pull(data) %>%
`[[`(1)
all_locations <- unique(healthdata_data$location)
if (!is.null(location_code)){
locations_to_keep <- match.arg(
location_code,
choices = all_locations,
several.ok = TRUE)
# ignore spatial_resolution
spatial_resolution <- NULL
} else {
# drop results for irrelevant locations
locations_to_keep <- NULL
if ("state" %in% spatial_resolution) {
locations_to_keep <- all_locations[all_locations != "US"]
}
if ("national" %in% spatial_resolution) {
locations_to_keep <- c(locations_to_keep, "US")
}
}
results <- healthdata_data %>%
dplyr::select(location, date, inc) %>%
dplyr::filter(location %in% locations_to_keep)
# If requested, drop the last day of data within each location
if (drop_last_date) {
results <- results %>%
dplyr::group_by(location) %>%
dplyr::filter(date < max(date)) %>%
dplyr::ungroup()
}
# aggregate daily incidence to weekly incidence
if (temporal_resolution == "weekly") {
results <- results %>%
dplyr::mutate(
sat_date = lubridate::ceiling_date(
lubridate::ymd(date), unit = "week") - 1
) %>%
dplyr::group_by(location) %>%
# if the last week is not complete, drop all observations from the
# previous Saturday in that week
dplyr::filter(
if (max(date) < max(sat_date)) date <= max(sat_date) - 7 else TRUE
) %>%
dplyr::ungroup() %>%
dplyr::select(-date) %>%
dplyr::rename(date = sat_date) %>%
dplyr::group_by(location, date) %>%
dplyr::summarize(inc = sum(inc, na.rm = FALSE), .groups = "drop")
}
# aggregate inc to get the correct cum
results <- results %>%
dplyr::mutate(
date = lubridate::ymd(date),
cum = results %>%
dplyr::group_by(location) %>%
dplyr::mutate(cum = cumsum(inc)) %>%
dplyr::ungroup() %>%
dplyr::pull(cum)
)
return(results)
}
#' Preprocess a raw healthdata data set: derive daily incidence from the
#' previous-day admission counts, shift each record back one day, append a
#' national ("US") aggregate, and replace state abbreviations with FIPS codes.
#'
#' @param raw_healthdata_data tibble with one row and columns issue_date and
#' data, where data is a list holding a single data frame with columns
#' date, state, previous_day_admission_adult_covid_confirmed, and
#' previous_day_admission_pediatric_covid_confirmed
#' @param fips_codes covidData::fips_codes data object
#' @param measure the measure to retrieve, either "hospitalizations" or
#' "flu hospitalizations"
#'
#' @return an object of the same shape as raw_healthdata_data whose inner
#' data frame has columns date, location, and inc
preprocess_healthdata_data <- function(raw_healthdata_data, fips_codes,
                                       measure) {
  result <- raw_healthdata_data
  daily <- result$data[[1]]
  # Derive the incidence column for the requested measure. Reported counts
  # are "previous day" admissions, so each record is shifted back one day.
  if (measure == "hospitalizations") {
    daily <- daily %>%
      dplyr::transmute(
        abbreviation = state,
        date = as.Date(date) - 1,
        inc = previous_day_admission_adult_covid_confirmed +
          previous_day_admission_pediatric_covid_confirmed
      )
  } else if (measure == "flu hospitalizations") {
    # the influenza column is absent from earlier issues of the source data
    if (!("previous_day_admission_influenza_confirmed" %in% colnames(daily))) {
      stop("Flu hospitalizations not available for specified issue date.")
    }
    daily <- daily %>%
      dplyr::transmute(
        abbreviation = state,
        date = as.Date(date) - 1,
        inc = previous_day_admission_influenza_confirmed
      )
  } else {
    stop("Invalid measure: must be either 'hospitalizations' or ",
         "'flu hospitalizations'.")
  }
  # National incidence is the per-day sum across all reporting locations.
  national <- daily %>%
    dplyr::group_by(date) %>%
    dplyr::summarise(inc = sum(inc), .groups = "drop") %>%
    dplyr::mutate(abbreviation = "US")
  daily <- dplyr::bind_rows(daily, national)
  # Translate state abbreviations into FIPS location codes.
  result$data[[1]] <- daily %>%
    dplyr::left_join(
      fips_codes %>% dplyr::select(location, abbreviation),
      by = "abbreviation"
    ) %>%
    dplyr::select(-abbreviation)
  return(result)
}
| /R/healthdata_data.R | no_license | reichlab/covidData | R | false | false | 9,387 | r | #' Assemble a data frame of incident hospitalizations due to
#' COVID-19 or influenza as they were available as of a specified issue date.
#'
#' @param issue_date character issue date (i.e. report date) to use for
#' constructing truths in format 'yyyy-mm-dd'
#' @param as_of character date in format 'yyyy-mm-dd'; the most recent issue
#' date available on or before this date is used. At most one of issue_date
#' and as_of may be provided.
#' @param location_code character vector of location codes. Default to NULL
#' This should be a list of state FIPS code and/or 'US'.
#' @param spatial_resolution character vector specifying spatial unit types to
#' include: 'state' and/or 'national'
#' This parameter will be ignored if location_code is provided.
#' @param temporal_resolution character vector specifying temporal resolution
#' to include: 'daily' or 'weekly'
#' @param measure character vector specifying measure of disease prevalence:
#' either 'hospitalizations' for COVID hospitalizations or
#' 'flu hospitalizations' for hospitalizations with influenza
#' @param replace_negatives boolean to replace negative incs with imputed data
#' Currently only FALSE is supported
#' @param adjustment_cases character vector specifying times and locations with
#' reporting anomalies to adjust. Only the value "none" is currently supported
#' @param adjustment_method string specifying how anomalies are adjusted.
#' Only the value "none" is currently supported.
#' @param geography character, which data to read. Only "US" is supported.
#' @param drop_last_date boolean indicating whether to drop the last 1 day of
#' data for the influenza and COVID hospitalization signals. The last day of
#' data from the HHS data source is unreliable, so it is recommended to set this
#' to `TRUE`. However, the default is `FALSE` so that the function maintains
#' fidelity to the authoritative data source.
#'
#' @return data frame with columns location (fips code), date, inc, and cum,
#' where cum is the cumulative sum of inc over time within each location
#'
#' @export
load_healthdata_data <- function(
  issue_date = NULL,
  as_of = NULL,
  location_code = NULL,
  spatial_resolution = "state",
  temporal_resolution = "weekly",
  measure = c("hospitalizations", "flu hospitalizations"),
  replace_negatives = FALSE,
  adjustment_cases = "none",
  adjustment_method = "none",
  geography = "US",
  drop_last_date = FALSE) {
  # validate measure and pull in correct data set
  measure <- match.arg(measure,
    choices = c("hospitalizations", "flu hospitalizations"))

  # retrieve data update history; the union of issue dates across the
  # timeseries history, daily revision history, and the early-data snapshot
  # defines every issue date that can be reconstructed
  healthdata_timeseries_history <- healthdata_timeseries_history()
  healthdata_dailyrevision_history <- healthdata_dailyrevision_history()
  all_avail_issue_date <- unique(c(healthdata_timeseries_history$issue_date,
    healthdata_dailyrevision_history$issue_date))
  # a vector of date objects
  all_avail_issue_date <- unique(c(all_avail_issue_date,
    covidData::healthdata_hosp_early_data$issue_date))

  # validate issue_date and as_of
  # and convert as_of and issue_date to date objects
  if (!is.null(issue_date) && !is.null(as_of)) {
    stop("Cannot provide both arguments issue_date and as_of to load_healthcare_data.")
  } else if (is.null(issue_date) && is.null(as_of)) {
    # neither supplied: default to the most recent available issue
    issue_date <- max(all_avail_issue_date)
  } else if (!is.null(as_of)) {
    # as_of supplied: use the latest issue on or before that date
    avail_issues <- all_avail_issue_date[
      all_avail_issue_date <= as.Date(as_of)
    ]
    if (length(avail_issues) == 0) {
      stop("Provided as_of date is earlier than all available issue dates.")
    } else {
      issue_date <- max(avail_issues)
    }
  } else {
    issue_date <- lubridate::ymd(issue_date)
  }
  if (!(issue_date %in% all_avail_issue_date)) {
    stop(paste0(
      'Invalid issue date; must be one of: ',
      paste0(all_avail_issue_date, collapse = ', ')
    ))
  }

  # validate spatial_resolution
  spatial_resolution <- match.arg(
    spatial_resolution,
    choices = c("state", "national"),
    several.ok = TRUE
  )

  # validate temporal_resolution
  temporal_resolution <- match.arg(
    temporal_resolution,
    choices = c("daily", "weekly"),
    several.ok = FALSE
  )

  # validate replace_negatives
  if (replace_negatives) {
    stop("Currently, only replace_negatives = FALSE is supported")
  }

  # validate adjustment_cases and adjustment_method
  adjustment_cases <- match.arg(
    adjustment_cases,
    choices = "none",
    several.ok = FALSE
  )
  adjustment_method <- match.arg(
    adjustment_method,
    choices = "none",
    several.ok = FALSE
  )

  # download and preprocess data based on issue_date
  raw_healthdata_data <- build_healthdata_data(
    issue_date,
    healthdata_timeseries_history,
    healthdata_dailyrevision_history)
  if (issue_date > as.Date("2021-03-12")) {
    raw_healthdata_data <- preprocess_healthdata_data(
      raw_healthdata_data,
      covidData::fips_codes,
      measure = measure)
  } else if (measure == "flu hospitalizations") {
    stop("Flu hospitalizations not available for specified issue date.")
  }
  # data with as_of/issue_date before/on "2021-03-12"
  # is already in the correct format
  healthdata_data <- raw_healthdata_data %>%
    dplyr::pull(data) %>%
    `[[`(1)

  all_locations <- unique(healthdata_data$location)
  if (!is.null(location_code)) {
    # an explicit location list overrides spatial_resolution
    locations_to_keep <- match.arg(
      location_code,
      choices = all_locations,
      several.ok = TRUE)
    # ignore spatial_resolution
    spatial_resolution <- NULL
  } else {
    # drop results for irrelevant locations
    locations_to_keep <- NULL
    if ("state" %in% spatial_resolution) {
      locations_to_keep <- all_locations[all_locations != "US"]
    }
    if ("national" %in% spatial_resolution) {
      locations_to_keep <- c(locations_to_keep, "US")
    }
  }

  results <- healthdata_data %>%
    dplyr::select(location, date, inc) %>%
    dplyr::filter(location %in% locations_to_keep)

  # If requested, drop the last day of data within each location
  if (drop_last_date) {
    results <- results %>%
      dplyr::group_by(location) %>%
      dplyr::filter(date < max(date)) %>%
      dplyr::ungroup()
  }

  # aggregate daily incidence to weekly incidence, assigning each day to the
  # epidemic week ending on the following Saturday
  if (temporal_resolution == "weekly") {
    results <- results %>%
      dplyr::mutate(
        sat_date = lubridate::ceiling_date(
          lubridate::ymd(date), unit = "week") - 1
      ) %>%
      dplyr::group_by(location) %>%
      # if the last week is not complete, drop all observations from the
      # previous Saturday in that week
      dplyr::filter(
        if (max(date) < max(sat_date)) date <= max(sat_date) - 7 else TRUE
      ) %>%
      dplyr::ungroup() %>%
      dplyr::select(-date) %>%
      dplyr::rename(date = sat_date) %>%
      dplyr::group_by(location, date) %>%
      dplyr::summarise(inc = sum(inc, na.rm = FALSE), .groups = "drop")
  }

  # convert dates and compute cumulative incidence as the running sum of inc
  # within each location; a grouped mutate preserves row order, so this
  # accumulates in the same row order as the incidence series itself
  results <- results %>%
    dplyr::mutate(date = lubridate::ymd(date)) %>%
    dplyr::group_by(location) %>%
    dplyr::mutate(cum = cumsum(inc)) %>%
    dplyr::ungroup()

  return(results)
}
#' Preprocess healthdata data set, calculating incidence, adjusting date, and
#' calculating national incidence.
#'
#' @param raw_healthdata_data tibble with one row and columns issue_date and
#' data. The data column should be a list of data frames, with column names
#' date, state, previous_day_admission_adult_covid_confirmed, and
#' previous_day_admission_pediatric_covid_confirmed (and, for the flu
#' measure, previous_day_admission_influenza_confirmed)
#' @param fips_codes covidData::fips_codes data object mapping state
#' abbreviations to FIPS location codes
#' @param measure the measure to retrieve, either "hospitalizations" or
#' "flu hospitalizations"
#'
#' @return an object of the same format as raw_healthdata_data, but whose
#' inner data frame has columns date, location, and inc
preprocess_healthdata_data <- function(raw_healthdata_data, fips_codes,
                                       measure) {
  result <- raw_healthdata_data
  # calculate incidence column, change date to previous day, and
  # rename state to abbreviation
  # (reported counts are "previous day" admissions, so each record is
  # shifted back one day)
  if (measure == "hospitalizations") {
    result$data[[1]] <- result$data[[1]] %>%
      dplyr::transmute(
        abbreviation = state,
        date = as.Date(date) - 1,
        inc = previous_day_admission_adult_covid_confirmed +
          previous_day_admission_pediatric_covid_confirmed
      )
  } else if (measure == "flu hospitalizations") {
    # the influenza column is absent from earlier issues of the source data
    if (!("previous_day_admission_influenza_confirmed" %in% colnames(result$data[[1]]))) {
      stop("Flu hospitalizations not available for specified issue date.")
    }
    result$data[[1]] <- result$data[[1]] %>%
      dplyr::transmute(
        abbreviation = state,
        date = as.Date(date) - 1,
        inc = previous_day_admission_influenza_confirmed
      )
  } else {
    stop("Invalid measure: must be either 'hospitalizations' or ",
         "'flu hospitalizations'.")
  }
  # add US location by summing across all others
  result$data[[1]] <- dplyr::bind_rows(
    result$data[[1]],
    result$data[[1]] %>%
      dplyr::group_by(date) %>%
      dplyr::summarise(inc = sum(inc), .groups = "drop") %>%
      dplyr::mutate(abbreviation = "US")
  )
  # add location column, remove abbreviation
  result$data[[1]] <- result$data[[1]] %>%
    dplyr::left_join(
      fips_codes %>% dplyr::select(location, abbreviation),
      by = "abbreviation"
    ) %>%
    dplyr::select(-abbreviation)
  return(result)
}
|
plot_fits <- function(prepped_fishery, fit){
  # Diagnostic plots comparing a scrooge model fit against the simulated
  # truth: recruitment deviates, fishing mortality (F), PPUE, and length
  # compositions (posterior predictive and estimated sampled proportions).
  #
  # NOTE(review): the prepped_fishery and fit arguments are immediately
  # overwritten below by hard-coded debugging code that pulls row 5 of a
  # global `case_studies` object, so the arguments are currently ignored --
  # confirm whether this override should be removed.
  # NOTE(review): tidybayes::spread_samples() and data_frame()/
  # as_data_frame() look like deprecated API names (spread_draws() /
  # tibble() are the replacements) -- confirm against package versions used.
  # check <- case_studies %>%
  #   filter(economic_model == 3, case_study == "realistic",
  #          period == "end", window == "10")
  #
  check <- case_studies %>% slice(5)
  prepped_fishery <- check$prepped_fishery[[1]]
  fit <- check$scrooge_fit[[1]]$result
  # map model year index (1..n) to the sampled calendar years
  sampled_years <- prepped_fishery$sampled_years
  sampled_years <- data_frame(year =1:length(sampled_years),
                              sampled_year = sampled_years)
  # posterior draws of recruitment deviates vs the simulated (true) deviates
  rec_devs <- tidybayes::spread_samples(fit, rec_dev_t[year])
  true_recdevs <- prepped_fishery$simed_fishery %>%
    group_by(year) %>%
    summarise(true_rec_dev = exp(unique(rec_dev))) %>%
    ungroup() %>%
    mutate(year = 1:nrow(.))
  # NOTE(review): this ggplot object is neither assigned nor returned, so
  # inside a function its value is discarded -- confirm intent.
  rec_devs %>%
    left_join(true_recdevs, by = "year") %>%
    ggplot(aes(year,rec_dev_t, group = .iteration)) +
    geom_line(alpha = 0.25) +
    geom_point(aes(year,true_rec_dev), color = "red") +
    geom_hline(aes(yintercept = 1), color = "red")
  # posterior draws of F, re-indexed from model years to calendar years
  f_t <- tidybayes::spread_samples(fit, f_t[year]) %>%
    ungroup() %>%
    left_join(sampled_years, by = "year") %>%
    mutate(year = sampled_year) %>%
    select(-sampled_year)
  true_f <- check$prepped_fishery[[1]]$simed_fishery %>%
    group_by(year) %>%
    summarise(true_f = unique(f)) %>%
    mutate(year = year - min(year) + 1)
  # 50% / 90% posterior intervals for F with simulated truth overlaid
  f_plot <- f_t %>%
    left_join(true_f, by = "year") %>%
    group_by(year) %>%
    summarise(
      lower_90 = quantile(f_t, 0.05),
      upper_90 = quantile(f_t, 0.95),
      lower_50 = quantile(f_t, 0.25),
      upper_50 = quantile(f_t, 0.75),
      mean_f = mean(f_t),
      true_f = mean(true_f)) %>%
    ggplot() +
    geom_ribbon(aes(year, ymin = lower_90, ymax = upper_90), fill = "lightgrey") +
    geom_ribbon(aes(year, ymin = lower_50, ymax = upper_50), fill = "darkgrey") +
    geom_line(aes(year,mean_f), color = "steelblue") +
    geom_point(aes(year, true_f), fill = "tomato", size = 4, shape = 21) +
    labs(y = "F", x = "Year")
  f_plot
  # NOTE(review): delta_f is computed but never used below -- confirm
  # whether it can be dropped.
  delta_f <- tidybayes::spread_samples(fit, delta_f[year]) %>%
    ungroup() %>%
    left_join(sampled_years, by = "year") %>%
    mutate(year = sampled_year) %>%
    select(-sampled_year)
  # posterior draws of predicted PPUE, re-indexed to calendar years
  ppue <- tidybayes::spread_samples(fit, ppue_hat[year]) %>%
    ungroup() %>%
    left_join(sampled_years, by = "year") %>%
    mutate(year = sampled_year) %>%
    select(-sampled_year)
  # observed PPUE, rescaled to a maximum of 1
  true_ppue <-
    data_frame(year = sampled_years$sampled_year,
               true_ppue = prepped_fishery$scrooge_data$ppue_t) %>%
    ungroup() %>%
    mutate(true_ppue = true_ppue / max(true_ppue))
  # 50% / 90% posterior intervals for PPUE with observed values overlaid
  ppue_plot <- ppue %>%
    left_join(true_ppue, by = "year") %>%
    mutate(resid = ppue_hat - true_ppue) %>%
    group_by(year) %>%
    summarise(
      lower_90 = quantile(ppue_hat, 0.05),
      upper_90 = quantile(ppue_hat, 0.95),
      lower_50 = quantile(ppue_hat, 0.25),
      upper_50 = quantile(ppue_hat, 0.75),
      mean_val = mean(ppue_hat),
      true_val = mean(true_ppue)) %>%
    ggplot() +
    geom_ribbon(aes(year, ymin = lower_90, ymax = upper_90), fill = "lightgrey") +
    geom_ribbon(aes(year, ymin = lower_50, ymax = upper_50), fill = "darkgrey") +
    geom_line(aes(year,mean_val), color = "steelblue") +
    geom_point(aes(year, true_val), fill = "tomato", size = 4, shape = 21) +
    labs(y = "PPUE", x = "Year")
  # observed length compositions, normalized to proportions within year
  observed_lcomps <- prepped_fishery$scrooge_data$length_comps %>%
    as_data_frame() %>%
    mutate(year = sampled_years$sampled_year) %>%
    gather(lbin, numbers, -year) %>%
    mutate(lbin = as.numeric(lbin)) %>%
    group_by(year) %>%
    mutate(numbers = numbers / sum(numbers))
  # posterior predictive numbers at length, re-indexed to calendar years
  pp_n_tl <- tidybayes::spread_samples(fit, n_tl[year,length_bin]) %>%
    ungroup() %>%
    left_join(sampled_years, by = "year") %>%
    mutate(year = sampled_year) %>%
    select(-sampled_year)
  # 90% posterior predictive band per length bin vs observed proportions
  pp_length_plot <- pp_n_tl %>%
    group_by(year, .chain,.iteration) %>%
    mutate(p_n_tl = n_tl / sum(n_tl)) %>%
    group_by(year, .chain, length_bin) %>%
    summarise(lower_90 = quantile(p_n_tl,0.05),
              upper_90 = quantile(p_n_tl,0.95)) %>%
    ggplot() +
    geom_ribbon(aes(x = length_bin, ymin = lower_90, ymax = upper_90), fill = "lightgrey") +
    facet_wrap(~year) +
    theme_minimal() +
    geom_point(data = observed_lcomps, aes(lbin, numbers), size = .5, alpha = 0.5, color = "red")
  # estimated sampled length-bin proportions, re-indexed to calendar years
  lcomps <- tidybayes::spread_samples(fit, p_lbin_sampled[year,lbin]) %>%
    ungroup() %>%
    left_join(sampled_years, by = "year") %>%
    mutate(year = sampled_year) %>%
    select(-sampled_year)
  # posterior-mean estimated length composition vs observed proportions.
  # NOTE(review): the function ends with this assignment, so it returns
  # length_plot invisibly; all other plot objects above are discarded --
  # confirm whether a list of plots should be returned instead.
  length_plot <- lcomps %>%
    group_by(year, lbin) %>%
    summarise(mean_n = mean(p_lbin_sampled)) %>%
    group_by(year) %>%
    mutate(mean_n = mean_n / sum(mean_n)) %>%
    ggplot() +
    geom_line(aes(lbin, mean_n, color = year, group = year),show.legend = F) +
    geom_point(data = observed_lcomps, aes(lbin, numbers), size = .5, alpha = 0.5) +
    facet_wrap(~year) +
    theme_classic()
} | /functions/plot_fits.R | permissive | DanOvando/scrooge | R | false | false | 4,913 | r | plot_fits <- function(prepped_fishery, fit){
# check <- case_studies %>%
# filter(economic_model == 3, case_study == "realistic",
# period == "end", window == "10")
#
check <- case_studies %>% slice(5)
prepped_fishery <- check$prepped_fishery[[1]]
fit <- check$scrooge_fit[[1]]$result
sampled_years <- prepped_fishery$sampled_years
sampled_years <- data_frame(year =1:length(sampled_years),
sampled_year = sampled_years)
rec_devs <- tidybayes::spread_samples(fit, rec_dev_t[year])
true_recdevs <- prepped_fishery$simed_fishery %>%
group_by(year) %>%
summarise(true_rec_dev = exp(unique(rec_dev))) %>%
ungroup() %>%
mutate(year = 1:nrow(.))
rec_devs %>%
left_join(true_recdevs, by = "year") %>%
ggplot(aes(year,rec_dev_t, group = .iteration)) +
geom_line(alpha = 0.25) +
geom_point(aes(year,true_rec_dev), color = "red") +
geom_hline(aes(yintercept = 1), color = "red")
f_t <- tidybayes::spread_samples(fit, f_t[year]) %>%
ungroup() %>%
left_join(sampled_years, by = "year") %>%
mutate(year = sampled_year) %>%
select(-sampled_year)
true_f <- check$prepped_fishery[[1]]$simed_fishery %>%
group_by(year) %>%
summarise(true_f = unique(f)) %>%
mutate(year = year - min(year) + 1)
f_plot <- f_t %>%
left_join(true_f, by = "year") %>%
group_by(year) %>%
summarise(
lower_90 = quantile(f_t, 0.05),
upper_90 = quantile(f_t, 0.95),
lower_50 = quantile(f_t, 0.25),
upper_50 = quantile(f_t, 0.75),
mean_f = mean(f_t),
true_f = mean(true_f)) %>%
ggplot() +
geom_ribbon(aes(year, ymin = lower_90, ymax = upper_90), fill = "lightgrey") +
geom_ribbon(aes(year, ymin = lower_50, ymax = upper_50), fill = "darkgrey") +
geom_line(aes(year,mean_f), color = "steelblue") +
geom_point(aes(year, true_f), fill = "tomato", size = 4, shape = 21) +
labs(y = "F", x = "Year")
f_plot
delta_f <- tidybayes::spread_samples(fit, delta_f[year]) %>%
ungroup() %>%
left_join(sampled_years, by = "year") %>%
mutate(year = sampled_year) %>%
select(-sampled_year)
ppue <- tidybayes::spread_samples(fit, ppue_hat[year]) %>%
ungroup() %>%
left_join(sampled_years, by = "year") %>%
mutate(year = sampled_year) %>%
select(-sampled_year)
true_ppue <-
data_frame(year = sampled_years$sampled_year,
true_ppue = prepped_fishery$scrooge_data$ppue_t) %>%
ungroup() %>%
mutate(true_ppue = true_ppue / max(true_ppue))
ppue_plot <- ppue %>%
left_join(true_ppue, by = "year") %>%
mutate(resid = ppue_hat - true_ppue) %>%
group_by(year) %>%
summarise(
lower_90 = quantile(ppue_hat, 0.05),
upper_90 = quantile(ppue_hat, 0.95),
lower_50 = quantile(ppue_hat, 0.25),
upper_50 = quantile(ppue_hat, 0.75),
mean_val = mean(ppue_hat),
true_val = mean(true_ppue)) %>%
ggplot() +
geom_ribbon(aes(year, ymin = lower_90, ymax = upper_90), fill = "lightgrey") +
geom_ribbon(aes(year, ymin = lower_50, ymax = upper_50), fill = "darkgrey") +
geom_line(aes(year,mean_val), color = "steelblue") +
geom_point(aes(year, true_val), fill = "tomato", size = 4, shape = 21) +
labs(y = "PPUE", x = "Year")
observed_lcomps <- prepped_fishery$scrooge_data$length_comps %>%
as_data_frame() %>%
mutate(year = sampled_years$sampled_year) %>%
gather(lbin, numbers, -year) %>%
mutate(lbin = as.numeric(lbin)) %>%
group_by(year) %>%
mutate(numbers = numbers / sum(numbers))
pp_n_tl <- tidybayes::spread_samples(fit, n_tl[year,length_bin]) %>%
ungroup() %>%
left_join(sampled_years, by = "year") %>%
mutate(year = sampled_year) %>%
select(-sampled_year)
pp_length_plot <- pp_n_tl %>%
group_by(year, .chain,.iteration) %>%
mutate(p_n_tl = n_tl / sum(n_tl)) %>%
group_by(year, .chain, length_bin) %>%
summarise(lower_90 = quantile(p_n_tl,0.05),
upper_90 = quantile(p_n_tl,0.95)) %>%
ggplot() +
geom_ribbon(aes(x = length_bin, ymin = lower_90, ymax = upper_90), fill = "lightgrey") +
facet_wrap(~year) +
theme_minimal() +
geom_point(data = observed_lcomps, aes(lbin, numbers), size = .5, alpha = 0.5, color = "red")
lcomps <- tidybayes::spread_samples(fit, p_lbin_sampled[year,lbin]) %>%
ungroup() %>%
left_join(sampled_years, by = "year") %>%
mutate(year = sampled_year) %>%
select(-sampled_year)
length_plot <- lcomps %>%
group_by(year, lbin) %>%
summarise(mean_n = mean(p_lbin_sampled)) %>%
group_by(year) %>%
mutate(mean_n = mean_n / sum(mean_n)) %>%
ggplot() +
geom_line(aes(lbin, mean_n, color = year, group = year),show.legend = F) +
geom_point(data = observed_lcomps, aes(lbin, numbers), size = .5, alpha = 0.5) +
facet_wrap(~year) +
theme_classic()
} |
## Multistate capture-recapture model for orchid life-history data,
## specified with NIMBLE. Individuals move among vegetative, flowering,
## dormant, and dead states; only vegetative/flowering plants are
## observable.
source("orchids_setup_data.R")
library(nimble)
## presumably needed for the dcat() calls below, whose arguments are
## multivariate index expressions like ps[z[i,t-1], i, t-1, 1:4] --
## see NIMBLE options documentation to confirm
nimbleOptions(disallow_multivariate_argument_expressions = FALSE)
orchids_code <- nimbleCode({
  ## -------------------------------------------------
  ## Parameters:
  ## s: survival probability
  ## psiV: transitions from vegetative
  ## psiF: transitions from flowering
  ## psiD: transitions from dormant
  ## -------------------------------------------------
  ## States (S):
  ## 1 vegetative
  ## 2 flowering
  ## 3 dormant
  ## 4 dead
  ## Observations (O):
  ## 1 seen vegetative
  ## 2 seen flowering
  ## 3 not seen
  ## -------------------------------------------------
  ## Priors and constraints
  ## Survival: uniform
  for (t in 1:(n_occasions-1)){
    s[t] ~ dunif(0, 1)
  }
  ## Transitions: gamma priors
  ## (each psi row is a set of gamma draws normalized to sum to 1)
  for (i in 1:3){
    a[i] ~ dgamma(1, 1)
    psiD[i] <- a[i]/sum(a[1:3])
    b[i] ~ dgamma(1, 1)
    psiV[i] <- b[i]/sum(b[1:3])
    c[i] ~ dgamma(1, 1)
    psiF[i] <- c[i]/sum(c[1:3])
  }
  ## Define state-transition and observation matrices
  ## NOTE(review): ps and po are indexed by individual i but none of the
  ## entries actually depend on i, so the i dimension could likely be
  ## dropped to shrink the model -- verify before changing.
  for (i in 1:nind){
    ## Define probabilities of state S(t+1) given S(t)
    for (t in 1:(n_occasions-1)){
      ps[1,i,t,1] <- s[t] * psiV[1]
      ps[1,i,t,2] <- s[t] * psiV[2]
      ps[1,i,t,3] <- s[t] * psiV[3]
      ps[1,i,t,4] <- 1-s[t]
      ps[2,i,t,1] <- s[t] * psiF[1]
      ps[2,i,t,2] <- s[t] * psiF[2]
      ps[2,i,t,3] <- s[t] * psiF[3]
      ps[2,i,t,4] <- 1-s[t]
      ps[3,i,t,1] <- s[t] * psiD[1]
      ps[3,i,t,2] <- s[t] * psiD[2]
      ps[3,i,t,3] <- s[t] * psiD[3]
      ps[3,i,t,4] <- 1-s[t]
      ps[4,i,t,1] <- 0
      ps[4,i,t,2] <- 0
      ps[4,i,t,3] <- 0
      ps[4,i,t,4] <- 1
      ## Define probabilities of O(t) given S(t)
      ## (states 1 and 2 are observed exactly; dormant and dead are
      ## both "not seen")
      po[1,i,t,1] <- 1
      po[1,i,t,2] <- 0
      po[1,i,t,3] <- 0
      po[2,i,t,1] <- 0
      po[2,i,t,2] <- 1
      po[2,i,t,3] <- 0
      po[3,i,t,1] <- 0
      po[3,i,t,2] <- 0
      po[3,i,t,3] <- 1
      po[4,i,t,1] <- 0
      po[4,i,t,2] <- 0
      po[4,i,t,3] <- 1
    } #t
  } #i
  ## Likelihood
  for (i in 1:nind){
    ## Define latent state at first capture
    z[i,f[i]] <- y[i,f[i]]
    for (t in (f[i]+1):n_occasions){
      ## State process: draw S(t) given S(t-1)
      z[i,t] ~ dcat(ps[z[i,t-1], i, t-1, 1:4])
      ## Observation process: draw O(t) given S(t)
      y[i,t] ~ dcat(po[z[i,t], i, t-1, 1:3])
    } #t
  } #i
})
## bundle for model building; orchids_data and orchids_inits() are
## presumably provided by orchids_setup_data.R sourced above -- confirm
orchids_info <- list(code=orchids_code, data=orchids_data, inits=orchids_inits())
## parameters <- c("s", "psiV", "psiF", "psiD")
## ## One could argue to monitor the underlying random variables instead:
## ## parameters <- c('a', 'b', 'c', 's')
## orchid_basic <- compareMCMCs(
##     orchid,
##     MCMCs = c("jags", "nimble"),
##     monitors = parameters,
##     niter = 20000,
##     burnin = 2000,
##     summary = FALSE
## )
## ## These runs are not long enough.
## ## Slice sampling b[1] and b[2] might help
## make_MCMC_comparison_pages(orchid_basic,
##                            dir = "comparison_results",
##                            modelNames = "orchid_basic")
## browseURL(Sys.glob(file.path("comparison_results", "orchid_basic.html")))
| /Content/examples_code/multi_state_CR_orchids/orchids_basic.R | no_license | lponisio/Vogelwarte_NIMBLE_workshop | R | false | false | 3,447 | r | source("orchids_setup_data.R")
library(nimble)
nimbleOptions(disallow_multivariate_argument_expressions = FALSE)
orchids_code <- nimbleCode({
## -------------------------------------------------
## Parameters:
## s: survival probability
## psiV: transitions from vegetative
## psiF: transitions from flowering
## psiD: transitions from dormant
## -------------------------------------------------
## States (S):
## 1 vegetative
## 2 flowering
## 3 dormant
## 4 dead
## Observations (O):
## 1 seen vegetative
## 2 seen flowering
## 3 not seen
## -------------------------------------------------
## Priors and constraints
## Survival: uniform
for (t in 1:(n_occasions-1)){
s[t] ~ dunif(0, 1)
}
## Transitions: gamma priors
for (i in 1:3){
a[i] ~ dgamma(1, 1)
psiD[i] <- a[i]/sum(a[1:3])
b[i] ~ dgamma(1, 1)
psiV[i] <- b[i]/sum(b[1:3])
c[i] ~ dgamma(1, 1)
psiF[i] <- c[i]/sum(c[1:3])
}
## Define state-transition and observation matrices
for (i in 1:nind){
## Define probabilities of state S(t+1) given S(t)
for (t in 1:(n_occasions-1)){
ps[1,i,t,1] <- s[t] * psiV[1]
ps[1,i,t,2] <- s[t] * psiV[2]
ps[1,i,t,3] <- s[t] * psiV[3]
ps[1,i,t,4] <- 1-s[t]
ps[2,i,t,1] <- s[t] * psiF[1]
ps[2,i,t,2] <- s[t] * psiF[2]
ps[2,i,t,3] <- s[t] * psiF[3]
ps[2,i,t,4] <- 1-s[t]
ps[3,i,t,1] <- s[t] * psiD[1]
ps[3,i,t,2] <- s[t] * psiD[2]
ps[3,i,t,3] <- s[t] * psiD[3]
ps[3,i,t,4] <- 1-s[t]
ps[4,i,t,1] <- 0
ps[4,i,t,2] <- 0
ps[4,i,t,3] <- 0
ps[4,i,t,4] <- 1
## Define probabilities of O(t) given S(t)
po[1,i,t,1] <- 1
po[1,i,t,2] <- 0
po[1,i,t,3] <- 0
po[2,i,t,1] <- 0
po[2,i,t,2] <- 1
po[2,i,t,3] <- 0
po[3,i,t,1] <- 0
po[3,i,t,2] <- 0
po[3,i,t,3] <- 1
po[4,i,t,1] <- 0
po[4,i,t,2] <- 0
po[4,i,t,3] <- 1
} #t
} #i
## Likelihood
for (i in 1:nind){
## Define latent state at first capture
z[i,f[i]] <- y[i,f[i]]
for (t in (f[i]+1):n_occasions){
## State process: draw S(t) given S(t-1)
z[i,t] ~ dcat(ps[z[i,t-1], i, t-1, 1:4])
## Observation process: draw O(t) given S(t)
y[i,t] ~ dcat(po[z[i,t], i, t-1, 1:3])
} #t
} #i
})
orchids_info <- list(code=orchids_code, data=orchids_data, inits=orchids_inits())
## parameters <- c("s", "psiV", "psiF", "psiD")
## ## One could argue to monitor the underlying random variables instead:
## ## parameters <- c('a', 'b', 'c', 's')
## orchid_basic <- compareMCMCs(
## orchid,
## MCMCs = c("jags", "nimble"),
## monitors = parameters,
## niter = 20000,
## burnin = 2000,
## summary = FALSE
## )
## ## These runs are not long enough.
## ## Slice sampling b[1] and b[2] might help
## make_MCMC_comparison_pages(orchid_basic,
## dir = "comparison_results",
## modelNames = "orchid_basic")
## browseURL(Sys.glob(file.path("comparison_results", "orchid_basic.html")))
|
## Simulation: compare the accuracy of hierarchical clustering (measured by
## the adjusted Rand index against the true group labels) across five
## distance measures -- Euclidean distance on normalized counts, log2
## counts, rlog- and VST-transformed counts, and the Poisson distance --
## over a grid of effect sizes (rnormsd), dispersion scales, and
## size-factor settings. Results are saved to results_simulateCluster.RData.
source("makeSim.R")
load("meanDispPairs.RData")
library("DESeq2")
library("PoiClaClu")
library("mclust")
# fixed seed for reproducibility of the simulated counts
set.seed(1)
n <- 2000
# create 20 samples, then remove first group, leaving 16
# this way the groups are equidistant from each other
m <- 20
k <- 4
methods <- c("norm Eucl","log2 Eucl","rlog Eucl","VST Eucl","PoisDist")
# 5 groups of 4 samples each; the "null" group is dropped below
condition0 <- factor(rep(c("null","A","B","C","D"), each = m/(k+1)))
x <- model.matrix(~ condition0)
# effect-size grids, one per dispersion scale (larger dispersion gets a
# wider range of effect sizes)
rnormsds <- list(seq(from=0, to=.6, length=7),
                 seq(from=0, to=.8, length=7),
                 seq(from=0, to=1.2, length=7))
sfs <- list(equal=rep(1,m), unequal=rep(c(1,1,1/3,3), times=(k+1)))
dispScales <- c(.1, .25, 1)
nreps <- 20
# grid: dispersion scale x effect-size sd x size-factor setting x replicate
res <- do.call(rbind, lapply(seq_along(dispScales), function(idx) {
  dispScale <- dispScales[idx]
  do.call(rbind, lapply(rnormsds[[idx]], function(rnormsd) {
    do.call(rbind, lapply(seq_along(sfs), function(sf.idx) {
      sf <- sfs[[sf.idx]]
      do.call(rbind, lapply(seq_len(nreps), function(i) {
        # 80% of genes null, 20% with N(0, rnormsd) group effects
        beta <- replicate(k, c(rep(0,8/10 * n), rnorm(2/10 * n, 0, rnormsd)))
        mdp <- meanDispPairs
        mdp$disp <- mdp$disp * dispScale
        mat0 <- makeSim(n,m,x,beta,mdp,sf)$mat
        # drop the 4 "null" samples (columns 1-4), leaving 16 in 4 groups
        mat <- mat0[,5:20]
        mode(mat) <- "integer"
        condition <- droplevels(condition0[5:20])
        dds <- DESeqDataSetFromMatrix(mat, DataFrame(condition), ~ 1)
        dds <- estimateSizeFactors(dds)
        dds <- estimateDispersionsGeneEst(dds)
        # don't warn if local fit is used
        dds <- suppressWarnings({estimateDispersionsFit(dds)})
        # sample-by-gene matrices for each transformation
        norm <- t(counts(dds, normalized=TRUE))
        lognorm <- t(log2(counts(dds, normalized=TRUE) + 1))
        rld <- t(assay(rlog(dds, blind=FALSE)))
        vsd <- t(assay(varianceStabilizingTransformation(dds, blind=FALSE)))
        poiDist <- PoissonDistance(t(mat))$dd
        # ARI of hierarchical clustering (tree cut at k groups) vs truth
        normARI <- adjustedRandIndex(condition, cutree(hclust(dist(norm)),k=k))
        lognormARI <- adjustedRandIndex(condition, cutree(hclust(dist(lognorm)),k=k))
        rlogARI <- adjustedRandIndex(condition, cutree(hclust(dist(rld)),k=k))
        vstARI <- adjustedRandIndex(condition, cutree(hclust(dist(vsd)),k=k))
        poiDistARI <- adjustedRandIndex(condition, cutree(hclust(poiDist),k=k))
        data.frame(ARI = c(normARI, lognormARI, rlogARI, vstARI, poiDistARI),
                   method = methods,
                   rnormsd = rep(rnormsd,length(methods)),
                   dispScale = rep(dispScale,length(methods)),
                   sizeFactor = rep(names(sfs)[sf.idx], length(methods)))
      }))
    }))
  }))
}))
res$method <- factor(res$method, methods)
save(res, file="results_simulateCluster.RData")
| /inst/script/simulateCluster.R | no_license | zihua/DESeq2 | R | false | false | 2,665 | r | source("makeSim.R")
load("meanDispPairs.RData")
library("DESeq2")
library("PoiClaClu")
library("mclust")
set.seed(1)
n <- 2000
# create 20 samples, then remove first group, leaving 16
# this way the groups are equidistant from each other
m <- 20
k <- 4
methods <- c("norm Eucl","log2 Eucl","rlog Eucl","VST Eucl","PoisDist")
condition0 <- factor(rep(c("null","A","B","C","D"), each = m/(k+1)))
x <- model.matrix(~ condition0)
rnormsds <- list(seq(from=0, to=.6, length=7),
seq(from=0, to=.8, length=7),
seq(from=0, to=1.2, length=7))
sfs <- list(equal=rep(1,m), unequal=rep(c(1,1,1/3,3), times=(k+1)))
dispScales <- c(.1, .25, 1)
nreps <- 20
res <- do.call(rbind, lapply(seq_along(dispScales), function(idx) {
dispScale <- dispScales[idx]
do.call(rbind, lapply(rnormsds[[idx]], function(rnormsd) {
do.call(rbind, lapply(seq_along(sfs), function(sf.idx) {
sf <- sfs[[sf.idx]]
do.call(rbind, lapply(seq_len(nreps), function(i) {
beta <- replicate(k, c(rep(0,8/10 * n), rnorm(2/10 * n, 0, rnormsd)))
mdp <- meanDispPairs
mdp$disp <- mdp$disp * dispScale
mat0 <- makeSim(n,m,x,beta,mdp,sf)$mat
mat <- mat0[,5:20]
mode(mat) <- "integer"
condition <- droplevels(condition0[5:20])
dds <- DESeqDataSetFromMatrix(mat, DataFrame(condition), ~ 1)
dds <- estimateSizeFactors(dds)
dds <- estimateDispersionsGeneEst(dds)
# don't warn if local fit is used
dds <- suppressWarnings({estimateDispersionsFit(dds)})
norm <- t(counts(dds, normalized=TRUE))
lognorm <- t(log2(counts(dds, normalized=TRUE) + 1))
rld <- t(assay(rlog(dds, blind=FALSE)))
vsd <- t(assay(varianceStabilizingTransformation(dds, blind=FALSE)))
poiDist <- PoissonDistance(t(mat))$dd
normARI <- adjustedRandIndex(condition, cutree(hclust(dist(norm)),k=k))
lognormARI <- adjustedRandIndex(condition, cutree(hclust(dist(lognorm)),k=k))
rlogARI <- adjustedRandIndex(condition, cutree(hclust(dist(rld)),k=k))
vstARI <- adjustedRandIndex(condition, cutree(hclust(dist(vsd)),k=k))
poiDistARI <- adjustedRandIndex(condition, cutree(hclust(poiDist),k=k))
data.frame(ARI = c(normARI, lognormARI, rlogARI, vstARI, poiDistARI),
method = methods,
rnormsd = rep(rnormsd,length(methods)),
dispScale = rep(dispScale,length(methods)),
sizeFactor = rep(names(sfs)[sf.idx], length(methods)))
}))
}))
}))
}))
res$method <- factor(res$method, methods)
save(res, file="results_simulateCluster.RData")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.