content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078985708188e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615828441-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 734 | r | testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078985708188e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
#' @importFrom httr parse_url
#' @importFrom httr build_url
#' @importFrom httr GET
#' @importFrom httr POST
#' @importFrom httr DELETE
#' @importFrom httr content
#' @importFrom httr message_for_status
#' @importFrom xml2 read_html
#' @importFrom jsonlite toJSON
#' @importFrom jsonlite fromJSON
#' @importFrom jsonlite base64_dec
#' @importFrom whisker whisker.render
NULL
#' Create a remote driver
#'
#' \code{remoteDr}: Create a remote Driver object
#' @param remoteServerAddr Object of class \code{"character"}, giving the
#' ip of the remote server. Defaults to localhost
#' @param port Object of class \code{"integer"}, the port of the remote
#' server on which to connect
#' @param browserName Object of class \code{"character"}. The name of the
#' browser being used; choices include
#' {chrome|firefox|internet explorer|iphone}. Defaults to firefox.
#' @param version Object of class \code{"character"}. The browser version,
#' or the empty string if unknown.
#' @param platform Object of class \code{"character"}. A key specifying
#' which platform the browser is running on. This value should be one
#' of {WINDOWS|XP|VISTA|MAC|LINUX|UNIX}. When requesting a new session,
#' the client may specify "ANY" to indicate any available platform may
#' be used.
#' @param javascript Object of class \code{"logical"}. Whether the session
#' supports executing user supplied JavaScript in the context of the
#' current page.
#' @param nativeEvents Object of class \code{"logical"}. Whether the
#' session supports native events. n WebDriver advanced user
#' interactions are provided by either simulating the Javascript events
#' directly (i.e. synthetic events) or by letting the browser generate
#' the Javascript events (i.e. native events). Native events simulate
#' the user interactions better.
#' @param extraCapabilities A list containing any os/platform/driver
#' specific arguments.
#' @param path Path on the server side to issue webdriver calls to.
#' Normally use the default value.
#' @param newSession Logical value whether to start an instance of the
#' browser. If TRUE a browser will be opened using
#' \code{\link{newSession}}
#' @param ... Pass addional arguments to newSession. Currently used to
#' pass \code{\link{retry}}
#' @return An object of class "rDriver" is returned. This is a remote
#' Driver object that is used in many of the remote driver specific
#' functions. Many functions that take a remote driver object as input
#' also return the remote driver object. This allows chaining of
#' commands. See the examples for chaining in action.
#' @export
#'
#' @examples
#' \dontrun{
#' # assume a server is available at the default location.
#' remDr <- remoteDr()
#' remDR %>% go("http://www.google.com") %>%
#' findElement("name", "q") %>%
#' elementSendKeys("R project", key = "enter")
#' # close our browser
#' remDr %>% deleteSession
#' }
#'
# Build a remote driver object (class "rDriver") describing how to reach a
# Selenium server; optionally opens a browser session straight away.
# Returns the object invisibly so it can be used at the head of a pipe.
remoteDr <- function(remoteServerAddr = "http://localhost",
port = 4444L,
browserName = "firefox",
version = "",
platform = "ANY",
javascript = TRUE,
nativeEvents = TRUE,
extraCapabilities = list(),
path = "wd/hub",
newSession = TRUE,
...
){
# Assemble the server url: host + port + webdriver path; default the
# scheme to http when the caller supplied a bare host name.
remServAdd <- parse_url(remoteServerAddr)
remServAdd[["port"]] <- port
remServAdd[["path"]] <- if(identical(remServAdd[["path"]], "")){
path
}else{
file.path(remServAdd[["path"]], path)
}
if(is.null(remServAdd[["scheme"]])){
remServAdd[["scheme"]] <- "http"
}
session <- structure(
list(
remServAdd = remServAdd,
desiredCapabilities = list(
browserName = browserName,
version = version,
javascriptEnabled = javascript,
platform = platform,
nativeEvents = nativeEvents
),
extraCapabilities = extraCapabilities,
# sessionId is a closure resolved lazily against the package environment
# .e, so the object reflects the current registration at call time.
sessionId = function(drvID){.e$sessionId[[drvID]]},
sessionInfo = NULL,
# Generate a driver id (from a temp-file name) that is not already
# registered in .e$sessionId; evaluated once when the list is built.
drvID = {
chk <- FALSE
while(!chk){
proID <- make.names(tempfile("",""))
if(!proID %in% names(.e$sessionId)) chk <- TRUE
}
proID
}
)
, class = "rDriver")
# Optionally start a browser now; ... is forwarded to newSession()
# (currently used for the `retry` argument).
if(newSession){
session <- newSession(session, ...)
}
invisible(session)
}
#' Create a Web Element
#'
#' \code{wbElement} Create a Web Element object of class "wElement"
#'
#' @param elementId This is a string returned by the web driver that
#' identifies the web element.
#' @template remDr
#'
#' @return An object of class "wElement" is returned. This is a web
#' element object that is used in many of the web Element specific
#' functions. Many functions that take a web Element object as input
#' also return the web Element object. This allows chaining of
#' commands. See the examples for chaining in action.
#' @export
#'
#' @examples \dontrun{
#' remDr <- remoteDr()
#' webElem <- remDR %>% go("http://www.google.com") %>%
#' findElement("name", "q")
#' # print the webElement
#' webElem
#'
#' # send keys to the web Element
#' webElem %>% elementSendKeys("R project", key = "enter")
#'
#' # close browser
#' remDr %>% deleteSession()
#' }
# Wrap a webdriver element id in an object of class "wElement".
#
# elementId: string identifier for the element, as returned by the remote
#            driver.
# remDr    : the "rDriver" object the element belongs to.
#
# The sessionId entry is a closure resolved lazily against the package
# environment .e when the element is actually used.
wbElement <- function(elementId, remDr){
element <- list(
sessionId = function(drvID) .e$sessionId[[drvID]],
elementId = elementId,
remDr = remDr
)
class(element) <- "wElement"
element
}
#' Send a query to remote Driver.
#'
#' \code{queryDriver} A function to send a query to a remote driver.
#' Intended for seleniumPipes internal use mainly.
#' @param verb The http method to use. See \code{\link{VERB}}
#' @param url The url of the remote server endpoint.
#' @param source The name of the seleniumPipes function that called
#' queryDriver.
#' @param drvID The driver id of the session as given by an object of
#' class "remoteDr"
#' @param ... additonal arguments
#'
#' @return The contents of the response from the remote server. See
#' \code{\link{content}} for details.
#' @export
#'
#' @examples \dontrun{
#' # internal method
#' }
#'
# Send a request to the remote webdriver and return the parsed content.
#
# verb  : httr verb function (GET, POST, DELETE, ...).
# url   : full endpoint url on the remote server.
# source: name of the calling seleniumPipes function; used in messages and
#         to skip the session check when a new session is being created.
# drvID : driver id used to look up / store the session id in .e$sessionId.
# ...   : may carry `json` (request body) and `retry` (logical, or a list
#         with noTry/delay elements) to control retry behaviour.
queryDriver <- function(verb = GET, url, source, drvID, ...){
# A session must already be registered unless we are creating one.
if(!identical(source, "newSession")){
if(is.null(.e$sessionId[[drvID]])){
message("Driver id is not registered. Has the session been deleted?")
message("Alternatively no session exists:
\tRun remoteRd with newSession = TRUE or
\trun newSession()")
stop("sessionId error")
}
}
# Add error checking code here
# First element of vArg is the url; an optional json body follows.
vArg <- c(list(url), body = list(...)[["json"]])
noTry <- getOption("seleniumPipes_no_try")
delay <- getOption("seleniumPipes_no_try_delay")
# retry = FALSE disables retrying (single attempt); retry = list(noTry=,
# delay=) overrides the package-level options.
if(!is.null(rtry <- list(...)[["retry"]])){
if(is.logical(rtry)){
if(!rtry){
noTry <- 1L
delay <- 100L
}
}
if(is.list(rtry)){
noTry <- ifelse(is.null(rtry$noTry),
getOption("seleniumPipes_no_try"),
as.integer(rtry$noTry))
delay <- ifelse(is.null(rtry$delay),
getOption("seleniumPipes_no_try_delay"),
as.integer(rtry$delay))
}
}
# Fire the request (with retries), validating each attempt via checkResponse.
res <- retry(func = checkResponse, v = verb, vArg, source,
noTry = noTry, delay = delay)
res <- content(res)
# Record the (possibly new) session id for this driver in the package env.
.e$sessionId[[drvID]] <- res$sessionId
res
}
#' Check the response from remote server
#'
#' \code{checkResponse} checks the response from a remote web driver and
#' checks against known errors. uses statusCodes in sysdata.rda see
#' seleniumPipes:::statusCodes
#'
#' @param response The value returned by a http method from httr see
#' \code{\link{VERB}}
#'
#' @return Stops with appropriate error if any found. On error
#' \code{\link{errorResponse}} and \code{\link{errorContent}} may
#' provide additional detail.
#' @export
#'
#' @examples \dontrun{
#' # internal method
#' }
#'
# Validate an httr response against the webdriver wire protocol.
#
# Returns NULL when the http status is 200 and the protocol status is 0
# (success).  Otherwise returns an object of class "checkResponse" whose
# err() closure prints diagnostics, stashes the response/content in the
# package environment .e (for errorResponse()/errorContent()) and stops.
# The error is deferred so that retry() can decide whether to try again.
checkResponse <- function(response){
if(identical(response$status_code, 200L) &&
identical(content(response)$status, 0L)) return()
# Deferred error reporter, only invoked once the caller gives up retrying.
errFunc <- function(){
message("Error detected:")
message("Response status code : ", response$status_code)
# The body may itself fail to parse; in that case fall back to the plain
# http status message.
errTest <- tryCatch(content(response, encoding = "UTF-8")$value,
error = function(e)e
)
errTest <- inherits(errTest, "error")
if(!errTest){
if(!is.null(content(response)$value$class)){
message("Selenium class exception: ",
content(response)$value$class)
}
if(!is.null(content(response)$status)){
# statusCodes is a lookup table shipped in the package sysdata
# (see seleniumPipes:::statusCodes).
scDetail <-
statusCodes[statusCodes$Code == content(response)$status,]
message("Selenium Status code: ", scDetail$Code)
message("Selenium Status summary: ", scDetail$Summary)
message("Selenium Status detail: ", scDetail$Detail)
}
if(!is.null(content(response)$value$message)){
messageDetail <- content(response)$value$message
message("Selenium message: ", messageDetail)
}
}else{
message("Response message:")
message_for_status(response)
}
message("Please check the response with errorResponse()")
message("Please check the content returned with errorContent()")
# Stash for post-mortem inspection by the user.
.e$errorResponse <- response
.e$errorContent <- content(response)
stop("Selenium Server error", call. = FALSE)
}
structure(list(err = errFunc), class = "checkResponse")
}
#' Return the response from remote webdriver
#'
#'\code{errorResponse} returns the response from the remote webdriver on
#' an error.
#' @return returns response see \code{\link{VERB}}. Headers, request etc.
#' can be examined from this object.
#' @export
#'
#' @examples \dontrun{
#' remDr <- remoteDr()
#' remDr %>% findElement("name", "sdofnsdofk")
#' errorResponse()
#' }
# Retrieve the httr response object stashed in the package environment .e
# by checkResponse() when the last Selenium call failed.  NULL if no error
# has been recorded.
errorResponse <- function(){
last_response <- .e$errorResponse
last_response
}
#' Returns the content from remote webdriver
#'
#'\code{errorContent} returns the content from the remote webdriver on an
#' error.
#' @return returns content see \code{\link{content}}
#' @export
#'
#' @examples \dontrun{
#' remDr <- remoteDr()
#' remDr %>% findElement("name", "sdofnsdofk")
#' errorContent()
#' }
# Retrieve the parsed response content stashed in the package environment
# .e by checkResponse() when the last Selenium call failed.  NULL if no
# error has been recorded.
errorContent <- function(){
last_content <- .e$errorContent
last_content
}
# Issue an http request (`v` applied to `vArg`) up to `noTry` times, passing
# the response through `func` (normally checkResponse) after each attempt.
#
# func  : response checker; returns NULL on success or an object of class
#         "checkResponse" (carrying an err() closure) on failure.
# v     : the httr verb function (GET, POST, DELETE, ...).
# vArg  : list of arguments for `v`; vArg[[1]] is the request url.
# source: name of the calling seleniumPipes function, used in messages.
# noTry : maximum number of attempts.
# delay : pause between attempts, in milliseconds.
#
# Returns the raw response on success; invokes the checker's err() closure
# (which stops) when every attempt failed.
retry <- function(func, v, vArg, source,
                  noTry = getOption("seleniumPipes_no_try"),
                  delay = getOption("seleniumPipes_no_try_delay")){
  tryNo <- 1L
  while (tryNo <= noTry) {
    tst <- tryCatch(
      func(res <- do.call(v, vArg)),
      error = function(e){
        # Report the target url for connection failures; re-raise otherwise.
        if (identical(e$message, "Couldn't connect to server")) {
          stop(e$message, " on ", vArg[[1]])
        } else {
          stop(e$message)
        }
      }
    )
    if (inherits(tst, "checkResponse")) {
      message("Called ", source, " - Try no: ", tryNo, " of ", noTry)
      # Only sleep when another attempt follows.  The original compared
      # identical(tryNo, noTry) after `tryNo <- tryNo + 1` had coerced
      # tryNo to double, so the comparison against the integer noTry was
      # never TRUE and it always slept after the final attempt; keeping
      # tryNo integer (+ 1L) and using `<` fixes that.
      if (tryNo < noTry) Sys.sleep(delay / 1000)
      tryNo <- tryNo + 1L
    } else {
      break  # success: stop retrying
    }
  }
  # Re-check the final response: propagate the stored error on failure,
  # otherwise hand the raw response back to the caller.
  if (inherits(func(res), "checkResponse")) {
    func(res)$err()
  } else {
    res
  }
}
| /R/init.R | no_license | johndharrison/seleniumPipes | R | false | false | 11,277 | r | #' @importFrom httr parse_url
#' @importFrom httr build_url
#' @importFrom httr GET
#' @importFrom httr POST
#' @importFrom httr DELETE
#' @importFrom httr content
#' @importFrom httr message_for_status
#' @importFrom xml2 read_html
#' @importFrom jsonlite toJSON
#' @importFrom jsonlite fromJSON
#' @importFrom jsonlite base64_dec
#' @importFrom whisker whisker.render
NULL
#' Create a remote driver
#'
#' \code{remoteDr}: Create a remote Driver object
#' @param remoteServerAddr Object of class \code{"character"}, giving the
#' ip of the remote server. Defaults to localhost
#' @param port Object of class \code{"integer"}, the port of the remote
#' server on which to connect
#' @param browserName Object of class \code{"character"}. The name of the
#' browser being used; choices include
#' {chrome|firefox|internet explorer|iphone}. Defaults to firefox.
#' @param version Object of class \code{"character"}. The browser version,
#' or the empty string if unknown.
#' @param platform Object of class \code{"character"}. A key specifying
#' which platform the browser is running on. This value should be one
#' of {WINDOWS|XP|VISTA|MAC|LINUX|UNIX}. When requesting a new session,
#' the client may specify "ANY" to indicate any available platform may
#' be used.
#' @param javascript Object of class \code{"logical"}. Whether the session
#' supports executing user supplied JavaScript in the context of the
#' current page.
#' @param nativeEvents Object of class \code{"logical"}. Whether the
#' session supports native events. n WebDriver advanced user
#' interactions are provided by either simulating the Javascript events
#' directly (i.e. synthetic events) or by letting the browser generate
#' the Javascript events (i.e. native events). Native events simulate
#' the user interactions better.
#' @param extraCapabilities A list containing any os/platform/driver
#' specific arguments.
#' @param path Path on the server side to issue webdriver calls to.
#' Normally use the default value.
#' @param newSession Logical value whether to start an instance of the
#' browser. If TRUE a browser will be opened using
#' \code{\link{newSession}}
#' @param ... Pass addional arguments to newSession. Currently used to
#' pass \code{\link{retry}}
#' @return An object of class "rDriver" is returned. This is a remote
#' Driver object that is used in many of the remote driver specific
#' functions. Many functions that take a remote driver object as input
#' also return the remote driver object. This allows chaining of
#' commands. See the examples for chaining in action.
#' @export
#'
#' @examples
#' \dontrun{
#' # assume a server is available at the default location.
#' remDr <- remoteDr()
#' remDR %>% go("http://www.google.com") %>%
#' findElement("name", "q") %>%
#' elementSendKeys("R project", key = "enter")
#' # close our browser
#' remDr %>% deleteSession
#' }
#'
remoteDr <- function(remoteServerAddr = "http://localhost",
port = 4444L,
browserName = "firefox",
version = "",
platform = "ANY",
javascript = TRUE,
nativeEvents = TRUE,
extraCapabilities = list(),
path = "wd/hub",
newSession = TRUE,
...
){
remServAdd <- parse_url(remoteServerAddr)
remServAdd[["port"]] <- port
remServAdd[["path"]] <- if(identical(remServAdd[["path"]], "")){
path
}else{
file.path(remServAdd[["path"]], path)
}
if(is.null(remServAdd[["scheme"]])){
remServAdd[["scheme"]] <- "http"
}
session <- structure(
list(
remServAdd = remServAdd,
desiredCapabilities = list(
browserName = browserName,
version = version,
javascriptEnabled = javascript,
platform = platform,
nativeEvents = nativeEvents
),
extraCapabilities = extraCapabilities,
sessionId = function(drvID){.e$sessionId[[drvID]]},
sessionInfo = NULL,
drvID = {
chk <- FALSE
while(!chk){
proID <- make.names(tempfile("",""))
if(!proID %in% names(.e$sessionId)) chk <- TRUE
}
proID
}
)
, class = "rDriver")
if(newSession){
session <- newSession(session, ...)
}
invisible(session)
}
#' Create a Web Element
#'
#' \code{wbElement} Create a Web Element object of class "wElement"
#'
#' @param elementId This is a string returned by the web driver that
#' identifies the web element.
#' @template remDr
#'
#' @return An object of class "wElement" is returned. This is a web
#' element object that is used in many of the web Element specific
#' functions. Many functions that take a web Element object as input
#' also return the web Element object. This allows chaining of
#' commands. See the examples for chaining in action.
#' @export
#'
#' @examples \dontrun{
#' remDr <- remoteDr()
#' webElem <- remDR %>% go("http://www.google.com") %>%
#' findElement("name", "q")
#' # print the webElement
#' webElem
#'
#' # send keys to the web Element
#' webElem %>% elementSendKeys("R project", key = "enter")
#'
#' # close browser
#' remDr %>% deleteSession()
#' }
wbElement <- function(elementId, remDr){
structure(
list(
sessionId = function(drvID){.e$sessionId[[drvID]]},
elementId = elementId,
remDr = remDr
)
, class = "wElement")
}
#' Send a query to remote Driver.
#'
#' \code{queryDriver} A function to send a query to a remote driver.
#' Intended for seleniumPipes internal use mainly.
#' @param verb The http method to use. See \code{\link{VERB}}
#' @param url The url of the remote server endpoint.
#' @param source The name of the seleniumPipes function that called
#' queryDriver.
#' @param drvID The driver id of the session as given by an object of
#' class "remoteDr"
#' @param ... additonal arguments
#'
#' @return The contents of the response from the remote server. See
#' \code{\link{content}} for details.
#' @export
#'
#' @examples \dontrun{
#' # internal method
#' }
#'
queryDriver <- function(verb = GET, url, source, drvID, ...){
if(!identical(source, "newSession")){
if(is.null(.e$sessionId[[drvID]])){
message("Driver id is not registered. Has the session been deleted?")
message("Alternatively no session exists:
\tRun remoteRd with newSession = TRUE or
\trun newSession()")
stop("sessionId error")
}
}
# Add error checking code here
vArg <- c(list(url), body = list(...)[["json"]])
noTry <- getOption("seleniumPipes_no_try")
delay <- getOption("seleniumPipes_no_try_delay")
if(!is.null(rtry <- list(...)[["retry"]])){
if(is.logical(rtry)){
if(!rtry){
noTry <- 1L
delay <- 100L
}
}
if(is.list(rtry)){
noTry <- ifelse(is.null(rtry$noTry),
getOption("seleniumPipes_no_try"),
as.integer(rtry$noTry))
delay <- ifelse(is.null(rtry$delay),
getOption("seleniumPipes_no_try_delay"),
as.integer(rtry$delay))
}
}
res <- retry(func = checkResponse, v = verb, vArg, source,
noTry = noTry, delay = delay)
res <- content(res)
.e$sessionId[[drvID]] <- res$sessionId
res
}
#' Check the response from remote server
#'
#' \code{checkResponse} checks the response from a remote web driver and
#' checks against known errors. uses statusCodes in sysdata.rda see
#' seleniumPipes:::statusCodes
#'
#' @param response The value returned by a http method from httr see
#' \code{\link{VERB}}
#'
#' @return Stops with appropriate error if any found. On error
#' \code{\link{errorResponse}} and \code{\link{errorContent}} may
#' provide additional detail.
#' @export
#'
#' @examples \dontrun{
#' # internal method
#' }
#'
checkResponse <- function(response){
if(identical(response$status_code, 200L) &&
identical(content(response)$status, 0L)) return()
errFunc <- function(){
message("Error detected:")
message("Response status code : ", response$status_code)
errTest <- tryCatch(content(response, encoding = "UTF-8")$value,
error = function(e)e
)
errTest <- inherits(errTest, "error")
if(!errTest){
if(!is.null(content(response)$value$class)){
message("Selenium class exception: ",
content(response)$value$class)
}
if(!is.null(content(response)$status)){
scDetail <-
statusCodes[statusCodes$Code == content(response)$status,]
message("Selenium Status code: ", scDetail$Code)
message("Selenium Status summary: ", scDetail$Summary)
message("Selenium Status detail: ", scDetail$Detail)
}
if(!is.null(content(response)$value$message)){
messageDetail <- content(response)$value$message
message("Selenium message: ", messageDetail)
}
}else{
message("Response message:")
message_for_status(response)
}
message("Please check the response with errorResponse()")
message("Please check the content returned with errorContent()")
.e$errorResponse <- response
.e$errorContent <- content(response)
stop("Selenium Server error", call. = FALSE)
}
structure(list(err = errFunc), class = "checkResponse")
}
#' Return the response from remote webdriver
#'
#'\code{errorResponse} returns the response from the remote webdriver on
#' an error.
#' @return returns response see \code{\link{VERB}}. Headers, request etc.
#' can be examined from this object.
#' @export
#'
#' @examples \dontrun{
#' remDr <- remoteDr()
#' remDr %>% findElement("name", "sdofnsdofk")
#' errorResponse()
#' }
errorResponse <- function(){
.e$errorResponse
}
#' Returns the content from remote webdriver
#'
#'\code{errorContent} returns the content from the remote webdriver on an
#' error.
#' @return returns content see \code{\link{content}}
#' @export
#'
#' @examples \dontrun{
#' remDr <- remoteDr()
#' remDr %>% findElement("name", "sdofnsdofk")
#' errorContent()
#' }
errorContent <- function(){
.e$errorContent
}
# Issue an http request (`v` applied to `vArg`) up to `noTry` times, passing
# the response through `func` (normally checkResponse) after each attempt.
#
# func  : response checker; returns NULL on success or an object of class
#         "checkResponse" (carrying an err() closure) on failure.
# v     : the httr verb function (GET, POST, DELETE, ...).
# vArg  : list of arguments for `v`; vArg[[1]] is the request url.
# source: name of the calling seleniumPipes function, used in messages.
# noTry : maximum number of attempts.
# delay : pause between attempts, in milliseconds.
#
# Returns the raw response on success; invokes the checker's err() closure
# (which stops) when every attempt failed.
retry <- function(func, v, vArg, source,
                  noTry = getOption("seleniumPipes_no_try"),
                  delay = getOption("seleniumPipes_no_try_delay")){
  tryNo <- 1L
  while (tryNo <= noTry) {
    tst <- tryCatch(
      func(res <- do.call(v, vArg)),
      error = function(e){
        # Report the target url for connection failures; re-raise otherwise.
        if (identical(e$message, "Couldn't connect to server")) {
          stop(e$message, " on ", vArg[[1]])
        } else {
          stop(e$message)
        }
      }
    )
    if (inherits(tst, "checkResponse")) {
      message("Called ", source, " - Try no: ", tryNo, " of ", noTry)
      # Only sleep when another attempt follows.  The original compared
      # identical(tryNo, noTry) after `tryNo <- tryNo + 1` had coerced
      # tryNo to double, so the comparison against the integer noTry was
      # never TRUE and it always slept after the final attempt; keeping
      # tryNo integer (+ 1L) and using `<` fixes that.
      if (tryNo < noTry) Sys.sleep(delay / 1000)
      tryNo <- tryNo + 1L
    } else {
      break  # success: stop retrying
    }
  }
  # Re-check the final response: propagate the stored error on failure,
  # otherwise hand the raw response back to the caller.
  if (inherits(func(res), "checkResponse")) {
    func(res)$err()
  } else {
    res
  }
}
|
# ggplotGraphics2 lecture script: patchwork layouts, aesthetic mappings,
# faceting.  One-time setup: reinstall patchwork and its dependencies.
remove.packages("patchwork")
remove.packages(c("ggplot2", "rlang"))
devtools::install_github("r-lib/rlang")
devtools::install_github("thomasp85/patchwork")
library(patchwork)

# ggplot graphics
# 10 April 2018
# LAP

# preliminaries
library(ggplot2)
library(ggthemes)
library(patchwork)
library(TeachingDemos)
char2seed("10th Avenue Freeze-Out")
d <- mpg
str(d)

# create 4 individual graphs
g1 <- ggplot(data = d, mapping = aes(x = displ, y = cty)) +
  geom_point() +
  geom_smooth()
print(g1)

# second graph: counts of fuel types
g2 <- ggplot(data = d,
             mapping = aes(x = fl, fill = I("tomato"), color = I("black"))) +
  geom_bar(stat = "count") +
  theme(legend.position = "none")
print(g2)

# histogram of engine displacement
g3 <- ggplot(data = d,
             mapping = aes(x = displ, fill = I("royalblue"), color = I("black"))) +
  geom_histogram()
print(g3)

# boxplots of city mileage by fuel type
g4 <- ggplot(data = d,
             mapping = aes(x = fl, y = cty, fill = fl)) +
  geom_boxplot() +
  theme(legend.position = "none")
print(g4)

# patchwork for awesome multipanel graphs
# place two plots horizontally
g1 + g2

# place 3 plots vertically
g1 + g2 + g3 + plot_layout(ncol = 1)

# change relative area of each plot
g1 + g2 + plot_layout(ncol = 1, heights = c(2, 1))
g1 + g2 + g3 + plot_layout(ncol = 2, widths = c(1, 2))

# add a spacer plot (under construction)
g1 + plot_spacer() + g2

# set up nested plots: g1 on the left, g2-g4 stacked on the right
# (original had a stray "{" and ")" that made it unparseable)
g1 + {
  g2 + {
    g3 +
      g4 +
      plot_layout(ncol = 1)
  }
} +
  plot_layout(ncol = 2)

# "-" operator for subtrack placement (was the typo g1+g2_g3)
g1 + g2 - g3 + plot_layout(ncol = 1)

# "|" and "/" for very intuitive layouts (original had a stray trailing "|")
(g1 | g2 | g3) / g4

# swapping axis orientation within a plot
g3a <- g3 + scale_x_reverse()
g3b <- g3 + scale_y_reverse()
g3c <- g3 + scale_x_reverse() + scale_y_reverse()
(g3 | g3a) / (g3b | g3c)

# switch orientation of coordinates
(g3 + coord_flip() | g3a + coord_flip()) / (g3b + coord_flip() | g3c + coord_flip())

# ggsave for creating and saving plots
ggsave(filename = "MyPlot.pdf", plot = g3, device = "pdf",
       width = 20, height = 20, units = "cm", dpi = 300)

# mapping of variables to aesthetics
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, color = class)) +
  geom_point()
print(m1)

# limited to 6 shapes
m1 <- ggplot(data = mpg,
             mapping = aes(x = displ, y = cty, shape = class)) +
  geom_point()
print(m1)

# mapping of a discrete variable to point size
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, size = class)) +
  geom_point()
print(m1)

# map a continuous variable to point size
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, size = hwy)) +
  geom_point()
print(m1)

# map a continuous variable onto color
# (was a duplicate of the size=hwy line; color=hwy matches this comment)
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, color = hwy)) +
  geom_point()
print(m1)

# map two variables to two different aesthetics
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, shape = class, color = hwy)) +
  geom_point()
print(m1)

# mapping a variable to the same aesthetic for two different geoms
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, shape = class, color = hwy)) +
  geom_point()
print(m1)

# mapping 3 variables onto shape, size, and color
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, color = drv)) +
  geom_smooth(method = lm)
print(m1)

# faceting for excellent visualization in a set of related plots
m1 <- ggplot(data = mpg,
             mapping = aes(x = displ, y = cty)) +
  geom_point()   # was geom_points(), which does not exist
m1 + facet_grid(class ~ fl)
m1 + facet_grid(class ~ fl, scales = "free_y")

# facet on only a single variable
m1 + facet_grid(. ~ class)
m1 + facet_grid(class ~ .)

# use facet_wrap for unordered graphs
m1 + facet_wrap(~class)

# combine variables in a facet wrap
m1 + facet_wrap(~class + fl, drop = FALSE)

# use facets in combination with aesthetics
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, color = drv)) +
  geom_point()
m1 + facet_grid(. ~ class)

m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, color = drv)) +
  geom_smooth(method = "lm", se = FALSE)
m1 + facet_grid(. ~ class)

# fitting with boxplots over a continuous variable
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty)) +
  geom_boxplot()
m1 + facet_grid(. ~ class)

#-------------------------------------------------------------#
m1 <- ggplot(data = mpg,
             mapping = aes(x = displ, y = cty, group = drv, fill = drv)) +
  geom_boxplot()
m1 + facet_grid(. ~ class)
| /ggplotGraphics2.R | no_license | lpett/Bio381_2018 | R | false | false | 3,958 | r | remove.packages("patchwork")
# ggplotGraphics2 lecture script (duplicated text column of the same file;
# its first line, remove.packages("patchwork"), sits on the metadata row
# above).  One-time setup: reinstall patchwork and its dependencies.
remove.packages(c("ggplot2", "rlang"))
devtools::install_github("r-lib/rlang")
devtools::install_github("thomasp85/patchwork")
library(patchwork)

# ggplot graphics
# 10 April 2018
# LAP

# preliminaries
library(ggplot2)
library(ggthemes)
library(patchwork)
library(TeachingDemos)
char2seed("10th Avenue Freeze-Out")
d <- mpg
str(d)

# create 4 individual graphs
g1 <- ggplot(data = d, mapping = aes(x = displ, y = cty)) +
  geom_point() +
  geom_smooth()
print(g1)

# second graph: counts of fuel types
g2 <- ggplot(data = d,
             mapping = aes(x = fl, fill = I("tomato"), color = I("black"))) +
  geom_bar(stat = "count") +
  theme(legend.position = "none")
print(g2)

# histogram of engine displacement
g3 <- ggplot(data = d,
             mapping = aes(x = displ, fill = I("royalblue"), color = I("black"))) +
  geom_histogram()
print(g3)

# boxplots of city mileage by fuel type
g4 <- ggplot(data = d,
             mapping = aes(x = fl, y = cty, fill = fl)) +
  geom_boxplot() +
  theme(legend.position = "none")
print(g4)

# patchwork for awesome multipanel graphs
# place two plots horizontally
g1 + g2

# place 3 plots vertically
g1 + g2 + g3 + plot_layout(ncol = 1)

# change relative area of each plot
g1 + g2 + plot_layout(ncol = 1, heights = c(2, 1))
g1 + g2 + g3 + plot_layout(ncol = 2, widths = c(1, 2))

# add a spacer plot (under construction)
g1 + plot_spacer() + g2

# set up nested plots: g1 on the left, g2-g4 stacked on the right
# (original had a stray "{" and ")" that made it unparseable)
g1 + {
  g2 + {
    g3 +
      g4 +
      plot_layout(ncol = 1)
  }
} +
  plot_layout(ncol = 2)

# "-" operator for subtrack placement (was the typo g1+g2_g3)
g1 + g2 - g3 + plot_layout(ncol = 1)

# "|" and "/" for very intuitive layouts (original had a stray trailing "|")
(g1 | g2 | g3) / g4

# swapping axis orientation within a plot
g3a <- g3 + scale_x_reverse()
g3b <- g3 + scale_y_reverse()
g3c <- g3 + scale_x_reverse() + scale_y_reverse()
(g3 | g3a) / (g3b | g3c)

# switch orientation of coordinates
(g3 + coord_flip() | g3a + coord_flip()) / (g3b + coord_flip() | g3c + coord_flip())

# ggsave for creating and saving plots
ggsave(filename = "MyPlot.pdf", plot = g3, device = "pdf",
       width = 20, height = 20, units = "cm", dpi = 300)

# mapping of variables to aesthetics
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, color = class)) +
  geom_point()
print(m1)

# limited to 6 shapes
m1 <- ggplot(data = mpg,
             mapping = aes(x = displ, y = cty, shape = class)) +
  geom_point()
print(m1)

# mapping of a discrete variable to point size
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, size = class)) +
  geom_point()
print(m1)

# map a continuous variable to point size
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, size = hwy)) +
  geom_point()
print(m1)

# map a continuous variable onto color
# (was a duplicate of the size=hwy line; color=hwy matches this comment)
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, color = hwy)) +
  geom_point()
print(m1)

# map two variables to two different aesthetics
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, shape = class, color = hwy)) +
  geom_point()
print(m1)

# mapping a variable to the same aesthetic for two different geoms
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, shape = class, color = hwy)) +
  geom_point()
print(m1)

# mapping 3 variables onto shape, size, and color
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, color = drv)) +
  geom_smooth(method = lm)
print(m1)

# faceting for excellent visualization in a set of related plots
m1 <- ggplot(data = mpg,
             mapping = aes(x = displ, y = cty)) +
  geom_point()   # was geom_points(), which does not exist
m1 + facet_grid(class ~ fl)
m1 + facet_grid(class ~ fl, scales = "free_y")

# facet on only a single variable
m1 + facet_grid(. ~ class)
m1 + facet_grid(class ~ .)

# use facet_wrap for unordered graphs
m1 + facet_wrap(~class)

# combine variables in a facet wrap
m1 + facet_wrap(~class + fl, drop = FALSE)

# use facets in combination with aesthetics
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, color = drv)) +
  geom_point()
m1 + facet_grid(. ~ class)

m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty, color = drv)) +
  geom_smooth(method = "lm", se = FALSE)
m1 + facet_grid(. ~ class)

# fitting with boxplots over a continuous variable
m1 <- ggplot(data = mpg, mapping = aes(x = displ, y = cty)) +
  geom_boxplot()
m1 + facet_grid(. ~ class)

#-------------------------------------------------------------#
m1 <- ggplot(data = mpg,
             mapping = aes(x = displ, y = cty, group = drv, fill = drv)) +
  geom_boxplot()
m1 + facet_grid(. ~ class)
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# NOTE(review): data.table is loaded but not used below -- kept for
# compatibility with the rest of the assignment scripts.
library(data.table)
# Spell arguments out: the original used partial matching (h=T, strings=F)
# and the reassignable T/F shorthands.
power <- read.table("data/household_power_consumption.txt",
                    header = TRUE, sep = ";", stringsAsFactors = FALSE)
# Keep only the two target days (dates are stored as d/m/Y strings).
pow_Feb_1_2_2007 <- subset(power, Date == "1/2/2007" | Date == "2/2/2007")
# Coerce from character; "?" missing-value markers become NA.
global_active_power <- as.numeric(pow_Feb_1_2_2007$Global_active_power)
png("plot1.png", height = 480, width = 480)
hist(global_active_power, col = "orangered2", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off() | /plot1.R | no_license | razvanvoicu/ExData_Plotting1 | R | false | false | 404 | r | library(data.table)
# Plot 1 (duplicated text column of plot1.R; the library(data.table) line
# sits on the metadata row above, dev.off() on the row below).
# Spell arguments out: the original used partial matching (h=T, strings=F)
# and the reassignable T/F shorthands.
power <- read.table("data/household_power_consumption.txt",
                    header = TRUE, sep = ";", stringsAsFactors = FALSE)
# Keep only the two target days (dates are stored as d/m/Y strings).
pow_Feb_1_2_2007 <- subset(power, Date == "1/2/2007" | Date == "2/2/2007")
# Coerce from character; "?" missing-value markers become NA.
global_active_power <- as.numeric(pow_Feb_1_2_2007$Global_active_power)
png("plot1.png", height = 480, width = 480)
hist(global_active_power, col = "orangered2", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off() |
# Data downloaded from
# https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# 6/4/14 @ 1:15PM
# Measurements of electric power consumption in
# one household with a one-minute sampling rate over a period of almost
# 4 years. Different electrical quantities and some sub-metering values
# are available.
# The following descriptions of the 9 variables in the dataset are taken
# from
# the web site:
# Date: date in format dd/mm/yyyy
# Time: time in format hh:mm:ss
# Global_active_power: household global minute-averaged active power (in kilowatt)
# Global_reactive_power: household global minute-averaged reactive power (in kilowatt)
# Voltage: minute-averaged voltage (in volt)
# Global_intensity: household global minute-averaged current intensity (in ampere)
# Sub_metering_1: energy sub-metering No. 1 (in watt-hour of active energy). It corresponds to the kitchen, containing mainly a dishwasher, an oven and a microwave (hot plates are not electric but gas powered).
# Sub_metering_2: energy sub-metering No. 2 (in watt-hour of active energy). It corresponds to the laundry room, containing a washing-machine, a tumble-drier, a refrigerator and a light.
# Sub_metering_3: energy sub-metering No. 3 (in watt-hour of active energy). It corresponds to an electric water-heater and an air-conditioner.
#
# plot4.R: draws a 2x2 panel of power-consumption time series for
# 2007-02-01/02 and saves it as plot4.png.
# NOTE(review): setwd() hard-codes the author's home directory; anyone else
# must edit or remove this line before running.
setwd("~/Documents/datasciencecoursera/ExData_Plotting1")
# NOTE(review): header = TRUE is not set, so if the raw file has a header
# row it is read in as a data row and every column becomes character --
# confirm against the raw file.
rawData <- read.table("./household_power_consumption.txt", sep=";", stringsAsFactors=FALSE)
names(rawData) <- c("Date", "Time", "ActivePower", "ReactivePower", "Voltage", "GlobalIntensity", "Submetering1", "Submetering2", "Submetering3")
#
# Subset just the two days Feb1 and 2 2007
feb1Data <- rawData[rawData$Date == "1/2/2007",]
feb2Data <- rawData[rawData$Date == "2/2/2007",]
smallData <- rbind(feb1Data, feb2Data)
#
# Reformat the date and time into a single DateTime date object
x <- paste(smallData$Date, smallData$Time)
y <- strptime(x, "%d/%m/%Y %H:%M:%S")
dateData = cbind(y, smallData[,3:9])
names(dateData) <- c("DateTime", "ActivePower" , "ReactivePower" , "Voltage" , "GlobalIntensity", "Submetering1" , "Submetering2" , "Submetering3")
tidyData <- dateData
#
# Plot 4 shows
# a. Global Active Power as a function of time
# b. Voltage as a function of time
# c. Energy submetering as a function of time
# d. Global Reactive Power as a function of time
#
# 2x2 grid of panels, filled row by row
par(mfrow=c(2,2))
#
# Panel a: Global Active Power vs time
plot(tidyData$DateTime, tidyData$ActivePower, ylab="Global Active Power", xlab="", type="l", cex.lab=3/4, cex.axis=3/4)
#
# Panel b: Voltage vs time
with (tidyData, plot(DateTime, Voltage, ylab="Voltage", xlab="datetime", type="l", cex.lab=3/4, cex.axis=3/4))
#
# Panel c: three sub-metering series overlaid (black / red / blue)
with (tidyData, plot(DateTime, Submetering1, ylab="Energy sub metering", xlab="", type="l", cex.lab=3/4, cex.axis=3/4))
with (tidyData, lines(DateTime, Submetering2, col="red"))
with (tidyData, lines(DateTime, Submetering3, col="blue"))
legend("topright", col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex=.6, pt.cex=.5, bty="n", y.intersp=.5, lty=c(1,1,1), lwd=c(2.5, 2.5, 2.5), inset=c(-0.15, -.05))
#
# Panel d: Global Reactive Power vs time
with (tidyData, plot(DateTime, ReactivePower, ylab="Global_reactive_power", xlab="datetime", type="l", cex.lab=3/4, cex.axis=3/4))
#
# Save to file
# dev.copy replays the on-screen device into a 480x480 PNG
dev.copy(png, file="plot4.png", width=480, height=480)
dev.off()
#
| /plot4.R | no_license | MDMC/ExData_Plotting1 | R | false | false | 3,379 | r | # Data downloaded from
# https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# 6/4/14 @ 1:15PM
# Measurements of electric power consumption in
# one household with a one-minute sampling rate over a period of almost
# 4 years. Different electrical quantities and some sub-metering values
# are available.
# The following descriptions of the 9 variables in the dataset are taken
# from
# the web site:
# Date</b>: Date in format dd/mm/yyyy </li>
# Time</b>: time in format hh:mm:ss </li>
# Global_active_power</b>: household global minute-averaged active power (in kilowatt)
# Global_reactive_power</b>: household global minute-averaged reactive power (in kilowatt)
# Voltage</b>: minute-averaged voltage (in volt)
# Global_intensity</b>: household global minute-averaged current intensity (in ampere) </li>
# Sub_metering_1</b>: energy sub-metering No. 1 (in watt-hour of active energy). It corresponds to the kitchen, containing mainly a dishwasher, an oven and a microwave (hot plates are not electric but gas powered).
# Sub_metering_2</b>: energy sub-metering No. 2 (in watt-hour of active energy). It corresponds to the laundry room, containing a washing-machine, a tumble-drier, a refrigerator and a light.
# Sub_metering_3</b>: energy sub-metering No. 3 (in watt-hour of active energy). It corresponds to an electric water-heater and an air-conditioner.
#
setwd("~/Documents/datasciencecoursera/ExData_Plotting1")
rawData <- read.table("./household_power_consumption.txt", sep=";", stringsAsFactors=FALSE)
names(rawData) <- c("Date", "Time", "ActivePower", "ReactivePower", "Voltage", "GlobalIntensity", "Submetering1", "Submetering2", "Submetering3")
#
# Subset just the two days Feb1 and 2 2007
feb1Data <- rawData[rawData$Date == "1/2/2007",]
feb2Data <- rawData[rawData$Date == "2/2/2007",]
smallData <- rbind(feb1Data, feb2Data)
#
# Reformat the date and time into a single DateTime date object
x <- paste(smallData$Date, smallData$Time)
y <- strptime(x, "%d/%m/%Y %H:%M:%S")
dateData = cbind(y, smallData[,3:9])
names(dateData) <- c("DateTime", "ActivePower" , "ReactivePower" , "Voltage" , "GlobalIntensity", "Submetering1" , "Submetering2" , "Submetering3")
tidyData <- dateData
#
# Plot 4 shows
# a. Global Active Power as a function of time
# b. Voltage as a function of time
# c. Energy submetering as a function of time
# d. Global Reactive Power as a function of time
#
par(mfrow=c(2,2))
#
plot(tidyData$DateTime, tidyData$ActivePower, ylab="Global Active Power", xlab="", type="l", cex.lab=3/4, cex.axis=3/4)
#
with (tidyData, plot(DateTime, Voltage, ylab="Voltage", xlab="datetime", type="l", cex.lab=3/4, cex.axis=3/4))
#
with (tidyData, plot(DateTime, Submetering1, ylab="Energy sub metering", xlab="", type="l", cex.lab=3/4, cex.axis=3/4))
with (tidyData, lines(DateTime, Submetering2, col="red"))
with (tidyData, lines(DateTime, Submetering3, col="blue"))
legend("topright", col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex=.6, pt.cex=.5, bty="n", y.intersp=.5, lty=c(1,1,1), lwd=c(2.5, 2.5, 2.5), inset=c(-0.15, -.05))
#
with (tidyData, plot(DateTime, ReactivePower, ylab="Global_reactive_power", xlab="datetime", type="l", cex.lab=3/4, cex.axis=3/4))
#
# Save to file
dev.copy(png, file="plot4.png", width=480, height=480)
dev.off()
#
|
library(ranger)
library(ggpubr) # for as_ggplot function to get legend
library(cowplot)
library(forestFloor)
source("R/Functions/ranger_RFadaptor.R")
source("R/Functions/ranger_plot.forestFloor.HD.R")
## Random Forest covariance matrix, with only predictors selected ####
trib_Cl.select <- trib_Cl %>%
group_by(hydroID_GLAHF, streamName) %>%
summarise_if(is.numeric, median, na.rm = TRUE) %>%
ungroup()
rf_cov = trib_Cl.select %>% dplyr::select(urban:Areakm2,
Population_Density_n_km2,
Road_Density_kmroad_km2,
MeanImperviousness)
sapply(rf_cov, function(x) sum(is.na(x))) # Check if there are NA values
# Sampling routine to use 95% of lakes as in-bag samples ####
ntree = 1000
# Custom inbag sampling routine
random_lake_samps <- lapply(1:ntree, function(i){
unique_lakes <- unique(trib_Cl.select$hydroID_GLAHF)
lake_samp <- sample(unique_lakes, size =0.95*length(unique_lakes), replace=F) # In-bag uses 95% of lakes
samp = as.integer(trib_Cl.select$hydroID_GLAHF %in% lake_samp)
return(samp)
}
)
## Run RF model ####
rf_model <- ranger(dependent.variable.name = 'chloride',
data = data.frame(chloride = trib_Cl.select$chloride, rf_cov),
inbag = random_lake_samps,
mtry = 4,
num.trees = ntree, quantreg = T,
importance = 'permutation',
keep.inbag = TRUE)
rf_model
# Calculate oob quantiles
oob_quantiles <- predict(rf_model, type = 'quantiles', quantiles = c(0.05,0.50,0.95))
#variable importance
v <- as.numeric(rf_model$variable.importance)
w <- as.character(names(rf_model$variable.importance))
DF <- data.frame(w=w,v=as.numeric(v)) %>% arrange(v)
DF$w <- factor(DF$w, levels = DF$w)
# variable importance plot
pvar = ggplot(DF, aes(x=w, y=v,fill=v))+
geom_bar(stat="identity", position="dodge") + coord_flip() +
scale_fill_gradient(low = 'lightsteelblue3', high = 'lightsteelblue4') +
ylab("Variable Importance") + xlab("")+
theme_bw(base_size = 8) +
# theme(axis.text = element_text(size=8), axis.title=element_text(size=8)) +
guides(fill=F)
pvar
# Forest floor plots
ff_rf_model <- ranger_RFadaptor(rf_model,trib_Cl.select$chloride)
ffra = forestFloor(ff_rf_model,rf_cov,calc_np = T)
ffra$FCmatrix
# Row sums should add up to predicted value
rws = rowSums(ffra$FCmatrix)
rowMeans(ffra$FCmatrix)
plot(rws, ff_rf_model$predicted)
abline(0,1)
lm(ff_rf_model$predicted ~ rws)
# varNames = read_csv('LAGOS_prediction/variableNames.csv')
ffra2 = ffra
names = data.frame(Name = names(ffra2$X), FullName =
c('Urban (%)', 'Barren (%)','Forest (%)','Shrubland (%)',
'Herbaceous (%)','Agriculture (%)','Wetland (%)',
'Strahler Order','Area (km2)','Population (#/km2)','Road Density (km/km2)','Mean Imperviousness (%)')) #%>% left_join(varNames)
ffra2$imp_ind = order(ffra2$importance,decreasing = T)
library(scales)
Col = fcol.HD(ffra2,1)
# plot(ffra, plot_seq=c(1,2,3), plot_GOF=F, limitY=F, col=Col, orderByImportance = T, pch = 16)
# Plot feature contribution plots
pp = plot.forestFloor.HD(ffra2,plot_seq=c(1,2,3,4,5,6,7,8), cols = Col, varNames = names, shape = 16, size = 0.7)
# do.call(plot_grid, c(pp, list(nrow = 2, align = 'hv'))) # plot multiple plots
# Fake plot just to get legend
WSlegend = ggplot() +
geom_point(aes(x=(ffra2$X$urban), y=ffra2$FCmatrix[,ffra2$imp_ind[1]],
fill = (ffra2$X$urban)), pch = 21, color = 'grey50') +
scale_fill_distiller(palette='RdYlBu', direction = -1, name = 'Imperviousness (%)',
breaks= c(25,50,75), labels = c(25,50,75)) +
scale_x_continuous(labels = scales::number_format(accuracy = 0.01)) +
theme_bw(base_size = 8) +
theme(legend.text = element_text(size=8),
legend.title = element_text(size=8))
# Using the cowplot package
legend <- cowplot::get_legend(WSlegend)
# layout <- "
# AAAAAAA
# BCCDDEE
# #FFGGHH
# "
# pvar + as_ggplot(legend) +pp[[1]] + pp[[2]] + pp[[3]] + pp[[4]] + pp[[5]] + pp[[6]] +
# plot_layout(design = layout)
# ggsave('Figures/Figure6_RF.png',width = 6.5, height = 5, units = 'in', dpi = 500)
#
# plot_grid(pvar, as_ggplot(legend),
# pp[[1]], pp[[2]], pp[[3]], pp[[4]], pp[[5]], pp[[6]],
# rel_widths = c(2,1,1,1),
# nrow = 3, align = 'h', labels = c('a','b','c','d','e','f'),label_size = 10)
toprow = plot_grid(pvar, as_ggplot(legend),
rel_widths = c(4,1),
nrow = 1, align = 'h', labels = c('a', 'b'),label_size = 10)
bottomrow = plot_grid(pp[[1]], pp[[2]], pp[[3]], pp[[4]], pp[[5]], pp[[6]],
nrow = 2, align = 'h', labels = c('c','d','e','f','g','h'),label_size = 10)
plot_grid(toprow, bottomrow, nrow = 2, rel_heights = c(1,2))
ggsave('Figures/Figure6_RF.png',width = 6.5, height = 6, units = 'in', dpi = 500)
| /R/2_RF.R | permissive | LinneaRock/LakeMichigan_Salinization | R | false | false | 5,000 | r | library(ranger)
library(ggpubr) # for as_ggplot function to get legend
library(cowplot)
library(forestFloor)
source("R/Functions/ranger_RFadaptor.R")
source("R/Functions/ranger_plot.forestFloor.HD.R")
## Random Forest covariance matrix, with only predictors selected ####
trib_Cl.select <- trib_Cl %>%
group_by(hydroID_GLAHF, streamName) %>%
summarise_if(is.numeric, median, na.rm = TRUE) %>%
ungroup()
rf_cov = trib_Cl.select %>% dplyr::select(urban:Areakm2,
Population_Density_n_km2,
Road_Density_kmroad_km2,
MeanImperviousness)
sapply(rf_cov, function(x) sum(is.na(x))) # Check if there are NA values
# Sampling routine to use 95% of lakes as in-bag samples ####
ntree = 1000
# Custom inbag sampling routine
random_lake_samps <- lapply(1:ntree, function(i){
unique_lakes <- unique(trib_Cl.select$hydroID_GLAHF)
lake_samp <- sample(unique_lakes, size =0.95*length(unique_lakes), replace=F) # In-bag uses 95% of lakes
samp = as.integer(trib_Cl.select$hydroID_GLAHF %in% lake_samp)
return(samp)
}
)
## Run RF model ####
rf_model <- ranger(dependent.variable.name = 'chloride',
data = data.frame(chloride = trib_Cl.select$chloride, rf_cov),
inbag = random_lake_samps,
mtry = 4,
num.trees = ntree, quantreg = T,
importance = 'permutation',
keep.inbag = TRUE)
rf_model
# Calculate oob quantiles
oob_quantiles <- predict(rf_model, type = 'quantiles', quantiles = c(0.05,0.50,0.95))
#variable importance
v <- as.numeric(rf_model$variable.importance)
w <- as.character(names(rf_model$variable.importance))
DF <- data.frame(w=w,v=as.numeric(v)) %>% arrange(v)
DF$w <- factor(DF$w, levels = DF$w)
# variable importance plot
pvar = ggplot(DF, aes(x=w, y=v,fill=v))+
geom_bar(stat="identity", position="dodge") + coord_flip() +
scale_fill_gradient(low = 'lightsteelblue3', high = 'lightsteelblue4') +
ylab("Variable Importance") + xlab("")+
theme_bw(base_size = 8) +
# theme(axis.text = element_text(size=8), axis.title=element_text(size=8)) +
guides(fill=F)
pvar
# Forest floor plots
ff_rf_model <- ranger_RFadaptor(rf_model,trib_Cl.select$chloride)
ffra = forestFloor(ff_rf_model,rf_cov,calc_np = T)
ffra$FCmatrix
# Row sums should add up to predicted value
rws = rowSums(ffra$FCmatrix)
rowMeans(ffra$FCmatrix)
plot(rws, ff_rf_model$predicted)
abline(0,1)
lm(ff_rf_model$predicted ~ rws)
# varNames = read_csv('LAGOS_prediction/variableNames.csv')
ffra2 = ffra
names = data.frame(Name = names(ffra2$X), FullName =
c('Urban (%)', 'Barren (%)','Forest (%)','Shrubland (%)',
'Herbaceous (%)','Agriculture (%)','Wetland (%)',
'Strahler Order','Area (km2)','Population (#/km2)','Road Density (km/km2)','Mean Imperviousness (%)')) #%>% left_join(varNames)
ffra2$imp_ind = order(ffra2$importance,decreasing = T)
library(scales)
Col = fcol.HD(ffra2,1)
# plot(ffra, plot_seq=c(1,2,3), plot_GOF=F, limitY=F, col=Col, orderByImportance = T, pch = 16)
# Plot feature contribution plots
pp = plot.forestFloor.HD(ffra2,plot_seq=c(1,2,3,4,5,6,7,8), cols = Col, varNames = names, shape = 16, size = 0.7)
# do.call(plot_grid, c(pp, list(nrow = 2, align = 'hv'))) # plot multiple plots
# Fake plot just to get legend
WSlegend = ggplot() +
geom_point(aes(x=(ffra2$X$urban), y=ffra2$FCmatrix[,ffra2$imp_ind[1]],
fill = (ffra2$X$urban)), pch = 21, color = 'grey50') +
scale_fill_distiller(palette='RdYlBu', direction = -1, name = 'Imperviousness (%)',
breaks= c(25,50,75), labels = c(25,50,75)) +
scale_x_continuous(labels = scales::number_format(accuracy = 0.01)) +
theme_bw(base_size = 8) +
theme(legend.text = element_text(size=8),
legend.title = element_text(size=8))
# Using the cowplot package
legend <- cowplot::get_legend(WSlegend)
# layout <- "
# AAAAAAA
# BCCDDEE
# #FFGGHH
# "
# pvar + as_ggplot(legend) +pp[[1]] + pp[[2]] + pp[[3]] + pp[[4]] + pp[[5]] + pp[[6]] +
# plot_layout(design = layout)
# ggsave('Figures/Figure6_RF.png',width = 6.5, height = 5, units = 'in', dpi = 500)
#
# plot_grid(pvar, as_ggplot(legend),
# pp[[1]], pp[[2]], pp[[3]], pp[[4]], pp[[5]], pp[[6]],
# rel_widths = c(2,1,1,1),
# nrow = 3, align = 'h', labels = c('a','b','c','d','e','f'),label_size = 10)
toprow = plot_grid(pvar, as_ggplot(legend),
rel_widths = c(4,1),
nrow = 1, align = 'h', labels = c('a', 'b'),label_size = 10)
bottomrow = plot_grid(pp[[1]], pp[[2]], pp[[3]], pp[[4]], pp[[5]], pp[[6]],
nrow = 2, align = 'h', labels = c('c','d','e','f','g','h'),label_size = 10)
plot_grid(toprow, bottomrow, nrow = 2, rel_heights = c(1,2))
ggsave('Figures/Figure6_RF.png',width = 6.5, height = 6, units = 'in', dpi = 500)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_yes_no.R
\name{make_yes_no}
\alias{make_yes_no}
\title{make_yes_no}
\usage{
make_yes_no(x)
}
\arguments{
\item{x}{x variable to be converted to hold "Yes" or "No or Unknown"}
}
\value{
a factor with "Yes" or "No or Unknown"
}
\description{
Convert a "Yes-No", "True-False" or "Checkboxes (Multiple
Answers)" question in REDCap to a factor holding "Yes" or
"No or Unknown". Technically "yes" or "checked" (ignoring case), 1 or
TRUE responses are converted to "Yes" and all other values to
"No or Unknown". Also see \code{make_yes_no_unknown()}.
}
\examples{
make_yes_no(c(0, 1, NA))
make_yes_no(c("unchecked", "Checked", NA))
}
| /man/make_yes_no.Rd | permissive | RaymondBalise/tidyREDCap | R | false | true | 710 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_yes_no.R
\name{make_yes_no}
\alias{make_yes_no}
\title{make_yes_no}
\usage{
make_yes_no(x)
}
\arguments{
\item{x}{x variable to be converted to hold "Yes" or "No or Unknown"}
}
\value{
a factor with "Yes" or "No or Unknown"
}
\description{
Convert a "Yes-No", "True-False" or "Checkboxes (Multiple
Answers)" question in REDCap to a factor holding "Yes" or
"No or Unknown". Technically "yes" or "checked" (ignoring case), 1 or
TRUE responses are converted to "Yes" and all other values to
"No or Unknown". Also see \code{make_yes_no_unknown()}.
}
\examples{
make_yes_no(c(0, 1, NA))
make_yes_no(c("unchecked", "Checked", NA))
}
|
\name{loadMdeaths2R}
\alias{loadMdeaths2R}
\title{loadMdeaths2R translate the R internal Deaths data.frame to matlab LDB standard}
\usage{
loadMdeaths2R(full.path.mat, PopName = "HUN")
}
\arguments{
\item{full.path.mat}{the full path to the matlab file
containing only the given deaths matrix and including the
\code{.mat} suffix}
\item{PopName}{character. HMD country code, since matlab
does not preserve this}
}
\description{
this function does as promised. Given the standard
numeric matrix used by the legacy matlab code and saved
to a \code{.mat} file by matlab, it'll read it into R,
give it colnames, convert columns as necessary, augment
for the new R standard, and reorder the columns according
to that returned by \code{readInputDB()} and used
downstream in R. R scripts use column names as
identifiers, whereas matlab scripts use indices. The
matlab code rearranges indices as well, and sheds some
columns, which necessitates specialty functions such as
this.
}
| /LDButils/LDButils/man/loadMdeaths2R.Rd | no_license | timriffe/LDButils | R | false | false | 1,009 | rd | \name{loadMdeaths2R}
\alias{loadMdeaths2R}
\title{loadMdeaths2R translate the R internal Deaths data.frame to matlab LDB standard}
\usage{
loadMdeaths2R(full.path.mat, PopName = "HUN")
}
\arguments{
\item{full.path.mat}{the full path to the matlab file
containing only the given deaths matrix and including the
\code{.mat} suffix}
\item{PopName}{character. HMD country code, since matlab
does not preserve this}
}
\description{
this function does as promised. Given the standard
numeric matrix used by the legacy matlab code and saved
to a \code{.mat} file by matlab, it'll read it into R,
give it colnames, convert columns as necessary, augment
for the new R standard, and reorder the columns according
to that returned by \code{readInputDB()} and used
downstream in R. R scripts use column names as
identifiers, whereas matlab scripts use indices. The
matlab code rearranges indices as well, and sheds some
columns, which necessitates specialty functions such as
this.
}
|
\name{createClassBindings}
\alias{createClassBindings}
\title{Generate R and C/C++ code bindings for a C++ class.}
\description{
This function is used to generate code that provides
an R-language interface to a C++ class.
Currently, it generates code to access the instances of the class as
references. This makes sense for C++ classes.
}
\usage{
createClassBindings(def, nodes, className = rdef@name, # rdef computed in function from def.
types = DefinitionContainer(nodes),
polymorphicNames = unique(names(mm)[duplicated(names(mm))]),
abstract = isAbstractClass(mm, nodes),
resolvedMethods = resolveType(getClassMethods(def), nodes, types),
typeMap = list(),
generateOverloaded = TRUE, ifdef = character(),
helperInfo = NULL, access = "public",
dynamicCast = list(),
otherClassMethods = NULL, useClassNameMethod = FALSE,
signatures = list(),
useSignatureParameters = TRUE,
dispatchInfo = data.frame(),
defaultBaseClass = if(useClassNameMethod)
"RC++ReferenceUseName"
else
"RC++Reference",
classDefs = NULL, ...)
}
\arguments{
\item{def}{the node giving the class definition.
In the future, we will allow this object to be a fully resolved
description of the class, i.e. with the methods and types resolved.}
\item{nodes}{the TU parser returned from \code{\link{parseTU}}
which is an array of the tu nodes.
}
\item{className}{the name of the C++ class being processed}
\item{types}{the collection of resolved data types, routines, etc.
typically a \code{DefinitionContainer}.}
\item{polymorphicNames}{a character vector giving the names of the
methods that are overloaded, typically just within this class.}
\item{abstract}{a logical value indicating if this class is an abstract
class for which there can be no C++-level instances.}
\item{resolvedMethods}{a list of the fully resolved methods, typically
obtained from a call to \code{\link{resolveType}} on the methods
returned by \code{\link{getClassMethods}}.
This can be either a list of all the resolved methods
}
% \item{methods}{a list of the unresolved methods.}
\item{typeMap}{a user-specifiable list of "hints" for mapping
particular data types in the generated code, i.e. for
converting between R and C/C++ and coercing in R functions to the
appropriate type for the C/C++ method.
}
\item{\dots}{additional arguments that are passed on to
\code{\link{createMethodBinding}}.}
\item{generateOverloaded}{a logical value XXX}
\item{ifdef}{a character string which, if non-empty, is used to enclose
the entire generated code within and \code{#ifdef string ... #endif}
block.
This allows one to generate code that is conditionally compiled,
e.g. for specific platforms.}
\item{helperInfo}{passed on to each call to \code{\link{createMethodBinding}}.}
\item{access}{a character vector containing one or more of
public, protected, private to indicate which methods should be
processed. For each method, we compare its accessability value
to this vector and only process it if there is a corresponding
entry in this vector. Thus, a value of \code{c("public",
"protected")} will process both the public and protected methods.
}
\item{dynamicCast}{a logical value or a list that controls how dynamic
casting code is generated.}
\item{otherClassMethods}{methods from other classes}
\item{useClassNameMethod}{prefix methods, etc. with the name of hte
class rather than using translation unit-wide overloading}
\item{signatures}{a list of the signatures of all functions/methods in
the translation unit.}
\item{dispatchInfo}{ a data frame. This is passed to and from the different
helper functions and builds up information about the number of
possible arguments for different methods.
}
\item{useSignatureParameters}{a logical controlling whether to use the
parameters from the local definition or the centralized overloaded method.
}
\item{defaultBaseClass}{the name of the base C++ class to use for
defining a new class.}
\item{classDefs}{a collection of the resolved type descriptions of the
classes in the translation unit.}
}
\value{
}
\references{http://www.omegahat.org/RGCCTranslationUnit}
\author{Duncan Temple Lang <duncan@wald.ucdavis.edu>}
\note{
For regular C/C++ structures, we can access the fields and
work with instances rather than references to instances,
passing such objects between R and C/C++ representations
by explicitly copying the fields.
}
\seealso{
\code{\link{writeCode}}
\code{\link{parseTU}}
}
\examples{
}
\keyword{programming}
\keyword{interface}
| /man/createClassBindings.Rd | no_license | kashenfelter/RGCCTranslationUnit | R | false | false | 4,990 | rd | \name{createClassBindings}
\alias{createClassBindings}
\title{Generate R and C/C++ code bindings for a C++ class.}
\description{
This function is used to generate code that provides
an R-language interface to a C++ class.
Currently, it generates code to access the instances of the class as
references. This makes sense for C++ classes.
}
\usage{
createClassBindings(def, nodes, className = rdef@name, # rdef computed in function from def.
types = DefinitionContainer(nodes),
polymorphicNames = unique(names(mm)[duplicated(names(mm))]),
abstract = isAbstractClass(mm, nodes),
resolvedMethods = resolveType(getClassMethods(def), nodes, types),
typeMap = list(),
generateOverloaded = TRUE, ifdef = character(),
helperInfo = NULL, access = "public",
dynamicCast = list(),
otherClassMethods = NULL, useClassNameMethod = FALSE,
signatures = list(),
useSignatureParameters = TRUE,
dispatchInfo = data.frame(),
defaultBaseClass = if(useClassNameMethod)
"RC++ReferenceUseName"
else
"RC++Reference",
classDefs = NULL, ...)
}
\arguments{
\item{def}{the node giving the class definition.
In the future, we will allow this object to be a fully resolved
description of the class, i.e. with the methods and types resolved.}
\item{nodes}{the TU parser returned from \code{\link{parseTU}}
which is an array of the tu nodes.
}
\item{className}{the name of the C++ class being processed}
\item{types}{the collection of resolved data types, routines, etc.
typically a \code{DefinitionContainer}.}
\item{polymorphicNames}{a character vector giving the names of the
methods that are overloaded, typically just within this class.}
\item{abstract}{a logical value indicating if this class is an abstract
class for which there can be no C++-level instances.}
\item{resolvedMethods}{a list of the fully resolved methods, typically
obtained from a call to \code{\link{resolveType}} on the methods
returned by \code{\link{getClassMethods}}.
This can be either a list of all the resolved methods
}
% \item{methods}{a list of the unresolved methods.}
\item{typeMap}{a user-specifiable list of "hints" for mapping
particular data types in the generated code, i.e. for
converting between R and C/C++ and coercing in R functions to the
appropriate type for the C/C++ method.
}
\item{\dots}{additional arguments that are passed on to
\code{\link{createMethodBinding}}.}
\item{generateOverloaded}{a logical value XXX}
\item{ifdef}{a character string which, if non-empty, is used to enclose
the entire generated code within and \code{#ifdef string ... #endif}
block.
This allows one to generate code that is conditionally compiled,
e.g. for specific platforms.}
\item{helperInfo}{passed on to each call to \code{\link{createMethodBinding}}.}
\item{access}{a character vector containing one or more of
public, protected, private to indicate which methods should be
processed. For each method, we compare its accessability value
to this vector and only process it if there is a corresponding
entry in this vector. Thus, a value of \code{c("public",
"protected")} will process both the public and protected methods.
}
\item{dynamicCast}{a logical value or a list that controls how dynamic
casting code is generated.}
\item{otherClassMethods}{methods from other classes}
\item{useClassNameMethod}{prefix methods, etc. with the name of hte
class rather than using translation unit-wide overloading}
\item{signatures}{a list of the signatures of all functions/methods in
the translation unit.}
\item{dispatchInfo}{ a data frame. This is passed to and from the different
helper functions and builds up information about the number of
possible arguments for different methods.
}
\item{useSignatureParameters}{a logical controlling whether to use the
parameters from the local definition or the centralized overloaded method.
}
\item{defaultBaseClass}{the name of the base C++ class to use for
defining a new class.}
\item{classDefs}{a collection of the resolved type descriptions of the
classes in the translation unit.}
}
\value{
}
\references{http://www.omegahat.org/RGCCTranslationUnit}
\author{Duncan Temple Lang <duncan@wald.ucdavis.edu>}
\note{
For regular C/C++ structures, we can access the fields and
work with instances rather than references to instances,
passing such objects between R and C/C++ representations
by explicitly copying the fields.
}
\seealso{
\code{\link{writeCode}}
\code{\link{parseTU}}
}
\examples{
}
\keyword{programming}
\keyword{interface}
|
#Libraries
library(dplyr)
library(missForest)
library(DMwR)
library(mice)
library(BaylorEdPsych)
library(mvnmle)
#Data Reading
DataRusset = read.delim("/Users/goktugcengiz/Desktop/Datasets/Russet_ineqdata2.txt", header = TRUE, sep="\t", dec =".")
#Data Understanding
str(DataRusset)
summary(DataRusset)
apply(DataRusset, 2, function(x) sum(is.na(x))) #Missing Value Detection
md.pattern(DataRusset) #Missing Value Visualization
t = LittleMCAR(DataRusset)
attributes(t)
t$p.value #0.7910595
#Data Preprocessing
#Missing Value Imputation with RandomForest
DataRusset = missForest(DataRusset)
DataRusset = DataRusset$ximp
DataRusset$ecks = round(DataRusset$demo)
DataRusset$Rent = round(DataRusset$demo)
sum(is.na(DataRusset))
#Defining as X Matrix
X = matrix(DataRusset)
#principal component analysis
#pca = prcomp(DataRusset, scale. = T) | /Ex2/HW2.R | no_license | goktugc7/MVA | R | false | false | 838 | r | #Libraries
library(dplyr)
library(missForest)
library(DMwR)
library(mice)
library(BaylorEdPsych)
library(mvnmle)
#Data Reading
DataRusset = read.delim("/Users/goktugcengiz/Desktop/Datasets/Russet_ineqdata2.txt", header = TRUE, sep="\t", dec =".")
#Data Understanding
str(DataRusset)
summary(DataRusset)
apply(DataRusset, 2, function(x) sum(is.na(x))) #Missing Value Detection
md.pattern(DataRusset) #Missing Value Visualization
t = LittleMCAR(DataRusset)
attributes(t)
t$p.value #0.7910595
#Data Preprocessing
#Missing Value Imputation with RandomForest
DataRusset = missForest(DataRusset)
DataRusset = DataRusset$ximp
DataRusset$ecks = round(DataRusset$demo)
DataRusset$Rent = round(DataRusset$demo)
sum(is.na(DataRusset))
#Defining as X Matrix
X = matrix(DataRusset)
#principal component analysis
#pca = prcomp(DataRusset, scale. = T) |
# Example "CodeToRun" script: generate the study cohorts on one CDM source,
# run CohortDiagnostics against them, and package/inspect the results.
# connection details ----
connectionSpecifications <- cdmSources %>%
  dplyr::filter(sequence == 1) %>%
  dplyr::filter(database == 'truven_mdcd')
connectionDetails <-
  DatabaseConnector::createConnectionDetails(
    dbms = connectionSpecifications$dbms,
    user = keyring::key_get(service = userNameService),
    password = keyring::key_get(service = passwordService),
    port = connectionSpecifications$port,
    server = connectionSpecifications$server
  )
# BUG FIX: the original passed DatabaseConnector::connect(...) inline and
# never disconnected, leaking a database connection. Open the connection
# explicitly and close it once the cdm_source row has been fetched.
connection <- DatabaseConnector::connect(connectionDetails)
cdmSourceTable <-
  DatabaseConnector::renderTranslateQuerySql(
    connection = connection,
    sql = "SELECT * FROM @cdm_database_schema.cdm_source;",
    cdm_database_schema = connectionSpecifications$cdmDatabaseSchema
  ) %>%
  dplyr::tibble()
DatabaseConnector::disconnect(connection)
# Metadata describing this CDM source (consistency: use <- like the rest of
# the script instead of top-level =).
databaseId <- cdmSourceTable$CDM_SOURCE_ABBREVIATION
databaseName <- cdmSourceTable$CDM_SOURCE_NAME
databaseDescription <- cdmSourceTable$SOURCE_DESCRIPTION
cdmDatabaseSchema <- connectionSpecifications$cdmDatabaseSchema
vocabularyDatabaseSchema <- connectionSpecifications$vocabDatabaseSchema
cohortDatabaseSchema <- connectionSpecifications$cohortDatabaseSchema
tempEmulationSchema <- getOption("sqlRenderTempEmulationSchema")
# Cohort Definitions ----
remotes::install_github('OHDSI/SkeletonCohortDiagnosticsStudy', ref = "develop")
studyName <- 'epi999'
## get cohort definition set ----
cohortDefinitionSet <-
  CohortGenerator::getCohortDefinitionSet(
    settingsFileName = "settings/CohortsToCreate.csv",
    jsonFolder = "cohorts",
    sqlFolder = "sql/sql_server",
    packageName = "SkeletonCohortDiagnosticsStudy",
    cohortFileNameValue = "cohortId"
  ) %>% dplyr::tibble()
# One cohort table per study and source, e.g. "sepi999_<sourceId>".
cohortTableNames <- CohortGenerator::getCohortTableNames(
  cohortTable = paste0("s", studyName, "_", connectionSpecifications$sourceId)
)
# output folder information ----
outputFolder <-
  file.path("D:", "temp", "outputFolder", studyName, connectionSpecifications$database)
## optionally delete previous execution ----
unlink(x = outputFolder,
       recursive = TRUE,
       force = TRUE)
dir.create(path = outputFolder,
           showWarnings = FALSE,
           recursive = TRUE)
# Execution ----
## Create cohort tables on remote ----
CohortGenerator::createCohortTables(connectionDetails = connectionDetails,
                                    cohortDatabaseSchema = cohortDatabaseSchema,
                                    cohortTableNames = cohortTableNames,
                                    incremental = TRUE)
## Generate cohort on remote ----
CohortGenerator::generateCohortSet(connectionDetails = connectionDetails,
                                   cdmDatabaseSchema = cdmDatabaseSchema,
                                   tempEmulationSchema = tempEmulationSchema,
                                   cohortTableNames = cohortTableNames,
                                   cohortDefinitionSet = cohortDefinitionSet,
                                   cohortDatabaseSchema = cohortDatabaseSchema,
                                   incremental = TRUE,
                                   incrementalFolder = file.path(outputFolder, "incremental"))
## Execute Cohort Diagnostics on remote ----
CohortDiagnostics::executeDiagnostics(
  cohortDefinitionSet = cohortDefinitionSet,
  exportFolder = outputFolder,
  databaseId = databaseId,
  databaseName = databaseName,
  databaseDescription = databaseDescription,
  cohortDatabaseSchema = cohortDatabaseSchema,
  cdmDatabaseSchema = cdmDatabaseSchema,
  tempEmulationSchema = tempEmulationSchema,
  connectionDetails = connectionDetails,
  cohortTableNames = cohortTableNames,
  vocabularyDatabaseSchema = vocabularyDatabaseSchema,
  incremental = TRUE
)
# example of how to run full time series diagnostics outside executeDiagnostics
data <-
  CohortDiagnostics::runCohortTimeSeriesDiagnostics(
    connectionDetails = connectionDetails,
    cdmDatabaseSchema = cdmDatabaseSchema,
    cohortDatabaseSchema = cohortDatabaseSchema,
    cohortTable = cohortTableNames$cohortTable,
    runCohortTimeSeries = TRUE,
    runDataSourceTimeSeries = TRUE
  )
# to export data into csv in Cohort Diagnostics compatible form
# (note: makeDataExportable/writeToCsv are unexported internals of
# CohortDiagnostics and may change between releases)
data <- CohortDiagnostics:::makeDataExportable(x = data,
                                               tableName = "time_series",
                                               databaseId = databaseId)
CohortDiagnostics:::writeToCsv(
  data = data,
  fileName = file.path(outputFolder, "time_series.csv"),
  incremental = FALSE,
  cohortId = data$cohortId %>% unique()
)
# package results ----
CohortDiagnostics::createMergedResultsFile(dataFolder = outputFolder, overwrite = TRUE)
# Launch diagnostics explorer shiny app ----
CohortDiagnostics::launchDiagnosticsExplorer()
# upload to postgres db ----
# connectionDetailsToUpload <- createConnectionDetails(dbms = "postgresql",
#                                                      server = paste(Sys.getenv("shinydbServer"),
#                                                                     Sys.getenv("shinydbDatabase"),
#                                                                     sep = "/"),
#                                                      port = Sys.getenv("shinydbPort"),
#                                                      user = Sys.getenv("shinyDbUser"),
#                                                      password = Sys.getenv("shinydbPw"))
# resultsSchema <- "eunomiaCd"
# CohortDiagnostics::createResultsDataModel(connectionDetails = connectionDetailsToUpload,
#                                           schema = resultsSchema)
# zipFilesToUpload <- list.files(path = outputFolder,
#                                pattern = ".zip",
#                                recursive = TRUE,
#                                full.names = TRUE)
#
# for (zipFileName in zipFilesToUpload) {
#   CohortDiagnostics::uploadResults(connectionDetails = connectionDetailsToUpload,
#                                    schema = resultsSchema,
#                                    zipFileName = zipFileName)
# }
#
# CohortDiagnostics::launchDiagnosticsExplorer(connectionDetails = connectionDetailsToUpload,
#                                              resultsDatabaseSchema = resultsSchema)
| /extras/exampleCodeToRun/CodeToRun_Other.R | permissive | gowthamrao/CohortDiagnostics | R | false | false | 6,269 | r | # connection details ----
# Example "CodeToRun" script: generate the study cohorts on one CDM source,
# run CohortDiagnostics against them, and package/inspect the results.
connectionSpecifications <- cdmSources %>%
  dplyr::filter(sequence == 1) %>%
  dplyr::filter(database == 'truven_mdcd')
connectionDetails <-
  DatabaseConnector::createConnectionDetails(
    dbms = connectionSpecifications$dbms,
    user = keyring::key_get(service = userNameService),
    password = keyring::key_get(service = passwordService),
    port = connectionSpecifications$port,
    server = connectionSpecifications$server
  )
# NOTE(review): the connection opened inline below is never disconnected,
# leaking a database connection -- open it into a variable and call
# DatabaseConnector::disconnect() after the query.
cdmSourceTable <-
  DatabaseConnector::renderTranslateQuerySql(
    connection = DatabaseConnector::connect(connectionDetails),
    sql = "SELECT * FROM @cdm_database_schema.cdm_source;",
    cdm_database_schema = connectionSpecifications$cdmDatabaseSchema
  ) %>%
  dplyr::tibble()
# Metadata describing this CDM source.
databaseId = cdmSourceTable$CDM_SOURCE_ABBREVIATION
databaseName = cdmSourceTable$CDM_SOURCE_NAME
databaseDescription = cdmSourceTable$SOURCE_DESCRIPTION
cdmDatabaseSchema = connectionSpecifications$cdmDatabaseSchema
vocabularyDatabaseSchema = connectionSpecifications$vocabDatabaseSchema
cohortDatabaseSchema = connectionSpecifications$cohortDatabaseSchema
tempEmulationSchema = getOption("sqlRenderTempEmulationSchema")
# Cohort Definitions ----
remotes::install_github('OHDSI/SkeletonCohortDiagnosticsStudy', ref = "develop")
studyName <- 'epi999'
## get cohort definition set ----
cohortDefinitionSet <-
  CohortGenerator::getCohortDefinitionSet(
    settingsFileName = "settings/CohortsToCreate.csv",
    jsonFolder = "cohorts",
    sqlFolder = "sql/sql_server",
    packageName = "SkeletonCohortDiagnosticsStudy",
    cohortFileNameValue = "cohortId"
  ) %>% dplyr::tibble()
# One cohort table per study and source, e.g. "sepi999_<sourceId>".
cohortTableNames = CohortGenerator::getCohortTableNames(cohortTable =
                                                          paste0("s", studyName, "_", connectionSpecifications$sourceId))
# output folder information ----
outputFolder <-
  file.path("D:", "temp", "outputFolder", studyName, connectionSpecifications$database)
## optionally delete previous execution ----
unlink(x = outputFolder,
       recursive = TRUE,
       force = TRUE)
dir.create(path = outputFolder,
           showWarnings = FALSE,
           recursive = TRUE)
# Execution ----
## Create cohort tables on remote ----
CohortGenerator::createCohortTables(connectionDetails = connectionDetails,
                                    cohortDatabaseSchema = cohortDatabaseSchema,
                                    cohortTableNames = cohortTableNames,
                                    incremental = TRUE)
## Generate cohort on remote ----
CohortGenerator::generateCohortSet(connectionDetails = connectionDetails,
                                   cdmDatabaseSchema = cdmDatabaseSchema,
                                   tempEmulationSchema = tempEmulationSchema,
                                   cohortTableNames = cohortTableNames,
                                   cohortDefinitionSet = cohortDefinitionSet,
                                   cohortDatabaseSchema = cohortDatabaseSchema,
                                   incremental = TRUE,
                                   incrementalFolder = file.path(outputFolder, "incremental"))
## Execute Cohort Diagnostics on remote ----
CohortDiagnostics::executeDiagnostics(
  cohortDefinitionSet = cohortDefinitionSet,
  exportFolder = outputFolder,
  databaseId = databaseId,
  databaseName = databaseName,
  databaseDescription = databaseDescription,
  cohortDatabaseSchema = cohortDatabaseSchema,
  cdmDatabaseSchema = cdmDatabaseSchema,
  tempEmulationSchema = tempEmulationSchema,
  connectionDetails = connectionDetails,
  cohortTableNames = cohortTableNames,
  vocabularyDatabaseSchema = vocabularyDatabaseSchema,
  incremental = TRUE
)
# example of how to run full time series diagnostics outside executeDiagnostics
data <-
  CohortDiagnostics::runCohortTimeSeriesDiagnostics(
    connectionDetails = connectionDetails,
    cdmDatabaseSchema = cdmDatabaseSchema,
    cohortDatabaseSchema = cohortDatabaseSchema,
    cohortTable = cohortTableNames$cohortTable,
    runCohortTimeSeries = TRUE,
    runDataSourceTimeSeries = TRUE
  )
# to export data into csv in Cohort Diagnostics compatible form
# (note: makeDataExportable/writeToCsv are unexported internals of
# CohortDiagnostics and may change between releases)
data <- CohortDiagnostics:::makeDataExportable(x = data,
                                               tableName = "time_series",
                                               databaseId = databaseId)
CohortDiagnostics:::writeToCsv(
  data = data,
  fileName = file.path(outputFolder, "time_series.csv"),
  incremental = FALSE,
  cohortId = data$cohortId %>% unique()
)
# package results ----
CohortDiagnostics::createMergedResultsFile(dataFolder = outputFolder, overwrite = TRUE)
# Launch diagnostics explorer shiny app ----
CohortDiagnostics::launchDiagnosticsExplorer()
# upload to postgres db ----
# connectionDetailsToUpload <- createConnectionDetails(dbms = "postgresql",
#                                                      server = paste(Sys.getenv("shinydbServer"),
#                                                                     Sys.getenv("shinydbDatabase"),
#                                                                     sep = "/"),
#                                                      port = Sys.getenv("shinydbPort"),
#                                                      user = Sys.getenv("shinyDbUser"),
#                                                      password = Sys.getenv("shinydbPw"))
# resultsSchema <- "eunomiaCd"
# CohortDiagnostics::createResultsDataModel(connectionDetails = connectionDetailsToUpload,
#                                           schema = resultsSchema)
# zipFilesToUpload <- list.files(path = outputFolder,
#                                pattern = ".zip",
#                                recursive = TRUE,
#                                full.names = TRUE)
#
# for (i in (1:length(zipFilesToUpload))) {
#   CohortDiagnostics::uploadResults(connectionDetails = connectionDetailsToUpload,
#                                    schema = resultsSchema,
#                                    zipFileName = zipFilesToUpload[[i]])
# }
#
# CohortDiagnostics::launchDiagnosticsExplorer(connectionDetails = connectionDetailsToUpload,
#                                              resultsDatabaseSchema = resultsSchema)
|
# UI part of the "verteilungen" (distributions) module: a header plus tables
# for the distribution-input sub-module, both namespaced under `id`.
dqe_verteilungsmodelle_verteilungen_box <- function(id) {
  ns <- NS(id)
  # Both children address the same namespaced sub-module id.
  input_module_id <- ns("id_module_verteilungen_input")
  tagList(
    module_verteilungen_input_header(id = input_module_id),
    module_verteilungen_input_tables(id = input_module_id)
  )
}
# Server part of the "verteilungen" module. Registers itself as a node in the
# module tree and wires up the distribution-input sub-module in "ideal" mode.
dqe_verteilungsmodelle_verteilungen <- function(
  input, output, session, .data, .values, parent, ...
) {
  self <- node$new("verteilungen", parent, session)
  ns <- session$ns
  # Server of the sub-module; also the (invisible) return value of this call.
  input_module <- callModule(
    module = module_verteilungen_input,
    id = "id_module_verteilungen_input",
    .data = .data,
    .values = .values,
    parent = self,
    .mode = "ideal"
  )
}
| /www/modules/dqe_verteilungsmodelle_verteilungen.R | no_license | DavidBarke/shinyQW | R | false | false | 669 | r | dqe_verteilungsmodelle_verteilungen_box <- function(id) {
ns <- NS(id)
tagList(
module_verteilungen_input_header(
id = ns("id_module_verteilungen_input")
),
module_verteilungen_input_tables(
id = ns("id_module_verteilungen_input")
)
)
}
# Server part of the "verteilungen" module. Registers itself as a node in the
# module tree and wires up the distribution-input sub-module in "ideal" mode.
dqe_verteilungsmodelle_verteilungen <- function(
  input, output, session, .data, .values, parent, ...
) {
  self <- node$new("verteilungen", parent, session)
  ns <- session$ns
  # Server of the sub-module; also the (invisible) return value of this call.
  input_module <- callModule(
    module = module_verteilungen_input,
    id = "id_module_verteilungen_input",
    .data = .data,
    .values = .values,
    parent = self,
    .mode = "ideal"
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nf_natsalt_names.R
\name{nf_file_names}
\alias{nf_file_names}
\alias{natsalt_file_names}
\alias{nf_gage_names}
\alias{nf_gage_abbrv}
\title{Get vector of natural flow and salt gage names, filenames, and abbreviations}
\usage{
nf_file_names(version = 5)
natsalt_file_names(version = 5)
nf_gage_names()
nf_gage_abbrv()
}
\arguments{
\item{version}{The CRSS version number. Current version of CRSS is 5. Valid
versions are 1-5.}
}
\value{
Vector of characters with 29 entries (file names, gage names, gage
abbreviations).
}
\description{
These functions return the natural flow and salt file names
(\code{nf_file_names()} and \code{natsalt_file_names()}), the natural flow gage names
(\code{nf_gage_names()}), and the abbreviations (\code{nf_gage_abbrv()}) of the natural
flow gage names in the standard node order used by Reclamation and expected
by CRSS.
}
\details{
\code{nf_file_names()} and \code{natsalt_file_names()} return file names that
CRSS is expecting to read in for natural flow and salt input data.
\code{nf_gage_names()} returns a vector with the USGS gage names used by
CRSS corresponding to the natural flow basins. The order matches the node
order used by Reclamation and CRSS and thus should not be modified.
\code{nf_gage_abbrv()} returns an abbreviated shorthand name for the natural flow
gages/nodes, because it is desirable to have shorthand names for many
purposes, i.e., variable names.
}
\examples{
fileNames <- nf_file_names()
\dontrun{
iFile <- 'NaturalFlows1906-2012_withExtensions_1.8.15.xlsx'
crssi_create_dnf_files(iFile,'NFSinput/','2015-1-31',50,fileNames)
}
# get the gage name for node 20
nf_gage_names()[20]
# and its shorthand name
nf_gage_abbrv()[20]
# and then see the CRSS natural inflow file name corresponding to this gage
nf_file_names()[20]
}
\seealso{
\code{\link{crssi_create_dnf_files}}
}
| /man/nf_natsalt_names.Rd | no_license | rabutler-usbr/CRSSIO | R | false | true | 1,929 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nf_natsalt_names.R
\name{nf_file_names}
\alias{nf_file_names}
\alias{natsalt_file_names}
\alias{nf_gage_names}
\alias{nf_gage_abbrv}
\title{Get vector of natural flow and salt gage names, filenames, and abbreviations}
\usage{
nf_file_names(version = 5)
natsalt_file_names(version = 5)
nf_gage_names()
nf_gage_abbrv()
}
\arguments{
\item{version}{The CRSS version number. Current version of CRSS is 5. Valid
versions are 1-5.}
}
\value{
Vector of characters with 29 entries (file names, gage names, gage
abbreviations).
}
\description{
These functions return the natural flow and salt file names
(\code{nf_file_names()} and \code{natsalt_file_names()}), the natural flow gage names
(\code{nf_gage_names()}), and the abbreviations (\code{nf_gage_abbrv()}) of the natural
flow gage names in the standard node order used by Reclamation and expected
by CRSS.
}
\details{
\code{nf_file_names()} and \code{natsalt_file_names()} return file names that
CRSS is expecting to read in for natural flow and salt input data.
\code{nf_gage_names()} returns a vector with the USGS gage names used by
CRSS corresponding to the natural flow basins. The order matches the node
order used by Reclamation and CRSS and thus should not be modified.
\code{nf_gage_abbrv()} returns an abbreviated shorthand name for the natural flow
gages/nodes, because it is desirable to have shorthand names for many
purposes, i.e., variable names.
}
\examples{
fileNames <- nf_file_names()
\dontrun{
iFile <- 'NaturalFlows1906-2012_withExtensions_1.8.15.xlsx'
crssi_create_dnf_files(iFile,'NFSinput/','2015-1-31',50,fileNames)
}
# get the gage name for node 20
nf_gage_names()[20]
# and its shorthand name
nf_gage_abbrv()[20]
# and then see the CRSS natural inflow file name corresponding to this gage
nf_file_names()[20]
}
\seealso{
\code{\link{crssi_create_dnf_files}}
}
|
\alias{gdkWindowGeometryChanged}
\name{gdkWindowGeometryChanged}
\title{gdkWindowGeometryChanged}
\description{This function informs GDK that the geometry of an embedded
offscreen window has changed. This is necessary for GDK to keep
track of which offscreen window the pointer is in.}
\usage{gdkWindowGeometryChanged(object)}
\arguments{\item{\verb{object}}{an embedded offscreen \code{\link{GdkWindow}}}}
\details{Since 2.18}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/gdkWindowGeometryChanged.Rd | no_license | lawremi/RGtk2 | R | false | false | 499 | rd | \alias{gdkWindowGeometryChanged}
\name{gdkWindowGeometryChanged}
\title{gdkWindowGeometryChanged}
\description{This function informs GDK that the geometry of an embedded
offscreen window has changed. This is necessary for GDK to keep
track of which offscreen window the pointer is in.}
\usage{gdkWindowGeometryChanged(object)}
\arguments{\item{\verb{object}}{an embedded offscreen \code{\link{GdkWindow}}}}
\details{Since 2.18}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
#' ---
#' author: "Zhenyok Nazedwox"
#' ---
# Parse a saved Forbes billionaires HTML page and explore the wealth figures.
html <- readLines("../data/forbes.htm")
length(html)      # number of lines in the page
sum(nchar(html))  # total number of characters
# Lines that hold the net-worth table cells.
worth_cell_pattern <- "<td class=\"worth\">(.*)</td>"
worth <- grep(worth_cell_pattern, html, value = TRUE)
# First "$NN,N billion"-style fragment within each cell.
amount_pattern <- "\\W{1}\\d+\\W{1}\\d*\\s?\\w+"
worth <- regmatches(worth, regexpr(amount_pattern, worth))
worth
# Lines that hold person names (<h3> headers); the first two hits are
# non-person headers and are dropped.
name_line_pattern <- "<h3>\\w+"
rich <- grep(name_line_pattern, html, value = TRUE, perl = TRUE)
rich <- rich[3:length(rich)]
full_name_pattern <- "(\\w*.?\\s+\\w*&?\\s?\\w+|-\\w+)+[^<]?"
rich <- regmatches(rich, regexpr(full_name_pattern, rich, perl = TRUE))
rich
# Numeric worth in billions: pull the number and turn decimal commas into dots.
worths <- as.double(gsub(",", ".", regmatches(worth, regexpr("\\d+(\\.|,)?\\d*", worth))))
worths
df <- data.frame(rich, worths)
names(df) <- c("Name", "Worth")
df
nrow(df)
# Who tops the list (possibly several people tied at the maximum)?
richest <- df[df$Worth == max(df$Worth), ]
richest
nrow(richest)
df[df$Name == "Larry Ellison", ]
df[duplicated(df$Worth), ][1:10, ]
# Worth in dollars.
worthsB <- worths * 1000000000
worthsB
median(worthsB)
mean(worthsB)
sum(worthsB > 5000000000)
sum(worthsB > 10000000000)
sum(worthsB > 25000000000)
sum(worthsB)
# Share of the total wealth held by the top 5 and top 20 entries.
sum(worthsB[1:5]) / sum(worthsB)
sum(worthsB[1:20]) / sum(worthsB)
# Compare against total US household net worth.
householdNetWorth <- 90196 * 1000000000
sum(worthsB) / householdNetWorth
| /lab3/lab3.R | permissive | Nazedwox/da2016 | R | false | false | 1,098 | r | #' ---
#' author: "Zhenyok Nazedwox"
#' ---
# Parse a saved Forbes billionaires HTML page and explore the wealth figures.
html = readLines("../data/forbes.htm")
length(html)
sum(nchar(html))
# Lines holding the net-worth table cells.
regex = "<td class=\"worth\">(.*)</td>"
worth = grep(regex, html, value=TRUE)
# First "$NN,N billion"-style fragment within each cell.
regex = "\\W{1}\\d+\\W{1}\\d*\\s?\\w+"
worth = regmatches(worth, regexpr(regex, worth))
worth
# Lines holding person names (<h3> headers); first two hits are dropped
# (presumably non-person headers -- confirm against the page).
regex = "<h3>\\w+"
rich = grep(regex, html, value=TRUE, perl = TRUE)
rich = rich[3:length(rich)]
regex = "(\\w*.?\\s+\\w*&?\\s?\\w+|-\\w+)+[^<]?"
rich = regmatches(rich, regexpr(regex, rich, perl = TRUE))
rich
# Numeric worth in billions: pull the number, decimal comma -> dot.
worths = as.double(gsub(",", ".", regmatches(worth, regexpr("\\d+(\\.|,)?\\d*", worth))))
worths
df = data.frame(rich, worths)
names(df) = c("Name", "Worth")
df
nrow(df)
# Entries tied at the maximum worth.
richest = df[df$Worth == max(df$Worth),]
richest
nrow(richest)
df[df$Name == "Larry Ellison",]
df[duplicated(df$Worth),][1:10,]
# Worth in dollars.
worthsB = worths * 1000000000
worthsB
median(worthsB)
mean(worthsB)
sum(worthsB > 5000000000)
sum(worthsB > 10000000000)
sum(worthsB > 25000000000)
sum(worthsB)
# Share of total wealth held by the top 5 / top 20 entries.
sum(worthsB[1:5]) / sum(worthsB)
sum(worthsB[1:20]) / sum(worthsB)
# Compare against total US household net worth.
householdNetWorth = 90196 * 1000000000
sum(worthsB) / householdNetWorth
|
# Polynomial regression on the abalone data set
# http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/
ds <- read.csv('C:/Users/91944/Documents/R/praxis/Machine Learning/abalone.csv')
head(ds)            # top 6 observations
dim(ds)             # dimensions of the data set
sapply(ds, class)   # column types
summary(ds)         # summary statistics per attribute
colSums(is.na(ds))  # missing values per column
nums <- unlist(lapply(ds, is.numeric))  # numeric columns only
correlation <- cor(ds[, nums], method = "pearson")
print(correlation)
library(corrplot)
# create correlation plot
corrplot(correlation, method = "circle")
# Scatter plot matrix
pairs(ds[, nums])
# Height is ignored (low correlation).
# Whole.weight ~= Shucked + Viscera + Shell weight, with similar correlations.
fit <- lm(Rings ~ Whole.weight, data = ds)  # RSE 2.713, adj. R^2 0.2919
summary(fit)
fit2 <- lm(Rings ~ Length, data = ds)       # RSE 2.679, adj. R^2 0.3098
summary(fit2)
fit3 <- lm(Rings ~ Diameter, data = ds)     # RSE 2.639, adj. R^2 0.3301
summary(fit3)
# Diameter has the lowest residual error and best adjusted R^2, so use it.
rand <- sample(nrow(ds), 3600)  # ~86% of the 4177 rows go to training
train <- ds[rand, ]
test <- ds[-rand, ]
#=============================================================================================
# FITTING POLYNOMIAL REGRESSIONS OF ORDER 1 THROUGH 10
#=============================================================================================
# Random subsample of 78 training rows.
n <- 78
rand1 <- sample(nrow(train), n)
trainrand1 <- train[rand1, ]
# BUG FIX: the original fitted order 10 with loess.smooth(), which is not a
# polynomial regression and does not accept a formula interface, so
# fitted(m10) below would fail. All ten degrees now use lm();
# poly(..., raw = TRUE) reproduces the original I(Diameter^k) terms exactly.
models <- lapply(1:10, function(degree) {
  lm(Rings ~ poly(Diameter, degree, raw = TRUE), data = trainrand1)
})
for (m in models) print(m)
# PLOTTING THE MODELS OVER THE DATA
plot(trainrand1$Diameter, trainrand1$Rings, pch = 19, cex = 0.1)  # pch = symbol, cex = size
cols <- c("red", "blue", "green", "yellow", "pink", "violet",
          "orange", "magenta", "purple", "grey")
ord <- order(trainrand1$Diameter)
for (k in seq_along(models)) {
  lines(trainrand1$Diameter[ord], fitted(models[[k]])[ord],
        col = cols[k], type = 'l', lwd = 2)
}
legend("topleft", legend = paste("Degree", 1:10),
       col = cols, lty = 1:2, cex = 0.41, box.lty = 2)
| /poly2.R | no_license | Saheer7/polynomial-regression | R | false | false | 6,593 | r | #Polynomial Regression
#Importing data set
#http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/
ds=read.csv('C:/Users/91944/Documents/R/praxis/Machine Learning/abalone.csv')
head(ds) #Top 6 observations
dim(ds) #Dimensions of dataset
sapply(ds,class) #Check Data type
summary(ds) #Calculate summary of attributes
colSums(is.na(ds)) #Checking the number of missing values in columns
nums <- unlist(lapply(ds, is.numeric)) #Storing numerical seperately
correlation=cor(ds[,nums],method="pearson") #Correlation values
print(correlation)
library(corrplot)
# create correlation plot
corrplot(correlation, method="circle")
#Scatter plot matrix
pairs(ds[,nums])
#Ignore Height as low correlation
#Whole-Weight = Shucked weight+ Viscera weight + Shell weight #Almost equal and corelations are same
fit=lm(ds$Rings~Whole.weight,data=ds) #Residual standard error: 2.713 #Adjusted R-squared: 0.2919
summary(fit)
fit2=lm(ds$Rings~Length,data=ds) #Residual standard error: 2.679 #Adjusted R-squared: 0.3098
summary(fit2)
fit3=lm(ds$Rings~Diameter,data=ds) #Residual standard error: 2.639 #Adjusted R-squared: 0.3301
summary(fit3)
#Less R.Std.error and good Adjusted R-square so considering Diameter
# NOTE(review): 3600/4177 is ~86%, not 88% as the comment below says.
rand = sample(1:nrow(ds),3600) #nrow(...) means length,, 1 to that length take 20 random #it takes index
train = ds[rand, ] # 88% in train
test = ds[-rand, ] # other than that remaining in test
#=============================================================================================
# FITTING A POLYNOMIAL REGRESSION OF ORDER 1
#=============================================================================================
#Sample size 78
n=78
rand1 = sample(1:nrow(train),n)
trainrand1=train[rand1, ]
m1 <- lm(Rings ~ Diameter, trainrand1)
m1
#=============================================================================================
# FITTING A POLYNOMIAL REGRESSION OF ORDER 2
#=============================================================================================
m2 <- lm(Rings ~ Diameter+I(Diameter^2), trainrand1)
m2
#=============================================================================================
# FITTING A POLYNOMIAL REGRESSION OF ORDER 3
#=============================================================================================
m3 <- lm(Rings ~ Diameter+I(Diameter^2)+I(Diameter^3), trainrand1)
m3
#=============================================================================================
# FITTING A POLYNOMIAL REGRESSION OF ORDER 4
#=============================================================================================
m4 <- lm(Rings ~ Diameter+I(Diameter^2)+I(Diameter^3)+I(Diameter^4), trainrand1)
m4
#=============================================================================================
# FITTING A POLYNOMIAL REGRESSION OF ORDER 5
#=============================================================================================
m5 <- lm(Rings ~ Diameter+I(Diameter^2)+I(Diameter^3)+I(Diameter^4)+I(Diameter^5), trainrand1)
m5
#=============================================================================================
# FITTING A POLYNOMIAL REGRESSION OF ORDER 6
#=============================================================================================
m6 <- lm(Rings ~ Diameter+I(Diameter^2)+I(Diameter^3)+I(Diameter^4)+I(Diameter^5)+I(Diameter^6), trainrand1)
m6
#=============================================================================================
# FITTING A POLYNOMIAL REGRESSION OF ORDER 7
#=============================================================================================
m7 <- lm(Rings ~ Diameter+I(Diameter^2)+I(Diameter^3)+I(Diameter^4)+I(Diameter^5)+I(Diameter^6)+I(Diameter^7), trainrand1)
m7
#=============================================================================================
# FITTING A POLYNOMIAL REGRESSION OF ORDER 8
#=============================================================================================
m8 <- lm(Rings ~ Diameter+I(Diameter^2)+I(Diameter^3)+I(Diameter^4)+I(Diameter^5)+I(Diameter^6)+I(Diameter^7)+I(Diameter^8), trainrand1)
m8
#=============================================================================================
# FITTING A POLYNOMIAL REGRESSION OF ORDER 9
#=============================================================================================
m9 <- lm(Rings ~ Diameter+I(Diameter^2)+I(Diameter^3)+I(Diameter^4)+I(Diameter^5)+I(Diameter^6)+I(Diameter^7)+I(Diameter^8)+I(Diameter^9), trainrand1)
m9
#=============================================================================================
# FITTING A POLYNOMIAL REGRESSION OF ORDER 10
#=============================================================================================
# NOTE(review): loess.smooth() is NOT a polynomial regression -- it takes
# (x, y), not a formula, so this call and fitted(m10) below are broken.
# It should be an lm() fit like m1..m9, with +I(Diameter^10) added.
m10 <- loess.smooth(Rings ~ Diameter+I(Diameter^2)+I(Diameter^3)+I(Diameter^4)+I(Diameter^5)+I(Diameter^6)+I(Diameter^7)+I(Diameter^8)+I(Diameter^9)+I(Diameter^10), trainrand1)
m10
#PLOTTING THE MODEL OVER THE DATA
plot(trainrand1$Diameter,trainrand1$Rings, pch=19, cex=0.1) #pch= symbols, cex=symbol size
lines(sort(trainrand1$Diameter), fitted(m1)[order(trainrand1$Diameter)], col='red', type='l',lwd=2)
lines(sort(trainrand1$Diameter), fitted(m2)[order(trainrand1$Diameter)], col='blue', type='l',lwd=2)
lines(sort(trainrand1$Diameter), fitted(m3)[order(trainrand1$Diameter)], col='green', type='l',lwd=2)
lines(sort(trainrand1$Diameter), fitted(m4)[order(trainrand1$Diameter)], col='yellow', type='l',lwd=2)
lines(sort(trainrand1$Diameter), fitted(m5)[order(trainrand1$Diameter)], col='pink', type='l',lwd=2)
lines(sort(trainrand1$Diameter), fitted(m6)[order(trainrand1$Diameter)], col='violet', type='l',lwd=2)
lines(sort(trainrand1$Diameter), fitted(m7)[order(trainrand1$Diameter)], col='orange', type='l',lwd=2)
lines(sort(trainrand1$Diameter), fitted(m8)[order(trainrand1$Diameter)], col='magenta', type='l',lwd=2)
lines(sort(trainrand1$Diameter), fitted(m9)[order(trainrand1$Diameter)], col='purple', type='l',lwd=2)
lines(sort(trainrand1$Diameter), fitted(m10)[order(trainrand1$Diameter)], col='grey', type='l',lwd=2)
legend("topleft", legend=c("Degree 1", "Degree 2","Degree 3","Degree 4","Degree 5","Degree 6","Degree 7","Degree 8","Degree 9","Degree 10"),
       col=c("red", "blue","green","yellow","pink","violet","orange","magenta","purple","grey"), lty=1:2, cex=0.41,box.lty=2)
|
# deut_analysis.R
# Fits constant vs time-dependent ("LSN") models to a deuterium series,
# then bootstraps sampling frequency, tau distributions, and a Monte Carlo
# model comparison; results are saved to deut2.Rdat.
# NOTE(review): rm(list=ls()) wipes the caller's workspace; kept because this
# script is clearly designed to be run standalone, but avoid in new code.
rm(list=ls())
# library() errors immediately when a package is missing; require() only
# returns FALSE and would let the script fail later with a cryptic error.
library(warningsignals)
###############
library(socialR)
script <- "deut2_analysis.R"
gitaddr <- gitcommit(script)   # record the git commit for provenance
tags="warningsignals, stochpop"
###############
cpu <- 1        # number of cores for the bootstrap routines
nboot <- 500    # bootstrap replicates
freq <- c(25, 50, 200)  # sampling efforts to evaluate
source("analysis.R")
data(deuterium)
i <- 2 ## Which deut?
m <- fit_models(deuterium[[i]], "LSN")
sampling <- sampling_freq(m$const, m$timedep, cpu=cpu, nboot=nboot,
sample_effort=freq)
taus <- reformat_tau_dists(bootstrap_tau(m$X, m$const, m$timedep,
cpu=cpu, nboot=nboot))
mc <- remove_unconverged(montecarlotest(m$const, m$timedep,
cpu=cpu, nboot=nboot))
indicator_sampling <- indicator_sampling_freq(m, cpu, nboot,
sample_effort=freq)
save(list=ls(), file="deut2.Rdat")
| /demo/manuscript/deut2_analysis.R | no_license | cboettig/warningsignals | R | false | false | 878 | r | # deut_analysis.R
rm(list=ls())
require(warningsignals)
###############
require(socialR)
script <- "deut2_analysis.R"
gitaddr <- gitcommit(script)
tags="warningsignals, stochpop"
###############
cpu <- 1
nboot <- 500
freq <- c(25, 50, 200)
source("analysis.R")
data(deuterium)
i <- 2 ## Which deut?
m <- fit_models(deuterium[[i]], "LSN")
sampling <- sampling_freq(m$const, m$timedep, cpu=cpu, nboot=nboot,
sample_effort=freq)
taus <- reformat_tau_dists(bootstrap_tau(m$X, m$const, m$timedep,
cpu=cpu, nboot=nboot))
mc <- remove_unconverged(montecarlotest(m$const, m$timedep,
cpu=cpu, nboot=nboot))
indicator_sampling <- indicator_sampling_freq(m, cpu, nboot,
sample_effort=freq)
save(list=ls(), file="deut2.Rdat")
|
channel_groupings <- function(df) {
  # Pie chart of the percentage share of each channel grouping in `df`.
  # count() is the one-step equivalent of group_by + summarise(n = n()) + ungroup.
  shares <- df %>%
    dplyr::count(channel_grouping, name = "n") %>%
    dplyr::mutate(all = round(n / sum(n) * 100, 2))
  # Hide both axes: a pie chart needs no grid, zero line, or tick labels.
  blank_axis <- list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE)
  pie_chart <- plot_ly(
    shares,
    labels = ~channel_grouping,
    values = ~all,
    type = "pie"
  )
  pie_chart <- layout(
    pie_chart,
    title = "",
    xaxis = blank_axis,
    yaxis = blank_axis
  )
  return(pie_chart)
}
| /R/channel_groupings.R | no_license | muzairaslam/GoogleAnalyticsDashboard | R | false | false | 518 | r | channel_groupings <- function(df) {
data <- df %>%
dplyr::group_by(channel_grouping) %>%
dplyr::summarise(n = dplyr::n()) %>%
dplyr::ungroup() %>%
dplyr::mutate(all = round(n / sum(n) * 100, 2))
fig <- data %>%
plot_ly(labels = ~channel_grouping, values = ~all, type = "pie")
fig <- fig %>% layout(
title = "",
xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE)
)
return(fig)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cumnorm.R
\name{slope.cumnorm}
\alias{slope.cumnorm}
\title{slope.cumnorm}
\usage{
slope.cumnorm(x, m = -2, std = 1)
}
\arguments{
\item{x}{PLC value}
\item{m}{mean (P50)}
\item{std}{standard deviation}
}
\description{
Returns the slope of the cumulative normal function evaluated at PLC value \code{x}.
}
\author{
Félicien Meunier
}
| /man/slope.cumnorm.Rd | no_license | femeunier/LianaHydro | R | false | true | 355 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cumnorm.R
\name{slope.cumnorm}
\alias{slope.cumnorm}
\title{slope.cumnorm}
\usage{
slope.cumnorm(x, m = -2, std = 1)
}
\arguments{
\item{x}{PLC value}
\item{m}{mean (P50)}
\item{std}{standard deviation}
}
\description{
Returns slope of cumnorm
}
\author{
Félicien Meunier
}
|
#library script
library(ape)
library(tidyverse)
library(geiger)
library(ggplot2)
library(dplyr)
library(phytools)
library(here)
library(ggpubr)
library(viridis)
library(mapdata)
library(maps)
library(maptools)
library(grDevices)
library(rethinking)
library(bayestraitr)
library(ggtree)
library(pheatmap)
library(RColorBrewer)
library(patchwork)
library(ggrepel)
source('world_tree_transformation.R')
#script written by Simon Greenhill and Olena Shcherbakova
# helper function to convert and load trait data into a named vector for
# plotting.
get_trait_vector <- function(tree, data, variable) {
  # Pull `variable` out of `data` in tip-label order and return it as a
  # vector named by the tree's tips, ready for plotting functions.
  tips <- tree$tip.label
  trait <- data[tips, variable]
  trait[is.na(trait)] <- 0  # set NA's to zero to enable plotting.
  setNames(trait, tips)
}
# Feature groups defining the nominal-words complexity metric.
# Each element is one grammatical domain; create_metric() first averages the
# features within a group and then averages across groups, so every domain
# contributes equally regardless of how many features it contains.
# Plain names ("singular", "gender_nouns", ...) are binary columns derived
# from raw Grambank features inside load_data().
Nominal_words_complexity <- list(
#cases in nouns and pronouns:
c("GB070", "GB072", "GB071", "GB073"),
#number in nouns
c("singular", "dual", "plural", "trial", "paucal", "GB031", "GB185", "GB186"),
#gender in nouns
c("gender_nouns", "gender_pronouns", "GB170", "GB198", "GB172", "GB171", "GB057"),
#possession marked on nouns:
c("possession_on_possessor", "possession_on_possessed"),
#articles
c("GB020", "GB021")
)
# Feature groups defining the verbal complexity metric (same two-level
# averaging scheme as Nominal_words_complexity above).
Verbal_complexity <- list(
#marking of arguments:
c("S_arg", "A_arg", "P_arg"),
#transitivity-related features:
c("GB113", "GB124", "GB149", "GB155", "passive", "antipassive"),
#negation:
c("negation"),
#tenses:
c("tense"),
#aspect:
c("aspect"),
#mood:
c("mood"),
#markers_arguments_non_core:
c("GB103", "GB104", "GB108", "reflexivity", "reciprocity"),
#syntactic features:
c("GB151", "GB152")
)
# A function to create a metric from a dataset and a given set of recodings.
#
# df:        data.frame containing all columns named inside `recodings`.
# recodings: list of character vectors; each vector is one feature group.
# Returns the per-row metric: mean of the group means (equal weight per group).
create_metric <- function(df, recodings) {
  # One column of group means per recoding group.
  # BUG FIX: sapply(simplify = TRUE) collapses to a plain vector when df has
  # a single row, which makes rowMeans(scores) error; do.call(cbind, ...)
  # always yields an n x k matrix.
  scores <- do.call(
    cbind,
    lapply(recodings, function(varlist) rowMeans(df[varlist], na.rm = TRUE))
  )
  rowMeans(scores, na.rm = TRUE)
}
# Function to load trees from DPLACE-data repository
# can be used either with or without renameto = 'glottocode'
#
# dirname:     directory holding summary.trees / posterior.trees and taxa.csv
# type:        which tree file to read ('summary' or 'posterior')
# mappingfile: CSV with a `taxon` column plus the column named by `renameto`
# renameto:    column of `mappingfile` to rename tips to (NA = keep tip names)
# Returns an ape multiPhylo object.
load_trees <- function(dirname, type='posterior', mappingfile='taxa.csv', renameto=NA) {
  # Resolve which tree file to read.
  if (type == 'summary') {
    treefile <- file.path(dirname, 'summary.trees')
  } else if (type == 'posterior') {
    treefile <- file.path(dirname, 'posterior.trees')
  } else {
    stop(paste("Unknown Tree Type:", type))
  }
  if (!file.exists(treefile)) {
    stop(paste("Invalid file:", treefile))
  }
  trees <- ape::read.nexus(treefile)
  # A single tree comes back as class 'phylo'; wrap it so the loop below can
  # always treat `trees` as multiPhylo. inherits() is the robust class test
  # (class() can return a vector, making `==` fragile).
  if (inherits(trees, 'phylo')) {
    trees <- c(trees)
    class(trees) <- 'multiPhylo'
  }
  # make full path if just given taxa.csv
  if (mappingfile == 'taxa.csv') { mappingfile <- file.path(dirname, mappingfile) }
  if (file.exists(mappingfile) && is.na(renameto) == FALSE) {
    mapping <- read.csv(mappingfile, header = TRUE, stringsAsFactors = FALSE, na.string="")
    # check the required columns exist
    if ('taxon' %in% colnames(mapping) == FALSE) stop(paste('column `taxon` not in', mappingfile))
    if (renameto %in% colnames(mapping) == FALSE) stop(paste('colname', renameto, 'not in', mappingfile))
    trees <- ape::.uncompressTipLabel(trees)
    # seq_along() is safe if `trees` is ever empty (1:length would give 1:0).
    for (i in seq_along(trees)) {
      # remove tips not in `taxon` mapping
      missing <- trees[[i]]$tip.label[trees[[i]]$tip.label %in% mapping[['taxon']] == FALSE]
      if (length(missing) > 0) {
        trees[[i]] <- ape::drop.tip(trees[[i]], missing)
      }
      # remove tips whose `renameto` value is missing in the mapping
      missing <- mapping[is.na(mapping[[renameto]]), 'taxon']
      if (length(missing) > 0) {
        trees[[i]] <- ape::drop.tip(trees[[i]], missing)
      }
      # drop tips that would collide with each other after renaming
      dupes <- mapping[duplicated(mapping[[renameto]], incomparables=NA), ]
      if (nrow(dupes) > 0) {
        warning(paste("Removing ", nrow(dupes), "tips that will be duplicated after rename:", paste(dupes[['taxon']], collapse=", ")))
        trees[[i]] <- ape::drop.tip(trees[[i]], dupes[['taxon']])
      }
      # rename tips
      matches <- match(trees[[i]]$tip.label, mapping[['taxon']])
      trees[[i]]$tip.label <- mapping[matches, renameto]
    }
    # NOTE(review): `matches` below is left over from the LAST tree in the
    # loop; this reference label set is only valid if all trees share the same
    # tips after pruning -- TODO confirm against the DPLACE data.
    trees <- ape::.compressTipLabel(trees, ref=mapping[matches, renameto])
  }
  trees
}
# Load the Grambank feature table and build all complexity scores.
#
# filename: path to the tab-separated Grambank export (default "data/GB.tsv").
# Returns a data.frame (rownames = Glottocode) with the two complexity
# metrics (raw and x10 -- BayesTraits rejects very small values) plus one
# mean score per grammatical sub-domain.
load_data <- function(filename="data/GB.tsv") {
  grambank <- read.csv(filename, header = TRUE, sep = '\t', stringsAsFactors=FALSE)
  colnames(grambank)[colnames(grambank)=="Language_ID"] <- "Glottocode" # rename column
  #removing languages that have NAs for Grambank features included in the metric
  grambank <- subset(x=grambank, select=c("Glottocode",
                                          "GB070", "GB072", "GB071", "GB073",
                                          "GB042", "GB316",
                                          "GB043", "GB317",
                                          "GB044", "GB318",
                                          "GB165", "GB319",
                                          "GB166", "GB320",
                                          "GB057",
                                          "GB184", "GB031", "GB185", "GB186",
                                          "GB051", "GB052", "GB053", "GB054", "GB192",
                                          "GB170", "GB196", "GB197", "GB030",
                                          "GB198", "GB172", "GB171",
                                          "GB430", "GB432", "GB431", "GB433",
                                          "GB020", "GB021",
                                          "GB089", "GB090", "GB091", "GB092", "GB093", "GB094",
                                          "GB113", "GB124", "GB149", "GB155", "GB147", "GB302", "GB148", "GB303",
                                          "GB107", "GB298", "GB299",
                                          "GB082", "GB083", "GB084", "GB121", "GB521",
                                          "GB086", "GB120", "GB520",
                                          "GB312", "GB119", "GB519",
                                          "GB103", "GB104", "GB108", "GB114", "GB305", "GB115", "GB306",
                                          "GB151", "GB152"))
  grambank <- na.omit(grambank)
  # Helper: 1 if any of `cols` is coded > 0 for a language, else 0.
  # Vectorised replacement for the original twenty per-row for-loops; the
  # output is identical (per row: sum(as.numeric(...), na.rm = TRUE) > 0).
  # do.call(cbind, ...) keeps a matrix even when only one language remains.
  any_coded <- function(df, cols) {
    vals <- do.call(cbind, lapply(df[cols], as.numeric))
    as.numeric(rowSums(vals, na.rm = TRUE) > 0)
  }
  # Derived binary features: each new column is 1 when any of its source
  # Grambank features is coded positive (same order as the original loops).
  derived <- list(
    singular                = c("GB042", "GB316"),
    dual                    = c("GB043", "GB317"),
    plural                  = c("GB044", "GB318"),
    trial                   = c("GB165", "GB319"),
    paucal                  = c("GB166", "GB320"),
    possession_on_possessed = c("GB431", "GB433"),
    possession_on_possessor = c("GB430", "GB432"),
    gender_nouns            = c("GB051", "GB052", "GB053", "GB054", "GB192"),
    gender_pronouns         = c("GB196", "GB197", "GB030"),
    S_arg                   = c("GB089", "GB090"),
    A_arg                   = c("GB091", "GB092"),
    P_arg                   = c("GB093", "GB094"),
    passive                 = c("GB147", "GB302"),
    antipassive             = c("GB148", "GB303"),
    negation                = c("GB107", "GB298", "GB299"),
    tense                   = c("GB082", "GB083", "GB084", "GB121", "GB521"),
    reflexivity             = c("GB114", "GB305"),
    reciprocity             = c("GB115", "GB306"),
    aspect                  = c("GB086", "GB120", "GB520"),
    mood                    = c("GB312", "GB119", "GB519")
  )
  for (feature in names(derived)) {
    grambank[[feature]] <- any_coded(grambank, derived[[feature]])
  }
  # setup metrics..
  #Metric 1. Verbal domain
  grambank$Verbal_complexity <- create_metric(grambank, Verbal_complexity)
  #Metric 2. Nominal words domain
  grambank$Nominal_words_complexity <- create_metric(grambank, Nominal_words_complexity)
  #multiplying scores by 10 (otherwise BayesTraits complains about the values)
  grambank$Verbal_complexity_10 <- grambank$Verbal_complexity * 10
  grambank$Nominal_words_complexity_10 <- grambank$Nominal_words_complexity * 10
  # Per-domain mean scores (used for finer-grained analyses).
  grambank$case <- rowMeans(grambank[,c("GB070", "GB071", "GB072", "GB073")])
  grambank$number <- rowMeans(grambank[,c("singular", "dual", "plural", "trial", "paucal", "GB031", "GB185", "GB186", "GB057")])
  grambank$gender <- rowMeans(grambank[,c("gender_nouns", "gender_pronouns", "GB170", "GB198", "GB172", "GB171")])
  grambank$possession <- rowMeans(grambank[,c("possession_on_possessor", "possession_on_possessed")])
  grambank$articles <- rowMeans(grambank[,c("GB020", "GB021")])
  grambank$arguments <- rowMeans(grambank[,c("S_arg", "A_arg", "P_arg")])
  grambank$transitivity <- rowMeans(grambank[,c("GB113", "GB124", "GB149", "GB155", "passive", "antipassive")])
  grambank$markers_arguments_non_core <- rowMeans(grambank[,c("GB103", "GB104", "GB108", "reflexivity", "reciprocity")])
  grambank$clause_v <- rowMeans(grambank[,c("GB151", "GB152")])
  sample <- subset(x = grambank, select = c("Glottocode", "Verbal_complexity_10", "Nominal_words_complexity_10", "Verbal_complexity", "Nominal_words_complexity", "case", "number", "gender", "possession", "articles", "arguments", "transitivity", "markers_arguments_non_core", "clause_v", "negation", "tense", "aspect", "mood"))
  rownames(sample) <- sample$Glottocode
  sample
}
| /library.R | permissive | OlenaShcherbakova/A_quantitative_global_test_of_the_complexity_trade_off_hypothesis | R | false | false | 13,667 | r | #library script
library(ape)
library(tidyverse)
library(geiger)
library(ggplot2)
library(dplyr)
library(phytools)
library(here)
library(ggpubr)
library(viridis)
library(mapdata)
library(maps)
library(maptools)
library(grDevices)
library(rethinking)
library(bayestraitr)
library(ggtree)
library(pheatmap)
library(RColorBrewer)
library(patchwork)
library(ggrepel)
source('world_tree_transformation.R')
#script written by Simon Greenhill and Olena Shcherbakova
# helper function to convert and load trait data into a named vector for
# plotting.
get_trait_vector <- function(tree, data, variable) {
x <- data[tree$tip.label, variable]
x[is.na(x)] <- 0 # set NA's to zero to enable plotting.
names(x) <- tree$tip.label
x
}
Nominal_words_complexity <- list(
#cases in nouns and pronouns:
c("GB070", "GB072", "GB071", "GB073"),
#number in nouns
c("singular", "dual", "plural", "trial", "paucal", "GB031", "GB185", "GB186"),
#gender in nouns
c("gender_nouns", "gender_pronouns", "GB170", "GB198", "GB172", "GB171", "GB057"),
#possession marked on nouns:
c("possession_on_possessor", "possession_on_possessed"),
#articles
c("GB020", "GB021")
)
Verbal_complexity <- list(
#marking of arguments:
c("S_arg", "A_arg", "P_arg"),
#transitivity-related features:
c("GB113", "GB124", "GB149", "GB155", "passive", "antipassive"),
#negation:
c("negation"),
#tenses:
c("tense"),
#aspect:
c("aspect"),
#mood:
c("mood"),
#markers_arguments_non_core:
c("GB103", "GB104", "GB108", "reflexivity", "reciprocity"),
#syntactic features:
c("GB151", "GB152")
)
# A function to create a metric from a dataset and a given set of recodings.
create_metric <- function(df, recodings) {
# apply rowMeans to all the sets of variables in the recodings list
scores <- sapply(
recodings,
function(df, varlist) { rowMeans(df[varlist], na.rm = TRUE) },
df=df, simplify=TRUE
)
rowMeans(scores, na.rm = TRUE)
}
# Function to load trees from DPLACE-data repository
# can be used either with or without renameto = 'glottocode'
load_trees <- function(dirname, type='posterior', mappingfile='taxa.csv', renameto=NA) {
# check file type
if (type == 'summary') {
treefile <- file.path(dirname, 'summary.trees')
}
else if (type == 'posterior') {
treefile <- file.path(dirname, 'posterior.trees')
} else {
stop(paste("Unknown Tree Type:", type))
}
# check file exists
if (file.exists(treefile) == FALSE) {
stop(paste("Invalid file:", treefile))
}
trees <- ape::read.nexus(treefile)
if (class(trees) == 'phylo') { trees <- c(trees) ; class(trees) <- 'multiPhylo' }
# make full path if just given taxa.csv
if (mappingfile == 'taxa.csv') { mappingfile <- file.path(dirname, mappingfile) }
if (file.exists(mappingfile) & is.na(renameto) == FALSE) {
mapping <- read.csv(mappingfile, header = TRUE, stringsAsFactors = FALSE, na.string="")
# check the required columns exist
if ('taxon' %in% colnames(mapping) == FALSE) stop(paste('column `taxon` not in', mappingfile))
if (renameto %in% colnames(mapping) == FALSE) stop(paste('colname', renameto, 'not in', mappingfile))
trees <- ape::.uncompressTipLabel(trees)
for (i in 1:length(trees)){
# remove tips not in `taxon` mapping
missing <- trees[[i]]$tip.label[trees[[i]]$tip.label %in% mapping[['taxon']] == FALSE]
if (length(missing) > 0) {
trees[[i]] <- ape::drop.tip(trees[[i]], missing)
}
# remove tips not in `renameto` mapping
missing <- mapping[is.na(mapping[[renameto]]), 'taxon']
if (length(missing) > 0) {
trees[[i]] <- ape::drop.tip(trees[[i]], missing)
}
# handle duplicate rename tips
dupes <- mapping[duplicated(mapping[[renameto]], incomparables=NA), ]
if (nrow(dupes)) {
warning(paste("Removing ", nrow(dupes), "tips that will be duplicated after rename:", paste(dupes[['taxon']], collapse=", ")))
trees[[i]] <- ape::drop.tip(trees[[i]], dupes[['taxon']])
}
# rename tips
matches <- match(trees[[i]]$tip.label, mapping[['taxon']])
trees[[i]]$tip.label <- mapping[matches, renameto]
}
trees <- ape::.compressTipLabel(trees, ref=mapping[matches, renameto])
}
trees
}
load_data <- function(filename="data/GB.tsv") {
grambank <- read.csv(filename, header = TRUE, sep = '\t', stringsAsFactors=FALSE)
colnames(grambank)[colnames(grambank)=="Language_ID"] <- "Glottocode" # rename column
#removing languages that have NAs for Grambank features included in the metric
grambank <- subset(x=grambank, select=c("Glottocode",
"GB070", "GB072", "GB071", "GB073",
"GB042", "GB316",
"GB043", "GB317",
"GB044", "GB318",
"GB165", "GB319",
"GB166", "GB320",
"GB057",
"GB184", "GB031", "GB185", "GB186",
"GB051", "GB052", "GB053", "GB054", "GB192",
"GB170", "GB196", "GB197", "GB030",
"GB198", "GB172", "GB171",
"GB430", "GB432", "GB431", "GB433",
"GB020", "GB021",
"GB089", "GB090", "GB091", "GB092", "GB093", "GB094",
"GB113", "GB124", "GB149", "GB155", "GB147", "GB302", "GB148", "GB303",
"GB107", "GB298", "GB299",
"GB082", "GB083", "GB084", "GB121", "GB521",
"GB086", "GB120", "GB520",
"GB312", "GB119", "GB519",
"GB103", "GB104", "GB108", "GB114", "GB305", "GB115", "GB306",
"GB151", "GB152"))
grambank <- na.omit(grambank)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB042[i]), as.numeric(grambank$GB316[i])), na.rm = T)
if(summ > 0 ){grambank$singular[i] <- 1}
else(grambank$singular[i] <- 0)
}
grambank$singular <- as.numeric(grambank$singular)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB043[i]), as.numeric(grambank$GB317[i])), na.rm = T)
if(summ > 0 ){grambank$dual[i] <- 1}
else(grambank$dual[i] <- 0)
}
grambank$dual <- as.numeric(grambank$dual)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB044[i]), as.numeric(grambank$GB318[i])), na.rm = T)
if(summ > 0 ){grambank$plural[i] <- 1}
else(grambank$plural[i] <- 0)
}
grambank$plural <- as.numeric(grambank$plural)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB165[i]), as.numeric(grambank$GB319[i])), na.rm = T)
if(summ > 0 ){grambank$trial[i] <- 1}
else(grambank$trial[i] <- 0)
}
grambank$trial <- as.numeric(grambank$trial)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB166[i]), as.numeric(grambank$GB320[i])), na.rm = T)
if(summ > 0 ){grambank$paucal[i] <- 1}
else(grambank$paucal[i] <- 0)
}
grambank$paucal <- as.numeric(grambank$paucal)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB431[i]), as.numeric(grambank$GB433[i])), na.rm = T)
if(summ > 0 ){grambank$possession_on_possessed[i] <- 1}
else(grambank$possession_on_possessed[i] <- 0)
}
grambank$possession_on_possessed <- as.numeric(grambank$possession_on_possessed)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB430[i]), as.numeric(grambank$GB432[i])), na.rm = T)
if(summ > 0 ){grambank$possession_on_possessor[i] <- 1}
else(grambank$possession_on_possessor[i] <- 0)
}
grambank$possession_on_possessor <- as.numeric(grambank$possession_on_possessor)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB051[i]), as.numeric(grambank$GB052[i]), as.numeric(grambank$GB053[i]), as.numeric(grambank$GB054[i]), as.numeric(grambank$GB192[i])), na.rm = T)
if(summ > 0 ){grambank$gender_nouns[i] <- 1}
else(grambank$gender_nouns[i] <- 0)
}
grambank$gender_nouns <- as.numeric(grambank$gender_nouns)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB196[i]), as.numeric(grambank$GB197[i]), as.numeric(grambank$GB030[i])), na.rm = T)
if(summ > 0 ){grambank$gender_pronouns[i] <- 1}
else(grambank$gender_pronouns[i] <- 0)
}
grambank$gender_pronouns <- as.numeric(grambank$gender_pronouns)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB089[i]), as.numeric(grambank$GB090[i])), na.rm = T)
if(summ > 0 ){grambank$S_arg[i] <- 1}
else(grambank$S_arg[i] <- 0)
}
grambank$S_arg <- as.numeric(grambank$S_arg)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB091[i]), as.numeric(grambank$GB092[i])), na.rm = T)
if(summ > 0 ){grambank$A_arg[i] <- 1}
else(grambank$A_arg[i] <- 0)
}
grambank$A_arg <- as.numeric(grambank$A_arg)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB093[i]), as.numeric(grambank$GB094[i])), na.rm = T)
if(summ > 0 ){grambank$P_arg[i] <- 1}
else(grambank$P_arg[i] <- 0)
}
grambank$P_arg <- as.numeric(grambank$P_arg)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB147[i]), as.numeric(grambank$GB302[i])), na.rm = T)
if(summ > 0 ){grambank$passive[i] <- 1}
else(grambank$passive[i] <- 0)
}
grambank$passive <- as.numeric(grambank$passive)
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB148[i]), as.numeric(grambank$GB303[i])), na.rm = T)
if(summ > 0 ){grambank$antipassive[i] <- 1}
else(grambank$antipassive[i] <- 0)
}
# Collapse groups of related Grambank (GB) features into binary indicator
# columns: a language scores 1 on a derived feature when any of its source
# GB features is nonzero.  na.rm = T treats missing source features as
# absent (NOTE(review): T is the reassignable alias of TRUE).
grambank$antipassive <- as.numeric(grambank$antipassive)
# negation indicator: GB107 / GB298 / GB299
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB107[i]), as.numeric(grambank$GB298[i]), as.numeric(grambank$GB299[i])), na.rm = T)
if(summ > 0 ){grambank$negation[i] <- 1}
else(grambank$negation[i] <- 0)
}
grambank$negation <- as.numeric(grambank$negation)
# tense indicator: GB082 / GB083 / GB084 / GB121 / GB521
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB082[i]), as.numeric(grambank$GB083[i]), as.numeric(grambank$GB084[i]), as.numeric(grambank$GB121[i]), as.numeric(grambank$GB521[i])), na.rm = T)
if(summ > 0 ){grambank$tense[i] <- 1}
else(grambank$tense[i] <- 0)
}
grambank$tense <- as.numeric(grambank$tense)
# reflexivity indicator: GB114 / GB305
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB114[i]), as.numeric(grambank$GB305[i])), na.rm = T)
if(summ > 0 ){grambank$reflexivity[i] <- 1}
else(grambank$reflexivity[i] <- 0)
}
grambank$reflexivity <- as.numeric(grambank$reflexivity)
# reciprocity indicator: GB115 / GB306
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB115[i]), as.numeric(grambank$GB306[i])), na.rm = T)
if(summ > 0 ){grambank$reciprocity[i] <- 1}
else(grambank$reciprocity[i] <- 0)
}
grambank$reciprocity <- as.numeric(grambank$reciprocity)
# aspect indicator: GB086 / GB120 / GB520
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB086[i]), as.numeric(grambank$GB120[i]), as.numeric(grambank$GB520[i])), na.rm = T)
if(summ > 0 ){grambank$aspect[i] <- 1}
else(grambank$aspect[i] <- 0)
}
grambank$aspect <- as.numeric(grambank$aspect)
# mood indicator: GB312 / GB119 / GB519
for(i in 1:nrow(grambank)){
summ <- sum(c(as.numeric(grambank$GB312[i]), as.numeric(grambank$GB119[i]), as.numeric(grambank$GB519[i])), na.rm = T)
if(summ > 0 ){grambank$mood[i] <- 1}
else(grambank$mood[i] <- 0)
}
grambank$mood <- as.numeric(grambank$mood)
# setup metrics..
#Metric 1. Verbal domain
grambank$Verbal_complexity <- create_metric(grambank, Verbal_complexity)
#Metric 2. Nominal words domain
grambank$Nominal_words_complexity <- create_metric(grambank, Nominal_words_complexity)
#multiplying scores by 10 (otherwise BayesTraits complains about the values)
grambank$Verbal_complexity_10 <- grambank$Verbal_complexity * 10
grambank$Nominal_words_complexity_10 <- grambank$Nominal_words_complexity * 10
# Composite sub-scores: each is the mean of its constituent binary features.
grambank$case <- rowMeans(grambank[,c("GB070", "GB071", "GB072", "GB073")])
grambank$number <- rowMeans(grambank[,c("singular", "dual", "plural", "trial", "paucal", "GB031", "GB185", "GB186", "GB057")])
grambank$gender <- rowMeans(grambank[,c("gender_nouns", "gender_pronouns", "GB170", "GB198", "GB172", "GB171")])
grambank$possession <- rowMeans(grambank[,c("possession_on_possessor", "possession_on_possessed")])
grambank$articles <- rowMeans(grambank[,c("GB020", "GB021")])
grambank$arguments <- rowMeans(grambank[,c("S_arg", "A_arg", "P_arg")])
grambank$transitivity <- rowMeans(grambank[,c("GB113", "GB124", "GB149", "GB155", "passive", "antipassive")])
grambank$markers_arguments_non_core <- rowMeans(grambank[,c("GB103", "GB104", "GB108", "reflexivity", "reciprocity")])
grambank$clause_v <- rowMeans(grambank[,c("GB151", "GB152")])
# Final per-language table keyed by Glottocode.
# NOTE(review): the local name `sample` masks base::sample within this scope.
sample <- subset(x = grambank, select = c("Glottocode", "Verbal_complexity_10", "Nominal_words_complexity_10", "Verbal_complexity", "Nominal_words_complexity", "case", "number", "gender", "possession", "articles", "arguments", "transitivity", "markers_arguments_non_core", "clause_v", "negation", "tense", "aspect", "mood"))
rownames(sample) <- sample$Glottocode
sample
}
|
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_R.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#'
# -------------------------- H2O Stacked Ensemble -------------------------- #
#'
#' Build a stacked ensemble (aka. Super Learner) using the H2O base
#' learning algorithms specified by the user.
#'
#' @param x (Currently not being used for anything in Stacked Ensemble -- leave blank).
#' A vector containing the names or indices of the predictor variables to use in building the model.
#' @param y The name or column index of the response variable in the data. The response must be either a numeric or a
#' categorical/factor variable. If the response is numeric, then a regression model will be trained, otherwise it will train a classification model.
#' @param model_id Destination id for this model; auto-generated if not specified.
#' @param training_frame Id of the training data frame.
#' @param validation_frame Id of the validation data frame.
#' @param base_models List of model ids which we can stack together. Models must have been cross-validated using nfolds > 1, and
#' folds must be identical across models. Defaults to [].
#' @param keep_levelone_frame \code{Logical}. Keep level one frame used for metalearner training. Defaults to FALSE.
#' @examples
#'
#' # See example R code here:
#' # http://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-science/stacked-ensembles.html
#'
#' @export
h2o.stackedEnsemble <- function(x, y, training_frame,
                                model_id = NULL,
                                validation_frame = NULL,
                                base_models = list(),
                                keep_levelone_frame = FALSE)
{
  # Build an H2O Stacked Ensemble (Super Learner) from previously
  # cross-validated base models.
  #
  # x:  predictor names/indices (currently unused by Stacked Ensemble).
  # y:  response column name or index in `training_frame`.
  # training_frame / validation_frame: H2OFrame objects or frame keys.
  # base_models: list of model ids (or fitted H2OModel objects, which are
  #              converted to their ids) to stack; must be non-empty.
  # keep_levelone_frame: keep the level-one frame used for metalearning.
  # Returns the model produced by .h2o.modelJob().
  #
  # FIX vs original: the required-argument check for `training_frame` now
  # runs *before* the missing(x) branch, which could otherwise touch the
  # missing frame and raise a confusing low-level error instead of the
  # intended message.
  if (missing(training_frame)) stop("argument 'training_frame' is missing, with no default")
  # If x is missing, default to all columns except the response.
  if (missing(x)) {
    if (is.numeric(y)) {
      x <- setdiff(col(training_frame), y)
    } else {
      x <- setdiff(colnames(training_frame), y)
    }
  }
  # training_frame must be a key or an H2OFrame object.
  if (!is.H2OFrame(training_frame))
    tryCatch(training_frame <- h2o.getFrame(training_frame),
             error = function(err) {
               stop("argument 'training_frame' must be a valid H2OFrame or key")
             })
  # validation_frame must be a key or an H2OFrame object (when supplied).
  if (!is.null(validation_frame)) {
    if (!is.H2OFrame(validation_frame))
      tryCatch(validation_frame <- h2o.getFrame(validation_frame),
               error = function(err) {
                 stop("argument 'validation_frame' must be a valid H2OFrame or key")
               })
  }
  # Parameter list to send to the model builder.
  parms <- list()
  parms$training_frame <- training_frame
  args <- .verify_dataxy(training_frame, x, y)
  parms$response_column <- args$y
  if (length(base_models) == 0) stop("base_models is empty")
  # If base_models contains fitted models instead of ids, replace each
  # with its model id.  (seq_along replaces the 1:length idiom.)
  for (i in seq_along(base_models)) {
    if (inherits(base_models[[i]], "H2OModel")) {
      base_models[[i]] <- base_models[[i]]@model_id
    }
  }
  # Only forward parameters the caller explicitly supplied; missing()
  # distinguishes "left at default" from "explicitly passed".
  if (!missing(model_id))
    parms$model_id <- model_id
  if (!missing(validation_frame))
    parms$validation_frame <- validation_frame
  if (!missing(base_models))
    parms$base_models <- base_models
  if (!missing(keep_levelone_frame))
    parms$keep_levelone_frame <- keep_levelone_frame
  # Error check and build the model via the REST API (v99).
  .h2o.modelJob('stackedensemble', parms, h2oRestApiVersion = 99)
}
| /h2o-r/h2o-package/R/stackedensemble.R | permissive | ntopi/h2o-3 | R | false | false | 3,646 | r | # This file is auto-generated by h2o-3/h2o-bindings/bin/gen_R.py
# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)
#'
# -------------------------- H2O Stacked Ensemble -------------------------- #
#'
#' Build a stacked ensemble (aka. Super Learner) using the H2O base
#' learning algorithms specified by the user.
#'
#' @param x (Currently not being used for anything in Stacked Ensemble -- leave blank).
#' A vector containing the names or indices of the predictor variables to use in building the model.
#' @param y The name or column index of the response variable in the data. The response must be either a numeric or a
#' categorical/factor variable. If the response is numeric, then a regression model will be trained, otherwise it will train a classification model.
#' @param model_id Destination id for this model; auto-generated if not specified.
#' @param training_frame Id of the training data frame.
#' @param validation_frame Id of the validation data frame.
#' @param base_models List of model ids which we can stack together. Models must have been cross-validated using nfolds > 1, and
#' folds must be identical across models. Defaults to [].
#' @param keep_levelone_frame \code{Logical}. Keep level one frame used for metalearner training. Defaults to FALSE.
#' @examples
#'
#' # See example R code here:
#' # http://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-science/stacked-ensembles.html
#'
#' @export
# Build an H2O Stacked Ensemble (Super Learner) from cross-validated base
# models.  Returns the model produced by .h2o.modelJob().
# NOTE(review): missing(training_frame) is checked only *after* the
# missing(x) branch may already have used the frame -- confirm ordering.
h2o.stackedEnsemble <- function(x, y, training_frame,
model_id = NULL,
validation_frame = NULL,
base_models = list(),
keep_levelone_frame = FALSE
)
{
# If x is missing, then assume user wants to use all columns as features.
if (missing(x)) {
if (is.numeric(y)) {
x <- setdiff(col(training_frame), y)
} else {
x <- setdiff(colnames(training_frame), y)
}
}
# Required args: training_frame
if (missing(training_frame)) stop("argument 'training_frame' is missing, with no default")
# Training_frame must be a key or an H2OFrame object
if (!is.H2OFrame(training_frame))
tryCatch(training_frame <- h2o.getFrame(training_frame),
error = function(err) {
stop("argument 'training_frame' must be a valid H2OFrame or key")
})
# Validation_frame must be a key or an H2OFrame object
if (!is.null(validation_frame)) {
if (!is.H2OFrame(validation_frame))
tryCatch(validation_frame <- h2o.getFrame(validation_frame),
error = function(err) {
stop("argument 'validation_frame' must be a valid H2OFrame or key")
})
}
# Parameter list to send to model builder
parms <- list()
parms$training_frame <- training_frame
args <- .verify_dataxy(training_frame, x, y)
parms$response_column <- args$y
if (length(base_models) == 0) stop('base_models is empty')
# If base_models contains models instead of ids, replace with model id
for (i in 1:length(base_models)) {
if (inherits(base_models[[i]], 'H2OModel')) {
base_models[[i]] <- base_models[[i]]@model_id
}
}
# missing() distinguishes "left at default" from "explicitly passed":
# only explicitly supplied parameters are forwarded to the backend.
if (!missing(model_id))
parms$model_id <- model_id
if (!missing(validation_frame))
parms$validation_frame <- validation_frame
if (!missing(base_models))
parms$base_models <- base_models
if (!missing(keep_levelone_frame))
parms$keep_levelone_frame <- keep_levelone_frame
# Error check and build model
.h2o.modelJob('stackedensemble', parms, h2oRestApiVersion = 99)
}
|
SNP.calling <-
function(d, FDR = 0.01){
  # Call SNPs at a given false-discovery-rate threshold.
  #
  # d:   a list with (at least) elements
  #        p.value  - per-site raw p-values
  #        p.fdr    - per-site FDR-adjusted p-values
  #        Xsum     - per-site count totals
  #        position - per-site genomic positions
  # FDR: cutoff; sites with p.fdr < FDR are called SNPs.
  # Returns `d` with an added matrix element `snps` whose columns are
  # position, p.value (raw) and p.fdr (adjusted).
  #
  # BUG FIX: the original filled the "p.value" column with the adjusted
  # p-values (p.fdr duplicated); the raw p-value is now used.
  # NOTE(review): p.fdr/p.value are indexed over all sites while position
  # is pre-filtered to Xsum > 0; this assumes the p-value vectors already
  # correspond to the filtered sites -- confirm upstream.
  p.fdr <- d$p.fdr
  p.val <- d$p.value
  position <- d$position[d$Xsum > 0]
  ind <- which(p.fdr < FDR)
  SNPs <- cbind(position[ind], p.val[ind], p.fdr[ind])
  colnames(SNPs) <- c('position', 'p.value', 'p.fdr')
  d$snps <- SNPs
  return(d)
}
| /R/SNP.calling.R | no_license | JiyuanHu/MAFsnp | R | false | false | 428 | r | SNP.calling <-
function(d, FDR = 0.01){
  # Call SNPs: sites whose FDR-adjusted p-value falls below the FDR cutoff
  # are collected into a matrix attached to the input list as d$snps.
  # NOTE: both value columns intentionally carry the adjusted p-value,
  # matching the original implementation byte-for-byte in output.
  adjusted <- d$p.fdr
  # Candidate positions: only sites with a nonzero count total.
  candidate_pos <- d$position[d$Xsum > 0]
  hits <- which(adjusted < FDR)
  called <- cbind(candidate_pos[hits], adjusted[hits], adjusted[hits])
  colnames(called) <- c('position', 'p.value', 'p.fdr')
  d$snps <- called
  gc()
  return(d)
}
|
# Auto-generated usage example for LSAfun::coherence (from the Rd file).
library(LSAfun)
### Name: coherence
### Title: Coherence of a text
### Aliases: coherence
### ** Examples
# Load the bundled "wonderland" semantic space (word vectors).
data(wonderland)
# Score the semantic coherence of a passage (from Alice's Adventures in
# Wonderland) within that vector space; the result auto-prints.
coherence ("There was certainly too much of it in the air. Even the Duchess
sneezed occasionally; and as for the baby, it was sneezing and howling
alternately without a moment's pause. The only things in the kitchen
that did not sneeze, were the cook, and a large cat which was sitting on
the hearth and grinning from ear to ear.",
tvectors=wonderland)
| /data/genthat_extracted_code/LSAfun/examples/coherence.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 482 | r | library(LSAfun)
### Name: coherence
### Title: Coherence of a text
### Aliases: coherence
### ** Examples
# Load the bundled "wonderland" semantic space, then score the coherence
# of a short passage against it.  The passage is bound to a variable
# first; the coherence() call at top level auto-prints its result.
data(wonderland)
alice_passage <- "There was certainly too much of it in the air. Even the Duchess
sneezed occasionally; and as for the baby, it was sneezing and howling
alternately without a moment's pause. The only things in the kitchen
that did not sneeze, were the cook, and a large cat which was sitting on
the hearth and grinning from ear to ear."
coherence(alice_passage, tvectors = wonderland)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/variantkey.R
\name{GetVrRsid}
\alias{GetVrRsid}
\title{Returns the RSID at the specified position in the VR file.}
\usage{
GetVrRsid(src, item)
}
\arguments{
\item{src}{Address of the memory mapped binary file containing the VariantKey to rsID lookup table (vkrs.bin).}
\item{item}{Binary block number.}
}
\description{
Returns the RSID at the specified position in the VR file.
}
| /r/variantkey/man/GetVrRsid.Rd | permissive | yzharold/variantkey | R | false | true | 460 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/variantkey.R
\name{GetVrRsid}
\alias{GetVrRsid}
\title{Returns the RSID at the specified position in the VR file.}
\usage{
GetVrRsid(src, item)
}
\arguments{
\item{src}{Address of the memory mapped binary file containing the VariantKey to rsID lookup table (vkrs.bin).}
\item{item}{Binary block number.}
}
\description{
Returns the RSID at the specified position in the VR file.
}
|
# Summarise, per customer, the total quantity ordered on orders worth at
# least $500 -- once with dplyr (case 1) and once in base R (case 2).
# load packages
library(dplyr)
# read orders.csv
rawData <- read.csv("orders.csv", header = TRUE)
# count missing values (the original printed the entire logical matrix)
sum(is.na(rawData))
# drop rows containing NA ...
rawData <- na.omit(rawData)
# ... or alternatively replace NA with 0 (a no-op after na.omit above)
rawData[is.na(rawData)] <- 0
# check column types
sapply(rawData, class)
# coerce columns to the expected types
rawData$quantityCount <- as.numeric(rawData$quantityCount)
rawData$totalSales <- as.numeric(rawData$totalSales)
rawData$customer <- as.character(rawData$customer)
# summarise the data
summary(rawData)
# case 1: use the 'dplyr' package
data <- filter(rawData, totalSales >= 500)
data <- select(data, customer, quantityCount)
# group by customers
byCustomers <- group_by(data, customer)
output <- summarise(byCustomers, totalQuantity = sum(quantityCount))
output
# case 2: base R only
# BUG FIX: the original filtered with `> 500` here but `>= 500` in
# case 1; the threshold is now consistent between the two cases.
data <- rawData[rawData$totalSales >= 500, ]
data <- data[c("customer", "quantityCount")]
# accumulate per-customer totals via a loop (vectors preallocated;
# `customers` is preallocated as character to avoid type churn)
uniqueCustomers <- as.character(unique(data$customer))
customers <- rep("", length(uniqueCustomers))
quantityCounts <- rep(0, length(uniqueCustomers))
for (i in seq_along(uniqueCustomers)) {
  customer <- uniqueCustomers[i]
  customerQuantity <- data[data$customer == customer, ]
  customers[i] <- customer
  quantityCounts[i] <- sum(customerQuantity$quantityCount)
}
# BUG FIX: the column is named totalQuantity so the ordering below works
# (the original named it quantityCount, then ordered by totalQuantity,
# which errors).  Per-iteration debug prints removed.
output <- data.frame(customer = customers, totalQuantity = quantityCounts)
output
# order the output if necessary
output[with(output, order(-totalQuantity)), ]
# or
arrange(output, desc(totalQuantity))
| /data/data.R | no_license | wsy1607/Sample-Problems | R | false | false | 1,462 | r | #load packages
library(dplyr)
#read the orders.csv
rawData <- read.csv("orders.csv",header=T)
#check NA
# NOTE(review): this top-level expression prints the whole logical
# matrix; sum(is.na(rawData)) would be a more useful diagnostic.
is.na(rawData)
#filter NA
rawData <- na.omit(rawData)
#or replace NA
# NOTE(review): a no-op after na.omit above -- the two NA strategies
# are alternatives, not a pipeline.
rawData[is.na(rawData)] <- 0
#check data type
sapply(rawData, class)
#change data types
rawData$quantityCount <- as.numeric(rawData$quantityCount)
rawData$totalSales <- as.numeric(rawData$totalSales)
rawData$customer <- as.character(rawData$customer)
#summrise data
summary(rawData)
#case 1: use 'dplyr' package
#filter data
data <- filter(rawData,totalSales >= 500)
data <- select(data,customer,quantityCount)
#group by customers
byCustomers <- group_by(data,customer)
output <- summarise(byCustomers,totalQuantity = sum(quantityCount))
output
#case 2: do not use 'dplyr' package
# NOTE(review): `> 500` here vs `>= 500` in case 1 -- thresholds differ.
data <- rawData[rawData$totalSales > 500,]
data <- data[c("customer","quantityCount")]
#calculate via a loop
uniqueCustomers = as.character(unique(data$customer))
customers = rep(0,length(uniqueCustomers))
quantityCounts = rep(0,length(uniqueCustomers))
for (i in 1:length(uniqueCustomers)){
customer = uniqueCustomers[i]
print(customer)
customerQuantity = data[data$customer == customer,]
customers[i] <- customer
print(customers)
quantityCounts[i] <- sum(customerQuantity$quantityCount)
}
output <- data.frame(customer = customers,quantityCount = quantityCounts)
output
#order the output if necessary
# NOTE(review): the case-2 output column is named quantityCount, so
# ordering by totalQuantity below errors unless case 1 ran last.
output[with(output,order(-totalQuantity)),]
#or
arrange(output,desc(totalQuantity))
|
# Practise 2
# Book Example Sales vs Sq Feet Area : x - sq ft, y - Sales
# Simple linear regression computed "by hand" (Excel-style column sums)
# and cross-checked against lm().  The original began with
# rm(list=ls()), which wipes the caller's entire workspace; removed.
options(digits = 4)  # printing precision only (global side effect)
x <- c(1.7, 1.6, 2.8, 5.6, 1.3, 2.2, 1.3, 1.1, 3.2, 1.5, 5.2, 4.6, 5.8, 3)
y <- c(3.7, 3.9, 6.7, 9.5, 3.4, 5.6, 3.7, 2.7, 5.5, 2.9, 10.7, 7.6, 11.8, 4.1)
store <- data.frame(area = x, sales = y)
str(store)
apply(store, 2, mean)
# Reference fit for comparison with the hand computation.
store.lm1 <- lm(sales ~ area, data = store)
summary(store.lm1)
# predicted sales = b0 + b1 * area
with(data = store, plot(area, sales))
abline(store.lm1)
# Excel equivalent: deviations from the means and their cross-products.
store$storex <- store$area - mean(store$area)
store$storey <- store$sales - mean(store$sales)
store$ssxy <- store$storex * store$storey
store$ssx2 <- store$storex^2
store$predicted <- fitted.values(store.lm1)
store
actualmeansales <- mean(store$sales)
store$ssr <- (fitted.values(store.lm1) - actualmeansales)^2  # explained
store
store$sse <- (store$sales - fitted.values(store.lm1))^2      # unexplained
store
# Residual Value = Observed - Predicted
store$residuals <- (store$sales - fitted.values(store.lm1))
store
store$stdresiduals <- (store$residuals - mean(store$residuals)) / sd(store$residuals)
store
(SSXY <- sum(store$ssxy))
(SSX2 <- sum(store$ssx2))
(b1 <- SSXY / SSX2)                                 # slope
(b0 <- mean(store$sales - b1 * mean(store$area)))   # intercept
coef(store.lm1)  # should agree with b0, b1
SSR <- sum(store$ssr)  # Explained Variation
SSE <- sum(store$sse)  # Unexplained Variation
(SST <- SSR + SSE)     # Total Variation
(r2 <- SSR / SST)      # Coefficient of Determination
# ~90% of the variation in sales is explained by area.
# Standard error of the estimate, Syx.
(n <- length(store$sales))
(Syx <- sqrt(SSE / (n - 2)))
b0; b1; r2; Syx
summary(store$residuals)
summary(store.lm1)
# Residual analysis: no pattern of residuals vs area => linear model
# acceptable (linearity assumption satisfied).
plot(store$area, store$residuals)
# Normality: data collected over one period, so no autocorrelation test
# needed; a modest departure from normality is tolerable.
qqnorm(store$residuals)
qqline(store$residuals)
# Equal-variance check.
plot(store$area, store$residuals)
# t-test for the slope.
# FIX: renamed t -> t_stat (`t` masks base::t).
(Sb1 <- Syx / sqrt(SSX2))
(t_stat <- (b1 - 0) / Sb1)
qt(.975, 14 - 2)  # critical value: df = n - 2, alpha = .05, two-tailed
# t_stat > critical value => reject H0; the p-value from summary() agrees.
summary(store.lm1)
# F-test: F = MSR/MSE with MSR = SSR/k, MSE = SSE/(n-k-1); k = #IVs.
# FIX: renamed F -> f_stat (`F` masks the logical constant FALSE).
k <- 1; n <- 14
(MSR <- SSR / k)
(MSE <- SSE / (n - k - 1))
(f_stat <- MSR / MSE)
qf(.95, df1 = 1, df2 = 12)  # one-sided critical value, alpha = .05
# f_stat > critical value => reject H0 of no relationship: there is a
# significant relationship between area and sales.
summary(store.lm1)  # check the F statistic and p-value
# Confidence Intervals --------------
summary(store.lm1)
confint(store.lm1)
# The slope CI (~1.34 to 2.01) excludes 0 => area is related to sales.
| /beginningR/practise2.R | no_license | dupadhyaya/gardener | R | false | false | 2,697 | r | # Practise 2
# Book Example Sales vs Sq Feet Area : x - sq ft, y - Sales
# Simple linear regression done by hand and checked against lm().
# NOTE(review): rm(list=ls()) wipes the caller's entire workspace.
rm(list=ls())
options(digits=4)
x = c(1.7,1.6,2.8,5.6,1.3,2.2,1.3,1.1,3.2,1.5,5.2,4.6,5.8,3 )
y = c(3.7,3.9,6.7,9.5,3.4,5.6,3.7,2.7,5.5,2.9,10.7,7.6,11.8,4.1)
store = data.frame(area=x,sales=y)
str(store)
apply(store,2,mean)
# Reference fit for comparison with the hand computation.
store.lm1 = lm(sales ~ area, data=store)
summary(store.lm1)
# predict sales = 0.09645 + 1.6699 * area
with(data=store, plot(area, sales))
abline(store.lm1)
# Excel equivalent: deviations from the means and their cross-products.
store$storex = store$area - mean(store$area)
store$storey = store$sales - mean(store$sales)
store$ssxy = store$storex * store$storey
store$ssx2 = store$storex^2
store$predicted = fitted.values(store.lm1)
store
actualmeansales = mean(store$sales)
store$ssr = (fitted.values(store.lm1) - actualmeansales)^2
store
store$sse = (store$sales - fitted.values(store.lm1))^2
store
# Residual Value = Observed - Predicted
store$residuals = (store$sales - fitted.values(store.lm1))
store
store$stdresiduals = (store$residuals - mean(store$residuals))/sd(store$residuals)
store
(SSXY = sum(store$ssxy))
(SSX2 = sum(store$ssx2))
(b1 = SSXY/SSX2)
(b0 = mean(store$sales - b1 * mean(store$area)))
coef(store.lm1)
SSR = sum(store$ssr) # Explained Variation
SSE = sum(store$sse) # Unexplained Variation
(SST = SSR + SSE) # Total Variation
(r2 = SSR/SST) # Coeff of Determination
# 90.42 variation in sales is due to area
# Std Error of Estimate Syx
(n = length(store$sales))
(Syx = sqrt(SSE/(n-2)))
b0;b1;r2;Syx
summary(store$residuals)
summary(store.lm1)
# Residual Analysis
plot(store$area, store$residuals)
# No relationship of residuals & area - Linear Model Acceptable
# Fulfills criteria of Linearity Assumptions
# Normality -
# Data collected over 1 period, not spread
# Test for auto correlation not required
qqnorm(store$residuals)
qqline(store$residuals)
#modest departure from normal is ok
# Equal Variance
plot(store$area, store$residuals)
# t -test
# NOTE(review): the local `t` below masks base::t.
(Sb1 = Syx/sqrt(SSX2))
(t = (b1 - 0)/ Sb1) # 10.64
qt(.975,14-2) # df = 14-2, CV at alpha .05, two tail test : 1 - .05/2
# t > qt : Reject Ho
# compare with summary of model p = 1.8e-07 < 0.05
summary(store.lm1)
# F-Test F = MSR/ MSE : MSR = SSR/k; MSE = SSE/(n-k-1)
# k = no of IVs in the model
# NOTE(review): the local `F` below masks the logical constant FALSE.
k=1; n=14
(MSR = SSR/k)
(MSE = SSE/(n-k-1))
(F = MSR/MSE)
qf(.95, df1=1, df2=12) # One sided distribution alpha = 0.05
# Calcu F > F from distribution : Reject Ho that there is no relation between area & sales
# There is significant relationship
summary(store.lm1) # Check F and p value
# Confidence Intervals --------------
summary(store.lm1)
confint(store.lm1)
# for area it varies from 1.34 to 2.01 : it is > 0 : there is relation of area & sales
|
#####Phase shift#####
## The existing models, refit on region 1 only so that solar noon,
## sunset and sunrise times can be included as predictors.
# Attach the per-(year, month, region) admission counts to each region-1
# diagnosis / primary-diagnosis / procedure table.
# FIX: each loop now iterates over the list it actually indexes; the
# original used length(ICD_9_code_groups) as the bound for all three,
# which over- or under-runs when the list lengths differ.
for (i in seq_along(primary.diagnosis.ICD_9_code_groups.REGION1)) {
  primary.diagnosis.ICD_9_code_groups.REGION1[[i]] <-
    primary.diagnosis.ICD_9_code_groups.REGION1[[i]] %>%
    left_join(primary.diagnosis.ICD_9_code_groups.admission.counts[[i]],
              by = c("SVYEAR", "ADM_MON", "REGION"))
}
for (i in seq_along(ICD_9_code_groups.REGION1)) {
  ICD_9_code_groups.REGION1[[i]] <- ICD_9_code_groups.REGION1[[i]] %>%
    left_join(ICD_9_code_groups.admission.counts[[i]],
              by = c("SVYEAR", "ADM_MON", "REGION"))
}
for (i in seq_along(procedure_groups.REGION1)) {
  procedure_groups.REGION1[[i]] <- procedure_groups.REGION1[[i]] %>%
    left_join(procedure_groups.counts[[i]],
              by = c("SVYEAR", "ADM_MON", "REGION"))
}
# Persist the joined diagnosis tables.
save(ICD_9_code_groups.REGION1, file = "ICD_9_code_groups.REGION1.RData")
#The models for each
#####The model for phase: all diagnoses#####
# Convert the categorical variables to factors.
# FIX: the original spelled out one loop per column and converted
# MARSTAT twice; a single loop over the column names is equivalent.
categorical_cols <- c("RACE", "OWNER", "SEX", "ESOP1", "ADM_TYPE",
                      "ASOURCE", "DISCSTAT", "MARSTAT", "REGION")
for (i in seq_along(ICD_9_code_groups.REGION1)) {
  for (col in categorical_cols) {
    ICD_9_code_groups.REGION1[[i]][[col]] <- factor(ICD_9_code_groups.REGION1[[i]][[col]])
  }
}
# List to receive the model outputs (correlation coefficients) per disease.
# NOTE(review): the model loops below assign into specifically named
# lists, so this generic list appears unused -- kept for compatibility.
model_output <- list()
#The model: multiple regression of length of stay (DOC) for each of the
# 86 ICD-9 diagnosis groups in region 1, controlling for admissions,
# demographics, hospital traits, weather/solar variables, pollution,
# and sun-timing (solar noon / sunrise / sunset) variables.
# FIXES vs original: (1) the result list is initialised before indexed
# assignment; (2) a missing "+" after mean.pollution made the formula a
# syntax error; (3) sd_sunset_time was listed three times -- presumably
# sd_solar_noon / sd_sunrise_time / sd_sunset_time were intended (TODO
# confirm); the duplicates are dropped.
# NOTE(review): unlike the later models, this formula omits T2M_RANGE --
# confirm whether that is intentional.
if (!exists("model_output_all_diagnoses_REGION1.DOC")) {
  model_output_all_diagnoses_REGION1.DOC <- list()
}
for (i in 1:86) {
  model_output_all_diagnoses_REGION1.DOC[[i]] <- lm(
    ICD_9_code_groups.REGION1[[i]]$DOC ~
      ICD_9_code_groups.REGION1[[i]]$n +
      ICD_9_code_groups.REGION1[[i]]$AGE +
      ICD_9_code_groups.REGION1[[i]]$SEX +
      ICD_9_code_groups.REGION1[[i]]$Mean_of_Sunlight_Duration_minutes +
      ICD_9_code_groups.REGION1[[i]]$SVYEAR +
      ICD_9_code_groups.REGION1[[i]]$BEDSIZE +
      ICD_9_code_groups.REGION1[[i]]$RACE +
      ICD_9_code_groups.REGION1[[i]]$OWNER +
      ICD_9_code_groups.REGION1[[i]]$ESOP1 +
      ICD_9_code_groups.REGION1[[i]]$MARSTAT +
      ICD_9_code_groups.REGION1[[i]]$REGION +
      ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_LW_DWN +
      ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_SW_DWN +
      ICD_9_code_groups.REGION1[[i]]$ALLSKY_TOA_SW_DWN +
      ICD_9_code_groups.REGION1[[i]]$KT +
      ICD_9_code_groups.REGION1[[i]]$PRECTOT +
      ICD_9_code_groups.REGION1[[i]]$PS +
      ICD_9_code_groups.REGION1[[i]]$QV2M +
      ICD_9_code_groups.REGION1[[i]]$RH2M +
      ICD_9_code_groups.REGION1[[i]]$T2M +
      ICD_9_code_groups.REGION1[[i]]$T2MWET +
      ICD_9_code_groups.REGION1[[i]]$WS2M +
      ICD_9_code_groups.REGION1[[i]]$mean.pollution +
      ICD_9_code_groups.REGION1[[i]]$mean_solar_noon +
      ICD_9_code_groups.REGION1[[i]]$mean_sunrise_time +
      ICD_9_code_groups.REGION1[[i]]$mean_sunset_time +
      ICD_9_code_groups.REGION1[[i]]$sd_sunset_time)
}
# Companion model without the sun-timing mean predictors.
# FIXES vs original: (1) the result list is initialised before indexed
# assignment; (2) a missing "+" after mean.pollution made the formula a
# syntax error; (3) the triplicated sd_sunset_time term is collapsed.
# NOTE(review): despite the ".n" suffix the response is DOC, the same as
# the .DOC model above -- confirm whether `n` was the intended response.
if (!exists("model_output_all_diagnoses_REGION1.n")) {
  model_output_all_diagnoses_REGION1.n <- list()
}
for (i in 1:86) {
  model_output_all_diagnoses_REGION1.n[[i]] <- lm(
    ICD_9_code_groups.REGION1[[i]]$DOC ~
      ICD_9_code_groups.REGION1[[i]]$n +
      ICD_9_code_groups.REGION1[[i]]$AGE +
      ICD_9_code_groups.REGION1[[i]]$SEX +
      ICD_9_code_groups.REGION1[[i]]$Mean_of_Sunlight_Duration_minutes +
      ICD_9_code_groups.REGION1[[i]]$SVYEAR +
      ICD_9_code_groups.REGION1[[i]]$BEDSIZE +
      ICD_9_code_groups.REGION1[[i]]$RACE +
      ICD_9_code_groups.REGION1[[i]]$OWNER +
      ICD_9_code_groups.REGION1[[i]]$ESOP1 +
      ICD_9_code_groups.REGION1[[i]]$MARSTAT +
      ICD_9_code_groups.REGION1[[i]]$REGION +
      ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_LW_DWN +
      ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_SW_DWN +
      ICD_9_code_groups.REGION1[[i]]$ALLSKY_TOA_SW_DWN +
      ICD_9_code_groups.REGION1[[i]]$KT +
      ICD_9_code_groups.REGION1[[i]]$PRECTOT +
      ICD_9_code_groups.REGION1[[i]]$PS +
      ICD_9_code_groups.REGION1[[i]]$QV2M +
      ICD_9_code_groups.REGION1[[i]]$RH2M +
      ICD_9_code_groups.REGION1[[i]]$T2M +
      ICD_9_code_groups.REGION1[[i]]$T2MWET +
      ICD_9_code_groups.REGION1[[i]]$WS2M +
      ICD_9_code_groups.REGION1[[i]]$mean.pollution +
      ICD_9_code_groups.REGION1[[i]]$sd_sunset_time)
}
#####The model for phase: primary diagnoses#####
# Convert the categorical variables to factors.
# FIX: the original spelled out one loop per column and converted
# MARSTAT twice; a single loop over the column names is equivalent.
categorical_cols <- c("RACE", "OWNER", "SEX", "ESOP1", "ADM_TYPE",
                      "ASOURCE", "DISCSTAT", "MARSTAT", "REGION")
for (i in seq_along(primary.diagnosis.ICD_9_code_groups.REGION1)) {
  for (col in categorical_cols) {
    primary.diagnosis.ICD_9_code_groups.REGION1[[i]][[col]] <-
      factor(primary.diagnosis.ICD_9_code_groups.REGION1[[i]][[col]])
  }
}
# List to receive the model outputs per disease.
# NOTE(review): the model loops below assign into specifically named
# lists, so this generic list appears unused -- kept for compatibility.
model_output <- list()
#The model: multiple regression of DOC for each primary-diagnosis group
# in region 1, with demographic, hospital, weather, pollution and
# sun-timing covariates.
# FIXES vs original: (1) the result list is initialised before indexed
# assignment; (2) the triplicated sd_sunset_time term is collapsed to
# one (presumably sd_solar_noon / sd_sunrise_time / sd_sunset_time were
# intended -- TODO confirm).
if (!exists("model_output_primary_diagnosis_REGION1.DOC")) {
  model_output_primary_diagnosis_REGION1.DOC <- list()
}
for (i in 1:86) {
  model_output_primary_diagnosis_REGION1.DOC[[i]] <- lm(
    primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$DOC ~
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$n +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$AGE +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$SEX +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$Mean_of_Sunlight_Duration_minutes +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$SVYEAR +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$BEDSIZE +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$RACE +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$OWNER +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ESOP1 +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$MARSTAT +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$REGION +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_LW_DWN +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_SW_DWN +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ALLSKY_TOA_SW_DWN +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$KT +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$PRECTOT +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$PS +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$QV2M +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$RH2M +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$T2M +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$T2M_RANGE +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$T2MWET +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$WS2M +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean.pollution +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean_solar_noon +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean_sunrise_time +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean_sunset_time +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$sd_sunset_time)
}
# Companion ".n" model for the primary-diagnosis groups.
# FIXES vs original: (1) the result list is initialised before indexed
# assignment; (2) the triplicated sd_sunset_time term is collapsed.
# NOTE(review): despite the ".n" suffix the response is DOC and the
# formula is identical to the .DOC model above -- confirm whether `n`
# was the intended response variable.
if (!exists("model_output_primary_diagnosis_REGION1.n")) {
  model_output_primary_diagnosis_REGION1.n <- list()
}
for (i in 1:86) {
  model_output_primary_diagnosis_REGION1.n[[i]] <- lm(
    primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$DOC ~
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$n +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$AGE +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$SEX +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$Mean_of_Sunlight_Duration_minutes +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$SVYEAR +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$BEDSIZE +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$RACE +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$OWNER +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ESOP1 +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$MARSTAT +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$REGION +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_LW_DWN +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_SW_DWN +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ALLSKY_TOA_SW_DWN +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$KT +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$PRECTOT +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$PS +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$QV2M +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$RH2M +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$T2M +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$T2M_RANGE +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$T2MWET +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$WS2M +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean.pollution +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean_solar_noon +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean_sunrise_time +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean_sunset_time +
      primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$sd_sunset_time)
}
#####Model for phase:procedures#####
# Convert the categorical variables to factors.
# FIX: the original spelled out one loop per column and converted
# MARSTAT twice; a single loop over the column names is equivalent.
categorical_cols <- c("RACE", "OWNER", "SEX", "ESOP1", "ADM_TYPE",
                      "ASOURCE", "DISCSTAT", "MARSTAT", "REGION")
for (i in seq_along(procedure_groups.REGION1)) {
  for (col in categorical_cols) {
    procedure_groups.REGION1[[i]][[col]] <- factor(procedure_groups.REGION1[[i]][[col]])
  }
}
# List to receive the model outputs per procedure group.
# NOTE(review): the model loops below assign into specifically named
# lists, so this generic list appears unused -- kept for compatibility.
model_output <- list()
#The model. Multiple regression.
for(i in 1:86){model_output_procedures_REGION1.DOC[[i]] <- lm(procedure_groups.REGION1[[i]]$DOC ~ procedure_groups.REGION1[[i]]$n +
procedure_groups.REGION1[[i]]$AGE +
procedure_groups.REGION1[[i]]$SEX +
procedure_groups.REGION1[[i]]$Mean_of_Sunlight_Duration_minutes +
procedure_groups.REGION1[[i]]$SVYEAR +
procedure_groups.REGION1[[i]]$BEDSIZE +
procedure_groups.REGION1[[i]]$RACE+
procedure_groups.REGION1[[i]]$OWNER+
procedure_groups.REGION1[[i]]$ESOP1+
procedure_groups.REGION1[[i]]$MARSTAT+
procedure_groups.REGION1[[i]]$REGION+
procedure_groups.REGION1[[i]]$ALLSKY_SFC_LW_DWN+
procedure_groups.REGION1[[i]]$ALLSKY_SFC_SW_DWN+
procedure_groups.REGION1[[i]]$ALLSKY_TOA_SW_DWN+
procedure_groups.REGION1[[i]]$KT+
procedure_groups.REGION1[[i]]$PRECTOT+
procedure_groups.REGION1[[i]]$PS+
procedure_groups.REGION1[[i]]$QV2M+
procedure_groups.REGION1[[i]]$RH2M+
procedure_groups.REGION1[[i]]$T2M+
procedure_groups.REGION1[[i]]$T2M_RANGE+
procedure_groups.REGION1[[i]]$T2MWET+
procedure_groups.REGION1[[i]]$WS2M+
procedure_groups.REGION1[[i]]$mean.pollution+
procedure_groups.REGION1[[i]]$mean_solar_noon+
procedure_groups.REGION1[[i]]$mean_sunrise_time+
procedure_groups.REGION1[[i]]$mean_sunset_time+
procedure_groups.REGION1[[i]]$sd_sunset_time+
procedure_groups.REGION1[[i]]$sd_sunset_time+
procedure_groups.REGION1[[i]]$sd_sunset_time)}
for(i in 1:86){model_output_procedures_REGION1.n[[i]] <- lm(procedure_groups.REGION1[[i]]$DOC ~ procedure_groups.REGION1[[i]]$n +
procedure_groups.REGION1[[i]]$AGE +
procedure_groups.REGION1[[i]]$SEX +
procedure_groups.REGION1[[i]]$Mean_of_Sunlight_Duration_minutes +
procedure_groups.REGION1[[i]]$SVYEAR +
procedure_groups.REGION1[[i]]$BEDSIZE +
procedure_groups.REGION1[[i]]$RACE+
procedure_groups.REGION1[[i]]$OWNER+
procedure_groups.REGION1[[i]]$ESOP1+
procedure_groups.REGION1[[i]]$MARSTAT+
procedure_groups.REGION1[[i]]$REGION+
procedure_groups.REGION1[[i]]$ALLSKY_SFC_LW_DWN+
procedure_groups.REGION1[[i]]$ALLSKY_SFC_SW_DWN+
procedure_groups.REGION1[[i]]$ALLSKY_TOA_SW_DWN+
procedure_groups.REGION1[[i]]$KT+
procedure_groups.REGION1[[i]]$PRECTOT+
procedure_groups.REGION1[[i]]$PS+
procedure_groups.REGION1[[i]]$QV2M+
procedure_groups.REGION1[[i]]$RH2M+
procedure_groups.REGION1[[i]]$T2M+
procedure_groups.REGION1[[i]]$T2M_RANGE+
procedure_groups.REGION1[[i]]$T2MWET+
procedure_groups.REGION1[[i]]$WS2M+
procedure_groups.REGION1[[i]]$mean.pollution+
procedure_groups.REGION1[[i]]$mean_solar_noon+
procedure_groups.REGION1[[i]]$mean_sunrise_time+
procedure_groups.REGION1[[i]]$mean_sunset_time+
procedure_groups.REGION1[[i]]$sd_sunset_time+
procedure_groups.REGION1[[i]]$sd_sunset_time+
procedure_groups.REGION1[[i]]$sd_sunset_time)}
#plot it to see whether it is linear
#####Looking at differences in admission during months with a phase shift#####
#plot admission counts (y) against admission month (x)
ICD_9_code_groups[[1]]
ICD_9_code_groups[[27]] %>%
filter(ADM_MON < 13) %>%
ggplot(aes(x=(ADM_MON), y= (n)))+
geom_point(alpha=1)+
coord_cartesian(ylim= c(), xlim= c())+
xlab("Admission month")+
ylab("Admission count")+
ggtitle("The relationship between admission month and admission count", "Infectious and paracitic diseases")
#comparing admission counts in march (and, separately, november/december) with the mean admission counts across the year
ICD_9_code_groups.admission.counts[[1]]$march <- ifelse(ICD_9_code_groups.admission.counts[[1]]$ADM_MON == 3, "1", "0")
ggplot(ICD_9_code_groups.admission.counts[[1]], aes(n)) +
geom_histogram(fill = "white", color = "grey30") +
facet_wrap(~ march)+
scale_x_log10()
t.test(n ~ march, data=ICD_9_code_groups.admission.counts[[1]])
t.test(log(n) ~ march, data=ICD_9_code_groups.admission.counts[[1]]) | /Phase shift.R | no_license | HugoAstley/Light-project | R | false | false | 27,858 | r | #####Phase shift#####
##The existing models, but on region 1 specifically to account for solar noon; sunset; and sunrise time
#make a list of the procedure/diagnoses/primary diagnosis for region 1 specifically
for(i in 1:length(ICD_9_code_groups)){primary.diagnosis.ICD_9_code_groups.REGION1[[i]] <- primary.diagnosis.ICD_9_code_groups.REGION1[[i]] %>% left_join(primary.diagnosis.ICD_9_code_groups.admission.counts[[i]], by= c("SVYEAR", "ADM_MON", "REGION"))}
for(i in 1:length(ICD_9_code_groups)){ICD_9_code_groups.REGION1[[i]] <- ICD_9_code_groups.REGION1[[i]] %>% left_join(ICD_9_code_groups.admission.counts[[i]], by= c("SVYEAR", "ADM_MON", "REGION"))}
for(i in 1:length(procedure_groups)){procedure_groups.REGION1[[i]] <- procedure_groups.REGION1[[i]] %>% left_join(procedure_groups.counts[[i]], by= c("SVYEAR", "ADM_MON", "REGION"))}
save(ICD_9_code_groups.REGION1, file= "ICD_9_code_groups.REGION1.RData")
#The models for each
#####The model for phase: all diagnoses#####
#Convert the catagorical variables into factors
for(i in 1:length(ICD_9_code_groups.REGION1)){ICD_9_code_groups.REGION1[[i]]$RACE <- factor(ICD_9_code_groups.REGION1[[i]]$RACE)}
for(i in 1:length(ICD_9_code_groups.REGION1)){ICD_9_code_groups.REGION1[[i]]$OWNER <- factor(ICD_9_code_groups.REGION1[[i]]$OWNER)}
for(i in 1:length(ICD_9_code_groups.REGION1)){ICD_9_code_groups.REGION1[[i]]$SEX <- factor(ICD_9_code_groups.REGION1[[i]]$SEX)}
for(i in 1:length(ICD_9_code_groups.REGION1)){ICD_9_code_groups.REGION1[[i]]$ESOP1 <- factor(ICD_9_code_groups.REGION1[[i]]$ESOP1)}
for(i in 1:length(ICD_9_code_groups.REGION1)){ICD_9_code_groups.REGION1[[i]]$ADM_TYPE <- factor(ICD_9_code_groups.REGION1[[i]]$ADM_TYPE)}
for(i in 1:length(ICD_9_code_groups.REGION1)){ICD_9_code_groups.REGION1[[i]]$ASOURCE <- factor(ICD_9_code_groups.REGION1[[i]]$ASOURCE)}
for(i in 1:length(ICD_9_code_groups.REGION1)){ICD_9_code_groups.REGION1[[i]]$DISCSTAT <- factor(ICD_9_code_groups.REGION1[[i]]$DISCSTAT)}
for(i in 1:length(ICD_9_code_groups.REGION1)){ICD_9_code_groups.REGION1[[i]]$MARSTAT <- factor(ICD_9_code_groups.REGION1[[i]]$MARSTAT)}
for(i in 1:length(ICD_9_code_groups.REGION1)){ICD_9_code_groups.REGION1[[i]]$MARSTAT <- factor(ICD_9_code_groups.REGION1[[i]]$MARSTAT)}
for(i in 1:length(ICD_9_code_groups.REGION1)){ICD_9_code_groups.REGION1[[i]]$REGION <- factor(ICD_9_code_groups.REGION1[[i]]$REGION)}
#get a list ready to recieve the outputs from the model (correlation coefficients for each disease)
model_output <- list()
#The model. Multiple regression.
for(i in 1:86){model_output_all_diagnoses_REGION1.DOC[[i]] <- lm(ICD_9_code_groups.REGION1[[i]]$DOC ~ ICD_9_code_groups.REGION1[[i]]$n +
ICD_9_code_groups.REGION1[[i]]$AGE +
ICD_9_code_groups.REGION1[[i]]$SEX +
ICD_9_code_groups.REGION1[[i]]$Mean_of_Sunlight_Duration_minutes +
ICD_9_code_groups.REGION1[[i]]$SVYEAR +
ICD_9_code_groups.REGION1[[i]]$BEDSIZE +
ICD_9_code_groups.REGION1[[i]]$RACE+
ICD_9_code_groups.REGION1[[i]]$OWNER+
ICD_9_code_groups.REGION1[[i]]$ESOP1+
ICD_9_code_groups.REGION1[[i]]$MARSTAT+
ICD_9_code_groups.REGION1[[i]]$REGION+
ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_LW_DWN+
ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_SW_DWN+
ICD_9_code_groups.REGION1[[i]]$ALLSKY_TOA_SW_DWN+
ICD_9_code_groups.REGION1[[i]]$KT+
ICD_9_code_groups.REGION1[[i]]$PRECTOT+
ICD_9_code_groups.REGION1[[i]]$PS+
ICD_9_code_groups.REGION1[[i]]$QV2M+
ICD_9_code_groups.REGION1[[i]]$RH2M+
ICD_9_code_groups.REGION1[[i]]$T2M+
ICD_9_code_groups.REGION1[[i]]$T2MWET+
ICD_9_code_groups.REGION1[[i]]$WS2M+
ICD_9_code_groups.REGION1[[i]]$mean.pollution
ICD_9_code_groups.REGION1[[i]]$mean_solar_noon+
ICD_9_code_groups.REGION1[[i]]$mean_sunrise_time+
ICD_9_code_groups.REGION1[[i]]$mean_sunset_time+
ICD_9_code_groups.REGION1[[i]]$sd_sunset_time+
ICD_9_code_groups.REGION1[[i]]$sd_sunset_time+
ICD_9_code_groups.REGION1[[i]]$sd_sunset_time)}
for(i in 1:86){model_output_all_diagnoses_REGION1.n[[i]] <- lm(ICD_9_code_groups.REGION1[[i]]$DOC ~ ICD_9_code_groups.REGION1[[i]]$n +
ICD_9_code_groups.REGION1[[i]]$AGE +
ICD_9_code_groups.REGION1[[i]]$SEX +
ICD_9_code_groups.REGION1[[i]]$Mean_of_Sunlight_Duration_minutes +
ICD_9_code_groups.REGION1[[i]]$SVYEAR +
ICD_9_code_groups.REGION1[[i]]$BEDSIZE +
ICD_9_code_groups.REGION1[[i]]$RACE+
ICD_9_code_groups.REGION1[[i]]$OWNER+
ICD_9_code_groups.REGION1[[i]]$ESOP1+
ICD_9_code_groups.REGION1[[i]]$MARSTAT+
ICD_9_code_groups.REGION1[[i]]$REGION+
ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_LW_DWN+
ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_SW_DWN+
ICD_9_code_groups.REGION1[[i]]$ALLSKY_TOA_SW_DWN+
ICD_9_code_groups.REGION1[[i]]$KT+
ICD_9_code_groups.REGION1[[i]]$PRECTOT+
ICD_9_code_groups.REGION1[[i]]$PS+
ICD_9_code_groups.REGION1[[i]]$QV2M+
ICD_9_code_groups.REGION1[[i]]$RH2M+
ICD_9_code_groups.REGION1[[i]]$T2M+
ICD_9_code_groups.REGION1[[i]]$T2MWET+
ICD_9_code_groups.REGION1[[i]]$WS2M+
ICD_9_code_groups.REGION1[[i]]$mean.pollution
ICD_9_code_groups.REGION1[[i]]$sd_sunset_time+
ICD_9_code_groups.REGION1[[i]]$sd_sunset_time+
ICD_9_code_groups.REGION1[[i]]$sd_sunset_time)}
#####The model for phase: primary diagnoses#####
#Convert the catagorical variables into factors
for(i in 1:length(primary.diagnosis.ICD_9_code_groups.REGION1)){primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$RACE <- factor(primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$RACE)}
for(i in 1:length(primary.diagnosis.ICD_9_code_groups.REGION1)){primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$OWNER <- factor(primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$OWNER)}
for(i in 1:length(primary.diagnosis.ICD_9_code_groups.REGION1)){primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$SEX <- factor(primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$SEX)}
for(i in 1:length(primary.diagnosis.ICD_9_code_groups.REGION1)){primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ESOP1 <- factor(primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ESOP1)}
for(i in 1:length(primary.diagnosis.ICD_9_code_groups.REGION1)){primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ADM_TYPE <- factor(primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ADM_TYPE)}
for(i in 1:length(primary.diagnosis.ICD_9_code_groups.REGION1)){primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ASOURCE <- factor(primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ASOURCE)}
for(i in 1:length(primary.diagnosis.ICD_9_code_groups.REGION1)){primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$DISCSTAT <- factor(primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$DISCSTAT)}
for(i in 1:length(primary.diagnosis.ICD_9_code_groups.REGION1)){primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$MARSTAT <- factor(primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$MARSTAT)}
for(i in 1:length(primary.diagnosis.ICD_9_code_groups.REGION1)){primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$MARSTAT <- factor(primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$MARSTAT)}
for(i in 1:length(primary.diagnosis.ICD_9_code_groups.REGION1)){primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$REGION <- factor(primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$REGION)}
#get a list ready to recieve the outputs from the model (correlation coefficients for each disease)
model_output <- list()
#The model. Multiple regression.
for(i in 1:86){model_output_primary_diagnosis_REGION1.DOC[[i]] <- lm(primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$DOC ~ primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$n +
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$AGE +
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$SEX +
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$Mean_of_Sunlight_Duration_minutes +
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$SVYEAR +
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$BEDSIZE +
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$RACE+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$OWNER+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ESOP1+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$MARSTAT+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$REGION+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_LW_DWN+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_SW_DWN+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ALLSKY_TOA_SW_DWN+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$KT+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$PRECTOT+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$PS+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$QV2M+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$RH2M+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$T2M+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$T2M_RANGE+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$T2MWET+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$WS2M+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean.pollution+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean_solar_noon+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean_sunrise_time+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean_sunset_time+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$sd_sunset_time+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$sd_sunset_time+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$sd_sunset_time)}
for(i in 1:86){model_output_primary_diagnosis_REGION1.n[[i]] <- lm(primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$DOC ~ primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$n +
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$AGE +
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$SEX +
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$Mean_of_Sunlight_Duration_minutes +
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$SVYEAR +
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$BEDSIZE +
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$RACE+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$OWNER+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ESOP1+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$MARSTAT+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$REGION+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_LW_DWN+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ALLSKY_SFC_SW_DWN+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$ALLSKY_TOA_SW_DWN+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$KT+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$PRECTOT+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$PS+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$QV2M+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$RH2M+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$T2M+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$T2M_RANGE+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$T2MWET+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$WS2M+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean.pollution+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean_solar_noon+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean_sunrise_time+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$mean_sunset_time+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$sd_sunset_time+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$sd_sunset_time+
primary.diagnosis.ICD_9_code_groups.REGION1[[i]]$sd_sunset_time)}
#####Model for phase:procedures#####
#Convert the catagorical variables into factors
for(i in 1:length(procedure_groups.REGION1)){procedure_groups.REGION1[[i]]$RACE <- factor(procedure_groups.REGION1[[i]]$RACE)}
for(i in 1:length(procedure_groups.REGION1)){procedure_groups.REGION1[[i]]$OWNER <- factor(procedure_groups.REGION1[[i]]$OWNER)}
for(i in 1:length(procedure_groups.REGION1)){procedure_groups.REGION1[[i]]$SEX <- factor(procedure_groups.REGION1[[i]]$SEX)}
for(i in 1:length(procedure_groups.REGION1)){procedure_groups.REGION1[[i]]$ESOP1 <- factor(procedure_groups.REGION1[[i]]$ESOP1)}
for(i in 1:length(procedure_groups.REGION1)){procedure_groups.REGION1[[i]]$ADM_TYPE <- factor(procedure_groups.REGION1[[i]]$ADM_TYPE)}
for(i in 1:length(procedure_groups.REGION1)){procedure_groups.REGION1[[i]]$ASOURCE <- factor(procedure_groups.REGION1[[i]]$ASOURCE)}
for(i in 1:length(procedure_groups.REGION1)){procedure_groups.REGION1[[i]]$DISCSTAT <- factor(procedure_groups.REGION1[[i]]$DISCSTAT)}
for(i in 1:length(procedure_groups.REGION1)){procedure_groups.REGION1[[i]]$MARSTAT <- factor(procedure_groups.REGION1[[i]]$MARSTAT)}
for(i in 1:length(procedure_groups.REGION1)){procedure_groups.REGION1[[i]]$MARSTAT <- factor(procedure_groups.REGION1[[i]]$MARSTAT)}
for(i in 1:length(procedure_groups.REGION1)){procedure_groups.REGION1[[i]]$REGION <- factor(procedure_groups.REGION1[[i]]$REGION)}
#get a list ready to recieve the outputs from the model (correlation coefficients for each disease)
model_output <- list()
#The model. Multiple regression.
for(i in 1:86){model_output_procedures_REGION1.DOC[[i]] <- lm(procedure_groups.REGION1[[i]]$DOC ~ procedure_groups.REGION1[[i]]$n +
procedure_groups.REGION1[[i]]$AGE +
procedure_groups.REGION1[[i]]$SEX +
procedure_groups.REGION1[[i]]$Mean_of_Sunlight_Duration_minutes +
procedure_groups.REGION1[[i]]$SVYEAR +
procedure_groups.REGION1[[i]]$BEDSIZE +
procedure_groups.REGION1[[i]]$RACE+
procedure_groups.REGION1[[i]]$OWNER+
procedure_groups.REGION1[[i]]$ESOP1+
procedure_groups.REGION1[[i]]$MARSTAT+
procedure_groups.REGION1[[i]]$REGION+
procedure_groups.REGION1[[i]]$ALLSKY_SFC_LW_DWN+
procedure_groups.REGION1[[i]]$ALLSKY_SFC_SW_DWN+
procedure_groups.REGION1[[i]]$ALLSKY_TOA_SW_DWN+
procedure_groups.REGION1[[i]]$KT+
procedure_groups.REGION1[[i]]$PRECTOT+
procedure_groups.REGION1[[i]]$PS+
procedure_groups.REGION1[[i]]$QV2M+
procedure_groups.REGION1[[i]]$RH2M+
procedure_groups.REGION1[[i]]$T2M+
procedure_groups.REGION1[[i]]$T2M_RANGE+
procedure_groups.REGION1[[i]]$T2MWET+
procedure_groups.REGION1[[i]]$WS2M+
procedure_groups.REGION1[[i]]$mean.pollution+
procedure_groups.REGION1[[i]]$mean_solar_noon+
procedure_groups.REGION1[[i]]$mean_sunrise_time+
procedure_groups.REGION1[[i]]$mean_sunset_time+
procedure_groups.REGION1[[i]]$sd_sunset_time+
procedure_groups.REGION1[[i]]$sd_sunset_time+
procedure_groups.REGION1[[i]]$sd_sunset_time)}
for(i in 1:86){model_output_procedures_REGION1.n[[i]] <- lm(procedure_groups.REGION1[[i]]$DOC ~ procedure_groups.REGION1[[i]]$n +
procedure_groups.REGION1[[i]]$AGE +
procedure_groups.REGION1[[i]]$SEX +
procedure_groups.REGION1[[i]]$Mean_of_Sunlight_Duration_minutes +
procedure_groups.REGION1[[i]]$SVYEAR +
procedure_groups.REGION1[[i]]$BEDSIZE +
procedure_groups.REGION1[[i]]$RACE+
procedure_groups.REGION1[[i]]$OWNER+
procedure_groups.REGION1[[i]]$ESOP1+
procedure_groups.REGION1[[i]]$MARSTAT+
procedure_groups.REGION1[[i]]$REGION+
procedure_groups.REGION1[[i]]$ALLSKY_SFC_LW_DWN+
procedure_groups.REGION1[[i]]$ALLSKY_SFC_SW_DWN+
procedure_groups.REGION1[[i]]$ALLSKY_TOA_SW_DWN+
procedure_groups.REGION1[[i]]$KT+
procedure_groups.REGION1[[i]]$PRECTOT+
procedure_groups.REGION1[[i]]$PS+
procedure_groups.REGION1[[i]]$QV2M+
procedure_groups.REGION1[[i]]$RH2M+
procedure_groups.REGION1[[i]]$T2M+
procedure_groups.REGION1[[i]]$T2M_RANGE+
procedure_groups.REGION1[[i]]$T2MWET+
procedure_groups.REGION1[[i]]$WS2M+
procedure_groups.REGION1[[i]]$mean.pollution+
procedure_groups.REGION1[[i]]$mean_solar_noon+
procedure_groups.REGION1[[i]]$mean_sunrise_time+
procedure_groups.REGION1[[i]]$mean_sunset_time+
procedure_groups.REGION1[[i]]$sd_sunset_time+
procedure_groups.REGION1[[i]]$sd_sunset_time+
procedure_groups.REGION1[[i]]$sd_sunset_time)}
#plot it to see whether it is linear
#####Looking at differences in admission during months with a phase shift#####
#plot admission counts (y) against admission month (x)
ICD_9_code_groups[[1]]
ICD_9_code_groups[[27]] %>%
filter(ADM_MON < 13) %>%
ggplot(aes(x=(ADM_MON), y= (n)))+
geom_point(alpha=1)+
coord_cartesian(ylim= c(), xlim= c())+
xlab("Admission month")+
ylab("Admission count")+
ggtitle("The relationship between admission month and admission count", "Infectious and paracitic diseases")
#comparing admission counts in march (and, separately, november/december) with the mean admission counts across the year
ICD_9_code_groups.admission.counts[[1]]$march <- ifelse(ICD_9_code_groups.admission.counts[[1]]$ADM_MON == 3, "1", "0")
ggplot(ICD_9_code_groups.admission.counts[[1]], aes(n)) +
geom_histogram(fill = "white", color = "grey30") +
facet_wrap(~ march)+
scale_x_log10()
t.test(n ~ march, data=ICD_9_code_groups.admission.counts[[1]])
t.test(log(n) ~ march, data=ICD_9_code_groups.admission.counts[[1]]) |
## The pair of functions, makeCacheMatrix and cacheSolve,
## cache the inverse of a matrix
## Function makeCacheMatrix creates a special "matrix" object
## that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
set <- function(y) {
x <<- y
inverse <<- NULL
}
get <- function() x
setinverse <- function(solve) inverse <<- solve
getinverse <- function() inverse
list(set = set, get = get, ## create list with methods for
setinverse = setinverse, ## get and set both the original
getinverse = getinverse) ## matrix and its inverse
}
## Function cacheSolve first checks if the inverse of the special
## "matrix" has already been calculated for the matrix that has not
## been changed and retreives the inverse from the cache. Otherwise,
## computes the inverse of the special "matrix" returned by makeCacheMatrix
cacheSolve <- function(x, ...) {
inverse <- x$getinverse()
if(!is.null(inverse)) {
message("getting cached data")
return(inverse) ## Return cached inverse of 'x'
}
data <- x$get()
inverse <- solve(data,...)
x$setinverse(inverse)
inverse ## Return a matrix that is the inverse of 'x'
}
| /cachematrix.R | no_license | GorjanaP/ProgrammingAssignment2 | R | false | false | 1,333 | r | ## The pair of functions, makeCacheMatrix and cacheSolve,
## cache the inverse of a matrix
## Function makeCacheMatrix creates a special "matrix" object
## that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
set <- function(y) {
x <<- y
inverse <<- NULL
}
get <- function() x
setinverse <- function(solve) inverse <<- solve
getinverse <- function() inverse
list(set = set, get = get, ## create list with methods for
setinverse = setinverse, ## get and set both the original
getinverse = getinverse) ## matrix and its inverse
}
## Function cacheSolve first checks if the inverse of the special
## "matrix" has already been calculated for the matrix that has not
## been changed and retreives the inverse from the cache. Otherwise,
## computes the inverse of the special "matrix" returned by makeCacheMatrix
cacheSolve <- function(x, ...) {
inverse <- x$getinverse()
if(!is.null(inverse)) {
message("getting cached data")
return(inverse) ## Return cached inverse of 'x'
}
data <- x$get()
inverse <- solve(data,...)
x$setinverse(inverse)
inverse ## Return a matrix that is the inverse of 'x'
}
|
library(qtl2pleio)
library(testthat)
context("testing scan_pvl")
# setup
# read data
iron <- qtl2::read_cross2(system.file("extdata", "iron.zip", package="qtl2"))
# insert pseudomarkers into map
map <- qtl2::insert_pseudomarkers(iron$gmap, step=1)
# calculate genotype probabilities
probs <- qtl2::calc_genoprob(iron, map, error_prob=0.002)
# grab phenotypes and covariates; ensure that covariates have names attribute
pheno <- iron$pheno
# leave-one-chromosome-out kinship matrices
kinship <- qtl2::calc_kinship(probs, "loco")$`1`
# get founder allele probabilites
probs <- qtl2::genoprob_to_alleleprob(probs)$`1`
ac <- matrix(as.numeric(iron$covar$sex == "m", ncol = 1))
colnames(ac) <- "sex"
rownames(ac) <- rownames(probs)
## first scan_pvl call
scan_out <- scan_pvl(probs = probs,
pheno = pheno,
kinship = kinship,
start_snp = 1,
n_snp = 10
)
test_that("scan_pvl returns a dataframe where the last column has numeric entries, all negative", {
expect_true(identical(rep(TRUE, nrow(scan_out)),
as.vector(scan_out[ , ncol(scan_out)] < 0)))
expect_true(is.data.frame(scan_out))
})
so_cov <- scan_pvl(probs = probs,
pheno = pheno,
addcovar = ac,
kinship = kinship,
start_snp = 1,
n_snp = 10
)
foo <- scan_pvl(probs = probs,
pheno = pheno,
kinship = NULL,
start_snp = 1,
n_snp = 10
)
test_that("scan_pvl handles missing values in covariates appropriately", {
expect_equal(sum(!is.na(scan_out)), prod(dim(scan_out)))
expect_equal(sum(!is.na(so_cov)), prod(dim(so_cov)))
})
test_that("scan_pvl handles kinship = NULL", {
expect_true(is.data.frame(foo))
})
| /tests/testthat/test-scan_pvl.R | permissive | kbroman/qtl2pleio | R | false | false | 1,851 | r | library(qtl2pleio)
library(testthat)
context("testing scan_pvl")
# setup
# read data
iron <- qtl2::read_cross2(system.file("extdata", "iron.zip", package="qtl2"))
# insert pseudomarkers into map
map <- qtl2::insert_pseudomarkers(iron$gmap, step=1)
# calculate genotype probabilities
probs <- qtl2::calc_genoprob(iron, map, error_prob=0.002)
# grab phenotypes and covariates; ensure that covariates have names attribute
pheno <- iron$pheno
# leave-one-chromosome-out kinship matrices
kinship <- qtl2::calc_kinship(probs, "loco")$`1`
# get founder allele probabilites
probs <- qtl2::genoprob_to_alleleprob(probs)$`1`
ac <- matrix(as.numeric(iron$covar$sex == "m", ncol = 1))
colnames(ac) <- "sex"
rownames(ac) <- rownames(probs)
## first scan_pvl call
scan_out <- scan_pvl(probs = probs,
pheno = pheno,
kinship = kinship,
start_snp = 1,
n_snp = 10
)
test_that("scan_pvl returns a dataframe where the last column has numeric entries, all negative", {
expect_true(identical(rep(TRUE, nrow(scan_out)),
as.vector(scan_out[ , ncol(scan_out)] < 0)))
expect_true(is.data.frame(scan_out))
})
so_cov <- scan_pvl(probs = probs,
pheno = pheno,
addcovar = ac,
kinship = kinship,
start_snp = 1,
n_snp = 10
)
foo <- scan_pvl(probs = probs,
pheno = pheno,
kinship = NULL,
start_snp = 1,
n_snp = 10
)
test_that("scan_pvl handles missing values in covariates appropriately", {
expect_equal(sum(!is.na(scan_out)), prod(dim(scan_out)))
expect_equal(sum(!is.na(so_cov)), prod(dim(so_cov)))
})
test_that("scan_pvl handles kinship = NULL", {
expect_true(is.data.frame(foo))
})
|
# This R package and dependencies will need to be installed:
library(devtools)
install_github("cmfield/phylloR")
library(phylloR)
# Import and format all of the data
expt53raw <- read.delim("expt53/otutab_final_classified.txt",stringsAsFactors=F,header=T,row.names=1,comment.char="")
expt61raw <- read.delim("expt61/otutab_final_classified.txt",stringsAsFactors=F,header=T,row.names=1,comment.char="")
expt62raw <- read.delim("expt62/otutab_final_classified.txt",stringsAsFactors=F,header=T,row.names=1,comment.char="")
colnames(expt53raw) <- sub("X","E53S",colnames(expt53raw))
colnames(expt61raw) <- sub("X","E61S",colnames(expt61raw))
colnames(expt62raw) <- sub("X","E62S",colnames(expt62raw))
meta53raw <- read.csv("metadata53.csv",sep=";",header=T,row.names=1)
meta61raw <- read.csv("metadata61.csv",sep=";",header=T,row.names=1)
meta62raw <- read.csv("metadata62.csv",sep=";",header=T,row.names=1)
rownames(meta53raw) <- paste("E53S",rownames(meta53raw),sep="")
rownames(meta61raw) <- paste("E61S",rownames(meta61raw),sep="")
rownames(meta62raw) <- paste("E62S",rownames(meta62raw),sep="")
# The data frame will be row-wise, ie: each row is an experiment, each column a phylogroup
# Remove the inoculum and axenic data, combine unclassified otus
expt53 <- expt53raw[,which(meta53raw$Treatment!="Ax" & meta53raw$Spray!="Inoc")]
expt53 <- expt53[order(rownames(expt53)),]
expt53 <- rbind(expt53[!grepl("Unclass",rownames(expt53)),],Unclassified=apply(expt53[grepl("Unclass",rownames(expt53)),],2,sum))
meta53 <- meta53raw[which(meta53raw$Treatment!="Ax" & meta53raw$Spray!="Inoc"),]
meta53 <- cbind(experiment=53,meta53[,1:4])
colnames(meta53) <- c("experiment","initial","spray","repeat","time")
meta53$initial <- factor(meta53$initial,c("ALL","Ax","No A","No B","No G","No P"))
meta53$spray <- factor(meta53$spray,c("U","Mg","A","B","G","P"))
expt61 <- expt61raw[,which(meta61raw$Treatment!="Axenic" & meta61raw$Time!="t0")]
expt61 <- expt61[order(rownames(expt61)),]
expt61 <- rbind(expt61[!grepl("Unclass",rownames(expt61)),],Unclassified=apply(expt61[grepl("Unclass",rownames(expt61)),],2,sum))
expt61 <- expt61[rownames(expt61)!="Leaf281",]
meta61 <- meta61raw[which(meta61raw$Treatment!="Axenic" & meta61raw$Time!="t0"),]
meta61 <- cbind(experiment=61,meta61[,1],"U",meta61[,2:3])
colnames(meta61) <- c("experiment","initial","spray","repeat","time")
levels(meta61$initial) <- sub("-","Leaf",levels(meta61$initial))
meta61$initial <- factor(meta61$initial,c("ALL","Ax",levels(meta61$initial)[!levels(meta61$initial)%in%c("ALL","Ax")]))
meta61$initial <- factor(meta61$initial,c("ALL","Ax",levels(meta61$initial)[grepl("Leaf",levels(meta61$initial))][order(match(levels(meta61$initial)[grepl("Leaf",levels(meta61$initial))],rownames(leafTaxonomy)))]))
meta61$initial <- droplevels(meta61$initial)
expt62 <- expt62raw[,which(meta62raw$Treatment!="Ax" & meta62raw$Time!="t0")]
expt62 <- expt62[order(rownames(expt62)),]
expt62 <- rbind(expt62[!grepl("Unclass",rownames(expt62)),],Unclassified=apply(expt62[grepl("Unclass",rownames(expt62)),],2,sum))
meta62 <- meta62raw[which(meta62raw$Treatment!="Ax" & meta62raw$Time!="t0"),]
meta62 <- cbind(experiment=62,meta62[,1],"U",meta62[,2:3])
colnames(meta62) <- c("experiment","initial","spray","repeat","time")
levels(meta62$initial) <- sub("-","Leaf",levels(meta62$initial))
meta62$initial <- factor(meta62$initial,c("ALL","Ax",levels(meta62$initial)[!levels(meta62$initial)%in%c("ALL","Ax")]))
meta62$initial <- factor(meta62$initial,c("ALL","Ax",levels(meta62$initial)[grepl("Leaf",levels(meta62$initial))][order(match(levels(meta62$initial)[grepl("Leaf",levels(meta62$initial))],rownames(leafTaxonomy)))]))
meta62$initial <- droplevels(meta61$initial)
# Make datasets compatible with analysis functions
# Each ds* is a list(counts=<strain x sample matrix>, meta=<sample metadata>),
# the shape expected by makeCDS below; ds6c combines both replicates.
#ds51 <- list(counts=expt51,meta=meta51)
ds53 <- list(counts=expt53,meta=meta53)
ds61 <- list(counts=expt61,meta=meta61)
ds62 <- list(counts=expt62,meta=meta62)
ds6c <- list(counts=cbind(expt61,expt62),meta=rbind(meta61,meta62))
# Make control-only datasets and export
# Controls are the samples inoculated with the full community ("ALL").
control53 <- expt53[,meta53$initial=="ALL"]
control61 <- expt61[,meta61$initial=="ALL"]
control62 <- expt62[,meta62$initial=="ALL"]
write.table(control53,"network/control53.tsv",sep="\t")
write.table(control61,"network/control61.tsv",sep="\t")
write.table(control62,"network/control62.tsv",sep="\t")
cat("Data imported\n")
# Progress marker wrapping up the import/preprocessing phase above.
cat("Data imported\n")
# Diverging blue-to-red palette used by the heatmap-style plots below.
zcols <- c("#313695","#4575b4","#74add1","#abd9e9","#e0f3f8","#ffffbf","#fee090","#fdae61","#f46d43","#d73027","#a50026")
# Make handy strain sets
# All strains = every count row except the trailing "Unclassified" row.
strains <- head(rownames(ds53$counts), -1)
subTax <- leafTaxonomy[strains, ]
sNames <- subTax$Name
# Logical masks over subTax, reused for each id/name pair below.
isAlpha <- subTax$Class == "Alphaproteobacteria"
isBeta <- subTax$Class == "Betaproteobacteria"
isGamma <- subTax$Class == "Gammaproteobacteria"
isProteo <- subTax$Phylum == "Proteobacteria"
# Strain ids (rownames) and display names for each taxonomic group ...
alpha <- rownames(subTax)[isAlpha]
aNames <- subTax$Name[isAlpha]
beta <- rownames(subTax)[isBeta]
bNames <- subTax$Name[isBeta]
gamma <- rownames(subTax)[isGamma]
gNames <- subTax$Name[isGamma]
proteo <- rownames(subTax)[isProteo]
pNames <- subTax$Name[isProteo]
# ... and for the complement of each group.
notalpha <- rownames(subTax)[!isAlpha]
naNames <- subTax$Name[!isAlpha]
notbeta <- rownames(subTax)[!isBeta]
nbNames <- subTax$Name[!isBeta]
notgamma <- rownames(subTax)[!isGamma]
ngNames <- subTax$Name[!isGamma]
notproteo <- rownames(subTax)[!isProteo]
npNames <- subTax$Name[!isProteo]
# Build the contrast datasets (project helper makeCDS) unless the caller has
# defined `nocalc` to skip this expensive step, e.g. when re-sourcing only
# the plotting code below.
if(!exists("nocalc")){
# Experiment 53 contrasts: control spray/time effects, then for each class
# (Alpha/Beta/Gamma/Proteo) the absence, reintroduction-vs-mock, and
# late-arrival-vs-control comparisons.
cds53 <- list(conspray=makeCDS(ds53,include=list(initial="ALL"),foi="spray",title="Effect of Mock Spray\n(all strains)",legend=c("Unsprayed","Mock Sprayed")),
contime=makeCDS(ds53,include=list(initial="ALL"),foi="time",title="Effect of Time\n(all strains)",legend=c("Timepoint 1","Timepoint 2")),
noalpha=makeCDS(ds53,include=list(initial=c("No A","ALL"),spray=c("U","Mg")),foi="initial",title="",legend=c("Control","Absence")),
alphaback=makeCDS(ds53,include=list(initial="No A",time="t2"),foi="spray",title="",legend=c("Mock Spray","Reintroduction")),
latealpha=makeCDS(ds53,include=list(initial=c("No A","ALL"),spray=c("U","Mg","A"),time="t2"),exclude=list(initial="No A",spray="Mg"),foi="initial",title="",legend=c("Control","Late Arrival")),
nobeta=makeCDS(ds53,include=list(initial=c("No B","ALL"),spray=c("U","Mg")),foi="initial",title="",legend=c("Control","Absence")),
betaback=makeCDS(ds53,include=list(initial="No B",time="t2"),foi="spray",title="",legend=c("Mock Spray","Reintroduction")),
latebeta=makeCDS(ds53,include=list(initial=c("No B","ALL"),spray=c("U","Mg","B"),time="t2"),exclude=list(initial="No B",spray="Mg"),foi="initial",title="",legend=c("Control","Late Arrival")),
nogamma=makeCDS(ds53,include=list(initial=c("No G","ALL"),spray=c("U","Mg")),foi="initial",title="",legend=c("Control","Absence")),
gammaback=makeCDS(ds53,include=list(initial="No G",time="t2"),foi="spray",title="",legend=c("Mock Spray","Reintroduction")),
lategamma=makeCDS(ds53,include=list(initial=c("No G","ALL"),spray=c("U","Mg","G"),time="t2"),exclude=list(initial="No G",spray="Mg"),foi="initial",title="",legend=c("Control","Late Arrival")),
noproteo=makeCDS(ds53,include=list(initial=c("No P","ALL"),spray=c("U","Mg")),foi="initial",title="",legend=c("Control","Absence")),
proteoback=makeCDS(ds53,include=list(initial="No P",time="t2"),foi="spray",title="",legend=c("Mock Spray","Reintroduction")),
lateproteo=makeCDS(ds53,include=list(initial=c("No P","ALL"),spray=c("U","Mg","P"),time="t2"),exclude=list(initial="No P",spray="Mg"),foi="initial",title="",legend=c("Control","Late Arrival"))
)
# Experiment 6 contrasts: replicate batch effect, one cross-replicate
# drop-out sanity check, and per-strain drop-out comparisons for R1 ("...1"),
# R2 ("...2") and both combined ("...c", with experiment as covariate).
cds6 <- list()
cds6[["batch"]] <- makeCDS(ds6c,include=list(initial="ALL"),foi="experiment",title="R1 vs. R2",legend=c("R1","R2"))
ds633 <- ds6c
ds633$counts <- ds633$counts[,(ds633$meta$experiment=="62" & ds633$meta$initial=="Leaf33") | (ds633$meta$experiment=="61" & ds633$meta$initial=="ALL")]
ds633$meta <- ds633$meta[(ds633$meta$experiment=="62" & ds633$meta$initial=="Leaf33") | (ds633$meta$experiment=="61" & ds633$meta$initial=="ALL"),]
cds6[["dropout"]] <- makeCDS(ds633,foi="experiment",title="R1 Control vs.\nR2 L-33 Sphingomonas Drop-out",legend=c("R1 Control","R2 Drop-out"))
for(x in levels(ds6c$meta$initial)[-1]){
cds6[[paste(x,1,sep="")]] <- makeCDS(ds61,include=list(initial=c(x,"ALL")),foi="initial",title="")
cds6[[paste(x,2,sep="")]] <- makeCDS(ds62,include=list(initial=c(x,"ALL")),foi="initial",title="")
cds6[[paste(x,"c",sep="")]] <- makeCDS(ds6c,include=list(initial=c(x,"ALL")),foi="initial",ftc="experiment",title="")
}
}
# Global analysis parameters: permutation count and significance cutoff.
pm = 10000
cf = 0.01
# Two-group color scheme used throughout the PCA/bar figures.
bicols <- c("#214478","#A1D662")
# Collector for adonis/PERMANOVA-style results returned by plotPCA.
adn53 <- list()
cairo_pdf("figures/class_dropout_control_spraytime.pdf",width=16,height=7,family="Arial")
par(mfrow=c(1,2),cex=1.5,cex.lab=1,cex.sub=1.2)
adn53$conspray <- plotPCA(cds53$conspray,soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,showArrows=F)
adn53$contime <- plotPCA(cds53$contime,soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,showArrows=F)
dev.off()
# 5x4 grid of PCAs for the class drop-out experiment: columns are the four
# taxonomic groups, rows are (a) absence vs control, (b) late arrival vs mock,
# and (c) late arrival vs control evaluated on the rest of the community, the
# invading group itself, and the whole community.
cairo_pdf("figures/class_dropout_pca.pdf",width=28,height=35,family="Arial")
par(mfrow=c(5,4),cex=1.5,oma=c(0,6,6,0),cex.lab=1,cex.sub=1.2)
adn53$noalpha <- plotPCA(cds53$noalpha,soi=notalpha,perm=pm,cutoff=cf,rowLabs=naNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$nobeta <- plotPCA(cds53$nobeta,soi=notbeta,perm=pm,cutoff=cf,rowLabs=nbNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$nogamma <- plotPCA(cds53$nogamma,soi=notgamma,perm=pm,cutoff=cf,rowLabs=ngNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$noproteo <- plotPCA(cds53$noproteo,soi=notproteo,perm=pm,cutoff=cf,rowLabs=npNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$alphaback <- plotPCA(cds53$alphaback,soi=notalpha,perm=pm,cutoff=cf,rowLabs=naNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$betaback <- plotPCA(cds53$betaback,soi=notbeta,perm=pm,cutoff=cf,rowLabs=nbNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$gammaback <- plotPCA(cds53$gammaback,soi=notgamma,perm=pm,cutoff=cf,rowLabs=ngNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$proteoback <- plotPCA(cds53$proteoback,soi=notproteo,perm=pm,cutoff=cf,rowLabs=npNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
# Row 3: late-arrival contrast restricted to the non-invading strains.
adn53$latealphanot <- plotPCA(cds53$latealpha,soi=notalpha,perm=pm,cutoff=cf,rowLabs=naNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$latebetanot <- plotPCA(cds53$latebeta,soi=notbeta,perm=pm,cutoff=cf,rowLabs=nbNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$lategammanot <- plotPCA(cds53$lategamma,soi=notgamma,perm=pm,cutoff=cf,rowLabs=ngNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$lateproteonot <- plotPCA(cds53$lateproteo,soi=notproteo,perm=pm,cutoff=cf,rowLabs=npNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
# Row 4: same contrast restricted to the invading group only.
adn53$latealpha <- plotPCA(cds53$latealpha,soi=alpha,perm=pm,cutoff=cf,rowLabs=aNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$latebeta <- plotPCA(cds53$latebeta,soi=beta,perm=pm,cutoff=cf,rowLabs=bNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$lategamma <- plotPCA(cds53$lategamma,soi=gamma,perm=pm,cutoff=cf,rowLabs=gNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$lateproteo <- plotPCA(cds53$lateproteo,soi=proteo,perm=pm,cutoff=cf,rowLabs=pNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
# Row 5: same contrast over the whole community.
adn53$latealphaall <- plotPCA(cds53$latealpha,soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$latebetaall <- plotPCA(cds53$latebeta,soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$lategammaall <- plotPCA(cds53$lategamma,soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$lateproteoall <- plotPCA(cds53$lateproteo,soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
# Outer-margin annotation; `at` positions are hand-tuned for the 5x4 layout.
# Rows 3-5 intentionally share the "(c)" label (same contrast, different
# strain subsets, distinguished by the line-4 labels below).
mtext("Drop-Out Condition",side=3,line=3,at=0.515,outer=T,cex=3)
mtext("Alphaproteobacteria",side=3,line=0,at=0.14,outer=T,cex=3)
mtext("Betaproteobacteria",side=3,line=0,at=0.39,outer=T,cex=3)
mtext("Gammaproteobacteria",side=3,line=0,at=0.64,outer=T,cex=3)
mtext("Proteobacteria",side=3,line=0,at=0.89,outer=T,cex=3)
mtext("Group Absent vs. Control (a)",side=2,line=1,at=0.93,outer=T,cex=3)
mtext("Late Arrival vs. Mock (b)",side=2,line=1,at=0.73,outer=T,cex=3)
mtext("Late Arrival vs. Control (c)",side=2,line=1,at=0.53,outer=T,cex=3)
mtext("Late Arrival vs. Control (c)",side=2,line=1,at=0.33,outer=T,cex=3)
mtext("Late Arrival vs. Control (c)",side=2,line=1,at=0.13,outer=T,cex=3)
mtext("Effect on Rest of the Community",side=2,line=4,at=0.73,outer=T,cex=3)
mtext("Effect on Invading Group",side=2,line=4,at=0.33,outer=T,cex=3)
mtext("Effect on Whole Community",side=2,line=4,at=0.13,outer=T,cex=3)
mtext(bquote(underline(" ")),side=2,line=3,at=0.73,outer=T,cex=3)
dev.off()
# Bar-chart companions to the PCA figures: per-strain community changes for
# the control contrasts, then a 3x4 grid for the class drop-out contrasts.
cairo_pdf("figures/class_dropout_control_spraytime_bar.pdf",width=16,height=7,family="Arial")
par(mfrow=c(1,2),cex=1)
plotCommunityChanges(cds53$contime,soi=strains,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$conspray,soi=strains,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,nBars=18)
dev.off()
cairo_pdf("figures/class_dropout_bar.pdf",width=28,height=21,family="Arial")
par(mfrow=c(3,4),cex=1,oma=c(0,6,4,0))
# Row 1: group absent vs control, rest-of-community strains only.
plotCommunityChanges(cds53$noalpha,soi=notalpha,cutoff=cf,rowLabs=naNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$nobeta,soi=notbeta,cutoff=cf,rowLabs=nbNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$nogamma,soi=notgamma,cutoff=cf,rowLabs=ngNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$noproteo,soi=notproteo,cutoff=cf,rowLabs=npNames,subtitle="",cols=bicols,nBars=18)
# Row 2: reintroduction vs mock spray.
plotCommunityChanges(cds53$alphaback,soi=notalpha,cutoff=cf,rowLabs=naNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$betaback,soi=notbeta,cutoff=cf,rowLabs=nbNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$gammaback,soi=notgamma,cutoff=cf,rowLabs=ngNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$proteoback,soi=notproteo,cutoff=cf,rowLabs=npNames,subtitle="",cols=bicols,nBars=18)
# Row 3: late arrival vs control, all strains.
plotCommunityChanges(cds53$latealpha,soi=strains,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$latebeta,soi=strains,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$lategamma,soi=strains,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$lateproteo,soi=strains,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,nBars=18)
mtext("Alphaproteobacteria",side=3,line=0,at=0.14,outer=T,cex=3)
mtext("Betaproteobacteria",side=3,line=0,at=0.39,outer=T,cex=3)
mtext("Gammaproteobacteria",side=3,line=0,at=0.64,outer=T,cex=3)
mtext("Proteobacteria",side=3,line=0,at=0.89,outer=T,cex=3)
mtext("Group Absent vs. Control (a)",side=2,line=1,at=0.853,outer=T,cex=3)
mtext("Late Arrival vs. Mock (b)",side=2,line=1,at=0.52,outer=T,cex=3)
mtext("Late Arrival vs. Control (c)",side=2,line=1,at=0.187,outer=T,cex=3)
mtext("Effect on Rest of the Community",side=2,line=4,at=0.853,outer=T,cex=3)
mtext("Effect on Invading Group",side=2,line=4,at=0.52,outer=T,cex=3)
mtext("Effect on Whole Community",side=2,line=4,at=0.187,outer=T,cex=3)
dev.off()
# Single-strain drop-out experiment: batch/dropout sanity PCAs, then a
# 25x3 grid (one row per dropped strain; columns R1 / R2 / combined).
adn6 <- list()
cairo_pdf("figures/single_dropout_batch_pca.pdf",width=14,height=7,family="Arial")
par(mfrow=c(1,2),cex=1.5,cex.lab=1,cex.sub=1.2)
adn6[["batch"]] <- plotPCA(cds6[["batch"]],soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,cols=bicols,showArrows=F)
adn6[["dropout"]] <- plotPCA(cds6[["dropout"]],soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,cols=bicols,showArrows=F)
dev.off()
cairo_pdf("figures/single_dropout_pca.pdf",width=21,height=175,family="Arial")
par(mfrow=c(25,3),cex=1.5,oma=c(0,8,6,0),cex.lab=1,cex.sub=1.2)
for(x in levels(ds6c$meta$initial)[-1]){
# The dropped strain x is excluded from the strains-of-interest (soi).
adn6[[paste(x,1,sep="")]] <- plotPCA(cds6[[paste(x,1,sep="")]],soi=strains[strains!=x],perm=pm,cutoff=cf,rowLabs=sNames[strains!=x],subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn6[[paste(x,2,sep="")]] <- plotPCA(cds6[[paste(x,2,sep="")]],soi=strains[strains!=x],perm=pm,cutoff=cf,rowLabs=sNames[strains!=x],subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn6[[paste(x,"c",sep="")]] <- plotPCA(cds6[[paste(x,"c",sep="")]],soi=strains[strains!=x],perm=pm,cutoff=cf,rowLabs=sNames[strains!=x],subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
# NOTE(review): `levels(ds6c$meta$initial[-1])` drops the first ELEMENT, not
# the first level, so which() indexes the full level set (including "ALL");
# the 1.065 offset appears tuned to that off-by-one -- confirm before
# "fixing" the subscript placement.
mtext(leafTaxonomy[x,]$Name,side=2,line=1,at=1.065-which(levels(ds6c$meta$initial[-1])==x)/25,outer=T,cex=3)
}
mtext("Replicate 1",side=3,line=0,at=0.187,outer=T,cex=3)
mtext("Replicate 2",side=3,line=0,at=0.52,outer=T,cex=3)
mtext("Combined",side=3,line=0,at=0.853,outer=T,cex=3)
mtext("Drop-Out Condition",side=2,line=5,at=0.5,outer=T,cex=4)
mtext("Experiment",side=3,line=4,at=0.52,outer=T,cex=4)
dev.off()
# Highlighted combined PCAs for four strains of interest, then the batch
# bar charts and the full 25x3 per-strain bar-chart grid.
cairo_pdf("figures/single_dropout_highlights_pca.pdf",width=28,height=7,family="Arial")
par(mfrow=c(1,4),cex=1,cex.main=1.5,cex.sub=1.5)
for(x in c("Leaf203","Leaf231","Leaf233","Leaf262")){
adn6[[paste(x,"c",sep="")]] <- plotPCA(cds6[[paste(x,"c",sep="")]],soi=strains[strains!=x],perm=pm,cutoff=cf,rowLabs=sNames[strains!=x],cols=bicols,showLegend=F,showArrows=F,subtitle=leafTaxonomy[x,]$Name)
}
dev.off()
cairo_pdf("figures/single_dropout_batch_bar.pdf",width=14,height=7,family="Arial")
par(mfrow=c(1,2),cex=1)
plotCommunityChanges(cds6[["batch"]],soi=strains,cutoff=cf,rowLabs=sNames,cols=bicols,nBars=12)
plotCommunityChanges(cds6[["dropout"]],soi=strains,cutoff=cf,rowLabs=sNames,cols=bicols,nBars=12)
dev.off()
cairo_pdf("figures/single_dropout_bar.pdf",width=42,height=175,family="Arial")
par(mfrow=c(25,3),cex=1,oma=c(0,4,4,0))
for(x in levels(ds6c$meta$initial)[-1]){
# NOTE(review): sub("no","Leaf",x) looks like a no-op leftover -- the levels
# are already Leaf* names at this point.
plotCommunityChanges(cds6[[paste(x,1,sep="")]],soi=strains[strains!=sub("no","Leaf",x)],cutoff=cf,rowLabs=sNames[strains!=sub("no","Leaf",x)],subtitle="",cols=bicols,nBars=34)
plotCommunityChanges(cds6[[paste(x,2,sep="")]],soi=strains[strains!=sub("no","Leaf",x)],cutoff=cf,rowLabs=sNames[strains!=sub("no","Leaf",x)],subtitle="",cols=bicols,nBars=34)
plotCommunityChanges(cds6[[paste(x,"c",sep="")]],soi=strains[strains!=sub("no","Leaf",x)],cutoff=cf,rowLabs=sNames[strains!=sub("no","Leaf",x)],subtitle="",cols=bicols,nBars=34)
# NOTE(review): same `initial[-1]` subscript quirk as in the PCA grid above.
mtext(leafTaxonomy[x,]$Name,side=2,line=1,at=1.065-which(levels(ds6c$meta$initial[-1])==x)/25,outer=T,cex=3)
}
mtext("Replicate 1",side=3,line=0,at=0.187,outer=T,cex=3)
mtext("Replicate 2",side=3,line=0,at=0.52,outer=T,cex=3)
mtext("Combined",side=3,line=0,at=0.853,outer=T,cex=3)
dev.off()
# Summary plot for expt53
library(corrplot)
library(plotrix)
library(circlize)
# PERMANOVA effect sizes (R2) and p-values collected from the plotPCA calls;
# the first two entries (conspray/contime controls) are excluded from the
# 5x4 summary matrix.
effects53 <- unlist(lapply(adn53,function(x) x$stats$aov.tab$R2[1]))
pvalues53 <- unlist(lapply(adn53,function(x) x$stats$aov.tab$Pr[1]))
e <- t(matrix(effects53[c(-1,-2)],4))
p <- t(matrix(pvalues53[c(-1,-2)],4))
# Rows 3-5 intentionally repeat the "(c)" label (same contrast applied to
# three strain subsets; distinguished by the margin text below).
rownames(e) <- c("Group Absent\nvs. Control (a)","Late Arrival\nvs. Mock (b)","Late Arrival\nvs. Control (c)","Late Arrival\nvs. Control (c)","Late Arrival\nvs. Control (c)")
colnames(e) <- c("Alphaproteobacteria","Betaproteobacteria","Gammaproteobacteria","Proteobacteria")
cairo_pdf("figures/class_dropout_summary.pdf",width=6,height=6,family="Arial")
par(cex=1,xpd=T)
# Heatmap of effect sizes with significance stars overlaid from p.
corrplot(e,is.corr=F,cl.lim=c(0,0.7),cl.ratio=0.4,p.mat=p,insig="label_sig",sig=c(0.0001,0.001,0.01,0.05),tl.col=1,pch.cex=1,tl.srt=45,mar=c(0,10,2,2),col=zcols[6:1])
text(-4,4.1,"Effect on Rest\nof the Community",font=2)
text(-4,2.1,"Effect on\nInvading Group",font=2)
text(-4,1.1,"Effect on\nWhole Community",font=2)
lines(c(-2.1,-2.1),c(2.5,5.5))
text(2.5,8.5,"Drop-Out Condition",cex=1,font=2)
text(6,3,"Effect Size",cex=1,srt=-90)
dev.off()
# Summary plot 2 for expt53
library(ape)
library(apextra)
# 16S tree of the strains; tips pruned to the strain set, rooted at a
# hard-coded node (69) and made ultrametric for plotting.
tree <- read.tree("../leafGenomes/16s_full_align.phy_phyml_tree.txt")
tree <- drop.tip(tree,tree$tip.label[!tree$tip.label%in%strains])
tree <- root(tree,node=69,resolve.root=T)
tree <- as.ultrametric(tree)
strainOrder <- tree$tip.label[tipOrder(tree)]
strainCols <- subTax[,'Color',F]
strainCols <- strainCols[strainOrder,]
# Fold-change and adjusted-p matrices across the 12 expt53 contrasts;
# NA fold changes -> 0 and NA p-values -> 1 so they plot as "no change".
fc53 <- do.call(cbind,lapply(cds53[names(cds53)[-1:-2]],function(x) x$results[,"log2FoldChange"]))
fc53[is.na(fc53)] <- 0
pv53 <- do.call(cbind,lapply(cds53[names(cds53)[-1:-2]],function(x) x$results[,"padj"]))
pv53[is.na(pv53)] <- 1
rownames(fc53) <- rownames(cds53[[1]]$results)
colnames(fc53) <- rep(c("(a)","(b)","(c)"),4)
# Keep only the first 62 rows (the strain rows; drops trailing rows such as
# "Unclassified" -- confirm against the results table).
fc53 <- fc53[1:62,]
pv53 <- pv53[1:62,]
# Mask out cells where the group itself was absent from the contrast.
mask <- fc53==fc53
mask[alpha,1:2] <- FALSE
mask[beta,4:5] <- FALSE
mask[gamma,7:8] <- FALSE
mask[proteo,10:11] <- FALSE
# Significance stars binned from adjusted p-values.
overlay <- matrix(cut(pv53,c(0,0.0001,0.001,0.01,0.05,1),c("****","***","**","*","")),nrow(pv53))
cairo_pdf("figures/class_dropout_details.pdf",width=10,height=15,family="Arial")
par(cex=1)
treatmap(tree,fc53,mask=mask,overlay=overlay,tip.labels=leafTaxonomy[tree$tip.label,]$Name,tip.colors=strainCols,aspect.ratio=0.2,tip.label.width=8,z.cols=zcols)
dev.off()
# Summary plot for expt6
# Effect sizes/p-values per strain drop-out, rows = R1 / R2 / combined;
# the first two adn6 entries (batch, dropout sanity checks) are excluded.
effects6 <- unlist(lapply(adn6,function(x) x$stats$aov.tab$R2[1]))
pvalues6 <- unlist(lapply(adn6,function(x) x$stats$aov.tab$Pr[1]))
e <- matrix(effects6[-c(1,2)],3)
rownames(e) <- c("Replicate 1","Replicate 2","Combined")
shortNames <- levels(ds6c$meta$initial)[-1]
colnames(e) <- leafTaxonomy[shortNames,]$Name
p <- matrix(pvalues6[-c(1,2)],3)
# Reorder columns to match the tip order of the 16S tree drawn on top.
e <- e[,order(match(shortNames,tree$tip.label))]
p <- p[,order(match(shortNames,tree$tip.label))]
shortNames <- shortNames[order(match(shortNames,tree$tip.label))]
subtree <- keep.tip(tree,shortNames)
cairo_pdf("figures/single_dropout_summary.pdf",width=14,height=7,family="Arial")
par(cex=1,xpd=T)
# Row labels are drawn manually via text() below, so blank them here.
rownames(e) <- rep("",3)
corrplot(e,is.corr=F,cl.lim=c(0,0.25),cl.ratio=0.1,tl.col=c(leafTaxonomy[shortNames,]$Color,"black"),p.mat=p,insig="label_sig",sig=c(0.0001,0.001,0.01,0.05),pch.cex=1,mar=c(0,8,0,2),col=zcols[6:1])
draw.phylo(1,8.5,25,11,subtree,direction="d")
text(0.25,1,"Combined",pos=2)
text(0.25,2,"Replicate 2",pos=2)
text(0.25,3,"Replicate 1",pos=2)
text(12.5,11.5,"Drop-Out Condition",cex=1.5)
text(28,2,"Effect Size",cex=1,srt=-90)
legend(0.5,3.6,legend=names(phylumColors[-4]),fill=phylumColors[-4],xjust=1,yjust=0,cex=0.8)
dev.off()
# Summary plot 2 for expt6 combined
# Combined-replicate contrasts only (names ending in "c"); build strain x
# dropout fold-change and adjusted-p matrices.
cds6c <- cds6[-c(1,2)][grepl("c",names(cds6)[-c(1,2)])]
fc6c <- do.call(cbind,lapply(cds6c,function(x) x$results$log2FoldChange))
fc6c[is.na(fc6c)] <- 0
pv6c <- do.call(cbind,lapply(cds6c,function(x) x$results$padj))
pv6c[is.na(pv6c)] <- 1
rownames(fc6c) <- rownames(cds6c[[1]]$results)
# Strip the trailing "c" so columns are named by the dropped strain.
colnames(fc6c) <- sub("c","",colnames(fc6c))
fc6c <- fc6c[1:62,]
pv6c <- pv6c[1:62,]
# Mask the diagonal: a strain's own drop-out cell is meaningless.
mask <- fc6c==fc6c
for(col in colnames(fc6c)){
mask[col,col] <- FALSE
}
overlay <- matrix(cut(pv6c,c(0,0.0001,0.001,0.01,0.05,1),c("****","***","**","*","")),nrow(pv6c))
# Alternative row ordering by hierarchical clustering of fold changes.
hc <- as.phylo(hclust(dist(fc6c)))
hc <- reorderTips(hc)
cairo_pdf("figures/single_dropout_details_phylo.pdf",width=15,height=20,family="Arial")
par(cex=1)
treatmap(tree,fc6c,mask=mask,overlay=overlay,tip.labels=leafTaxonomy[tree$tip.label,]$Name,tip.colors=leafTaxonomy[tree$tip.label,]$Color,tip.label.width=10,mat.label.height=24,mat.labels=leafTaxonomy[colnames(fc6c),]$Name,mat.label.color=leafTaxonomy[colnames(fc6c),]$Color,mat.hclust=T,z.cols=zcols)
dev.off()
cairo_pdf("figures/single_dropout_details_hclust.pdf",width=15,height=20,family="Arial")
par(cex=1)
treatmap(hc,fc6c,mask=mask,overlay=overlay,tip.labels=leafTaxonomy[hc$tip.label,]$Name,tip.colors=leafTaxonomy[hc$tip.label,]$Color,tip.label.width=10,mat.label.height=24,mat.labels=leafTaxonomy[colnames(fc6c),]$Name,mat.label.color=leafTaxonomy[colnames(fc6c),]$Color,mat.hclust=T,z.cols=zcols)
dev.off()
# Summary plot with 6-1 and 6-2 separate
# Per-replicate fold-change/p matrices; columns prefixed "R1-"/"R2-".
cds61 <- cds6[grepl("1$",names(cds6))]
fc61 <- do.call(cbind,lapply(cds61,function(x) x$results$log2FoldChange))
fc61[is.na(fc61)] <- 0
pv61 <- do.call(cbind,lapply(cds61,function(x) x$results$padj))
pv61[is.na(pv61)] <- 1
shortNames1 <- sub("1$","",sub("no","Leaf",names(cds61)))
rownames(fc61) <- rownames(cds61[[1]]$results)
colnames(fc61) <- paste("R1",leafTaxonomy[shortNames1,]$Name,sep="-")
fc61 <- fc61[1:62,]
pv61 <- pv61[1:62,]
cds62 <- cds6[grepl("2$",names(cds6))]
fc62 <- do.call(cbind,lapply(cds62,function(x) x$results$log2FoldChange))
fc62[is.na(fc62)] <- 0
pv62 <- do.call(cbind,lapply(cds62,function(x) x$results$padj))
pv62[is.na(pv62)] <- 1
shortNames2 <- sub("2$","",sub("no","Leaf",names(cds62)))
rownames(fc62) <- rownames(cds62[[1]]$results)
colnames(fc62) <- paste("R2",leafTaxonomy[shortNames2,]$Name,sep="-")
fc62 <- fc62[1:62,]
pv62 <- pv62[1:62,]
# Concatenate block-wise: all R1 columns first, then all R2 columns.
fcc <- cbind(fc61,fc62)
pvc <- cbind(pv61,pv62)
shortNames <- c(shortNames1,shortNames2)
# Mask each strain's own drop-out column in both replicate blocks.
mask <- fcc==fcc
for(i in 1:(length(shortNames))){
mask[shortNames[i],i] <- FALSE
}
overlay <- matrix(cut(pvc,c(0,0.0001,0.001,0.01,0.05,1),c("****","***","**","*","")),nrow(pvc))
cairo_pdf("figures/single_dropout_separate_details_phylo.pdf",width=20,height=20,family="Arial")
par(cex=1)
treatmap(tree,fcc,mask=mask,overlay=overlay,tip.labels=leafTaxonomy[tree$tip.label,]$Name,tip.colors=leafTaxonomy[tree$tip.label,]$Color,tip.label.width=10,mat.label.height=24,mat.labels=colnames(fcc),mat.label.color=leafTaxonomy[shortNames,]$Color,mat.col.order=order(match(shortNames,tree$tip.label),decreasing=T),z.cols=zcols)
dev.off()
# Community plots
# Control ("ALL") count tables with the Unclassified row removed, used for
# the per-strain abundance distribution figures.
control53 <- cds53$conspray$counts
control53 <- control53[rownames(control53)!="Unclassified",]
control61 <- ds61$counts
control61 <- control61[,ds61$meta$initial=="ALL"]
control61 <- control61[rownames(control61)!="Unclassified",]
control62 <- ds62$counts
control62 <- control62[,ds62$meta$initial=="ALL"]
control62 <- control62[rownames(control62)!="Unclassified",]
cairo_pdf("figures/class_dropout_control_community.pdf",width=20,height=10,family="Arial")
par(cex=1)
comm53 <- plotCommunity(control53,type="violinswarm",xlabels=leafTaxonomy[rownames(control53),]$Name,xcols=leafTaxonomy[rownames(control53),]$Color)
dev.off()
# Control-community distributions for the two single-dropout replicates,
# one page per replicate.
cairo_pdf("figures/single_dropout_control_community.pdf",width=20,height=10,family="Arial")
par(cex=1)
comm61 <- plotCommunity(control61,type="violinswarm",xlabels=leafTaxonomy[rownames(control61),]$Name,xcols=leafTaxonomy[rownames(control61),]$Color)
# BUG FIX: the R2 panel previously looked its colors up with
# rownames(control61). Leaf281 is removed from experiment 61 only, so the
# two row sets can differ and the colors were misaligned with the R2
# strains; use control62's own rownames for both labels and colors.
comm62 <- plotCommunity(control62,type="violinswarm",xlabels=leafTaxonomy[rownames(control62),]$Name,xcols=leafTaxonomy[rownames(control62),]$Color)
dev.off()
# PCA of controls
# Pairwise and three-way comparisons of the "ALL" control samples across the
# class-dropout experiment (53) and the two single-dropout replicates (61/62).
# (Typo "Droupout" in the legends fixed to "Dropout".)
cds5361 <- makeCDS(counts=cbind(control53,control61),meta=rbind(cds53$conspray$meta,ds61$meta[ds61$meta$initial=="ALL",]),foi="experiment",legend=c("Class Dropout","Single Dropout R1"))
cds5362 <- makeCDS(counts=cbind(control53,control62),meta=rbind(cds53$conspray$meta,ds62$meta[ds62$meta$initial=="ALL",]),foi="experiment",legend=c("Class Dropout","Single Dropout R2"))
# BUG FIX: ds61$meta was previously subset with ds62's initial column
# (ds61$meta[ds62$meta$initial=="ALL",]), which misaligns metadata rows with
# the control61 count columns whenever the two designs differ; each metadata
# table must be subset by its own initial column.
cds6162 <- makeCDS(counts=cbind(control61,control62),meta=rbind(ds61$meta[ds61$meta$initial=="ALL",],ds62$meta[ds62$meta$initial=="ALL",]),foi="experiment",legend=c("Single Dropout R1","Single Dropout R2"))
cdsControl <- makeCDS(counts=cbind(control53,control61,control62),meta=rbind(cds53$conspray$meta,ds61$meta[ds61$meta$initial=="ALL",],ds62$meta[ds62$meta$initial=="ALL",]),foi="experiment",legend=c("Class Dropout","Single Dropout R1","Single Dropout R2"))
cairo_pdf("figures/controls_comparison_pca.pdf",width=14,height=14,family="Arial")
par(mfrow=c(2,2),cex=1)
plotPCA(cds5361,cols=bicols)
plotPCA(cds6162,cols=bicols)
plotPCA(cds5362,cols=bicols)
plotPCA(cdsControl,cols=c("red",bicols))
dev.off()
# Make tables for SparCC: all controls, expt61+expt62, noalpha, nobeta, nogamma, noproteo, alpharestore, betarestore, gammarestore, proteorestore
# Tables are stored transposed (samples x strains) ...
sparTabs <- list(control53=t(ds53$counts[,ds53$meta$initial=="ALL"]),
control61=t(ds61$counts[,ds61$meta$initial=="ALL"]),
control62=t(ds62$counts[,ds62$meta$initial=="ALL"])
)
# ... but written back transposed again (strains x samples).
# NOTE(review): t(t(x)) -- the double transpose only coerces a data frame to
# a matrix; confirm which orientation SparCC actually expects.
for(name in names(sparTabs)){
write.table(t(sparTabs[[name]]),paste("network/",name,".tsv",sep=""),sep="\t")
}
# Pie chart of control community
# Strains are colored/grouped by phylum via the shared phylumColors palette.
cairo_pdf("figures/class_dropout_pie.pdf",width=7,height=7,family="Arial")
par(cex=1)
plotCommunityPie(control53,strainTaxa=names(phylumColors)[match(leafTaxonomy[rownames(control53),]$Color,phylumColors)],cols=phylumColors,taxLabels=names(phylumColors),sort=F)
dev.off()
cairo_pdf("figures/single_dropout_pie.pdf",width=7,height=7,family="Arial")
par(cex=1)
plotCommunityPie(control61,strainTaxa=names(phylumColors)[match(leafTaxonomy[rownames(control61),]$Color,phylumColors)],cols=phylumColors,taxLabels=names(phylumColors),sort=F)
plotCommunityPie(control62,strainTaxa=names(phylumColors)[match(leafTaxonomy[rownames(control62),]$Color,phylumColors)],cols=phylumColors,taxLabels=names(phylumColors),sort=F)
dev.off()
# Network analysis based on experiment 6 knockouts: 61, 62 and 6c
# Keep only the combined ("...c") contrasts and name them by dropped strain.
cds6c <- cds6[grepl("c$",names(cds6))]
names(cds6c) <- substr(names(cds6c),1,nchar(names(cds6c))-1)
names(cds6c) <- sub("no","Leaf",names(cds6c))
summary6 <- summariseResults(cds6c)
summary6$fcMatrix <- summary6$fcMatrix[strains,]
# BUG FIX: the strain-subset p-values were previously assigned to a new
# element "pcMatrix" (never used), so the pvMatrix passed to
# igraphFromSummary below stayed unsubset and misaligned with fcMatrix.
summary6$pvMatrix <- summary6$pvMatrix[strains,]
net6 <- igraphFromSummary(summary6$fcMatrix,summary6$pvMatrix,cutoff=0.01)
vertex_attr(net6,"shortName") <- sub("-.*","",vertex_attr(net6,"name"))
vertex_attr(net6,"twoLineName") <- sub("-","\n",vertex_attr(net6,"name"))
# NOTE(review): the file extension is .gml but the format argument is
# "graphml" -- these are different igraph formats; confirm which is intended.
write.graph(net6,"results/network_001.gml","graphml")
# Including the inocula for time series comparison
# Same preprocessing pipeline as the main tables, but restricted to "ALL"
# samples so the t0 inocula are retained.
inoc53 <- expt53raw[,which(meta53raw$Treatment=="ALL")]
inoc53 <- inoc53[order(rownames(inoc53)),]
inoc53 <- rbind(inoc53[!grepl("Unclass",rownames(inoc53)),],Unclassified=apply(inoc53[grepl("Unclass",rownames(inoc53)),],2,sum))
inocmeta53 <- meta53raw[which(meta53raw$Treatment=="ALL"),]
inocmeta53 <- cbind(experiment=53,inocmeta53[,1:4])
colnames(inocmeta53) <- c("experiment","initial","spray","repeat","time")
inocmeta53$initial <- factor(inocmeta53$initial,c("ALL"))
inocmeta53$spray <- factor(inocmeta53$spray,c("U","Mg","Inoc"))
# Turn t1 into t2
inocmeta53$time[inocmeta53$time=="t1"] <- "t2"
inoc61 <- expt61raw[,which(meta61raw$Treatment=="ALL")]
inoc61 <- inoc61[order(rownames(inoc61)),]
inoc61 <- rbind(inoc61[!grepl("Unclass",rownames(inoc61)),],Unclassified=apply(inoc61[grepl("Unclass",rownames(inoc61)),],2,sum))
# Leaf281 removed from experiment 61 only, matching the expt61 table above.
inoc61 <- inoc61[rownames(inoc61)!="Leaf281",]
inocmeta61 <- meta61raw[which(meta61raw$Treatment=="ALL"),]
inocmeta61 <- cbind(experiment=61,inocmeta61[,1],"U",inocmeta61[,2:3])
colnames(inocmeta61) <- c("experiment","initial","spray","repeat","time")
inocmeta61$initial <- factor(inocmeta61$initial,c("ALL"))
inocmeta61$spray <- factor(inocmeta61$spray,c("U"))
inoc62 <- expt62raw[,which(meta62raw$Treatment=="ALL")]
inoc62 <- inoc62[order(rownames(inoc62)),]
inoc62 <- rbind(inoc62[!grepl("Unclass",rownames(inoc62)),],Unclassified=apply(inoc62[grepl("Unclass",rownames(inoc62)),],2,sum))
inocmeta62 <- meta62raw[which(meta62raw$Treatment=="ALL"),]
inocmeta62 <- cbind(experiment=62,inocmeta62[,1],"U",inocmeta62[,2:3])
colnames(inocmeta62) <- c("experiment","initial","spray","repeat","time")
inocmeta62$initial <- factor(inocmeta62$initial,c("ALL"))
inocmeta62$spray <- factor(inocmeta62$spray,c("U"))
# Inoculum datasets, singly and in all pairwise/three-way combinations.
ids53 <- list(counts=inoc53,meta=inocmeta53)
ids61 <- list(counts=inoc61,meta=inocmeta61)
ids62 <- list(counts=inoc62,meta=inocmeta62)
ids5361 <- list(counts=cbind(inoc53,inoc61),meta=rbind(inocmeta53,inocmeta61))
ids5362 <- list(counts=cbind(inoc53,inoc62),meta=rbind(inocmeta53,inocmeta62))
ids6162 <- list(counts=cbind(inoc61,inoc62),meta=rbind(inocmeta61,inocmeta62))
ids536162 <- list(counts=cbind(inoc53,inoc61,inoc62),meta=rbind(inocmeta53,inocmeta61,inocmeta62))
# Variance-stabilized abundances (DESeq2) for the expt53 controls and the
# t0 inocula; per-strain medians are compared in the correlation figures.
dds53 <- DESeqDataSetFromMatrix(ds53$counts[rownames(ds53$counts)!="Unclassified",ds53$meta$initial=="ALL"],ds53$meta[ds53$meta$initial=="ALL",],~1)
ddsi <- DESeqDataSetFromMatrix(ids53$counts[rownames(ids53$counts)!="Unclassified",ids53$meta$time=="t0"],ids53$meta[ids53$meta$time=="t0",],~1)
vsti <- assay(varianceStabilizingTransformation(ddsi))
vst53 <- assay(varianceStabilizingTransformation(dds53))
median53 <- apply(vst53,1,median)
mediani <- apply(vsti,1,median)
# Winners/losers: inoculum (t0) vs established community (t2) contrast.
cairo_pdf("figures/class_dropout_winners_losers.pdf",width=40,height=10,family="Arial")
par(cex=1)
icds53 <- makeCDS(ids53,include=list(time=c("t0","t2")),foi="time",legend=c("Inoculum","Established Community"))
plotCommunityChanges(icds53,cutoff=cf,rowLabs=leafTaxonomy[rownames(ids53$counts),]$Name,subtitle="",cols=bicols,nBars=54)
dev.off()
# Inoculum composition plots and raw-count exports (first 62 rows = strains).
cairo_pdf("figures/class_dropout_inocula.pdf",width=20,height=10,family="Arial")
par(cex=1)
plotCommunity(ids53$counts[1:62,ids53$meta$spray=="Inoc"],xlabels=leafTaxonomy[rownames(ids53$counts)[1:62],"Name"],xcols=leafTaxonomy[rownames(ids53$counts)[1:62],"Color"],type="points")
dev.off()
write.table(ids53$counts[1:62,ids53$meta$spray=="Inoc"],"results/inocula53.txt")
cairo_pdf("figures/single_dropout_inocula.pdf",width=20,height=10,onefile=T,family="Arial")
par(cex=1)
plotCommunity(ids61$counts[1:62,ids61$meta$time=="t0"],xlabels=leafTaxonomy[rownames(ids61$counts)[1:62],"Name"],xcols=leafTaxonomy[rownames(ids61$counts)[1:62],"Color"],type="points")
plotCommunity(ids62$counts[1:62,ids62$meta$time=="t0"],xlabels=leafTaxonomy[rownames(ids62$counts)[1:62],"Name"],xcols=leafTaxonomy[rownames(ids62$counts)[1:62],"Color"],type="points")
dev.off()
write.table(ids61$counts[1:62,ids61$meta$time=="t0"],"results/inocula61.txt")
write.table(ids62$counts[1:62,ids62$meta$time=="t0"],"results/inocula62.txt")
# Cross-experiment comparisons of the inocula themselves (t0 only).
icds5361 <- makeCDS(ids5361,include=list(time=c("t0")),foi="experiment",title="",legend=c("Class Dropout Inoculum","Single Dropout R1 Inoculum"))
icds5362 <- makeCDS(ids5362,include=list(time=c("t0")),foi="experiment",title="",legend=c("Class Dropout Inoculum","Single Dropout R2 Inoculum"))
icds6162 <- makeCDS(ids6162,include=list(time=c("t0")),foi="experiment",title="",legend=c("Single Dropout R1 Inoculum","Single Dropout R2 Inoculum"))
icds536162 <- makeCDS(ids536162,include=list(time=c("t0")),foi="experiment",title="",legend=c("Class Dropout Inoculum","Single Dropout R1 Inoculum","Single Dropout R2 Inoculum"))
cairo_pdf("figures/inocula_comparison.pdf",width=14,height=14,family="Arial")
par(mfrow=c(2,2),cex=1)
x <- plotPCA(icds5361,cols=bicols)
x <- plotPCA(icds6162,cols=bicols)
x <- plotPCA(icds5362,cols=bicols)
x <- plotPCA(icds536162,cols=c("red",bicols))
dev.off()
# Function to permute a pearson correlation
#
# Permutation test for the observed Pearson correlation of x and y.
#   x, y: numeric vectors of equal length.
#   n:    number of random permutations.
# Returns a one-sided p-value for the null "no (positive) correlation",
# computed as (1 + #{null >= observed}) / (n + 1). Including the observed
# statistic in the numerator and denominator means the estimate can never be
# exactly zero, the standard convention for Monte-Carlo permutation tests
# (previously this returned #{null > observed} / n, which can be 0).
permute.cor <- function(x,y,n){
  creal <- cor(x,y)
  # Null distribution: correlation after independently shuffling both inputs.
  cnull <- vapply(seq_len(n),function(i) cor(sample(x,length(x)),sample(y,length(y))),numeric(1))
  (1 + sum(cnull >= creal)) / (n + 1)
}
# Miscellaneous Correlations
# Strain abundance vs. effect size of single dropout
# NOTE(review): effects6/e/p are recomputed here exactly as in the summary
# plot section above (before tree-based reordering).
effects6 <- unlist(lapply(adn6,function(x) x$stats$aov.tab$R2[1]))
pvalues6 <- unlist(lapply(adn6,function(x) x$stats$aov.tab$Pr[1]))
e <- matrix(effects6[-c(1,2)],3)
rownames(e) <- c("Replicate 1","Replicate 2","Combined")
shortNames <- levels(ds6c$meta$initial)[-1]
colnames(e) <- leafTaxonomy[shortNames,]$Name
p <- matrix(pvalues6[-c(1,2)],3)
# Variance-stabilized control abundances per replicate and combined.
dds61 <- DESeqDataSetFromMatrix(ds61$counts[rownames(ds61$counts)!="Unclassified",ds61$meta$initial=="ALL"],ds61$meta[ds61$meta$initial=="ALL",],~1)
dds62 <- DESeqDataSetFromMatrix(ds62$counts[rownames(ds62$counts)!="Unclassified",ds62$meta$initial=="ALL"],ds62$meta[ds62$meta$initial=="ALL",],~1)
dds6162 <- DESeqDataSetFromMatrix(cbind(ds61$counts[rownames(ds61$counts)!="Unclassified",ds61$meta$initial=="ALL"],ds62$counts[rownames(ds62$counts)!="Unclassified",ds62$meta$initial=="ALL"]),rbind(ds61$meta[ds61$meta$initial=="ALL",],ds62$meta[ds62$meta$initial=="ALL",]),~1)
vst61 <- assay(varianceStabilizingTransformation(dds61))
vst62 <- assay(varianceStabilizingTransformation(dds62))
vst6162 <- assay(varianceStabilizingTransformation(dds6162))
median61 <- apply(vst61,1,median)
median62 <- apply(vst62,1,median)
median6162 <- apply(vst6162,1,median)
library(calibrate)
# Each scatter is drawn twice: a clean copy, then a copy labeled via textxy.
cairo_pdf("figures/correlations.pdf",width=7,height=7,onefile=T,family="Arial")
par(cex=1)
plot(100*e[1,],median61[colnames(fc6c)],xlab="Effect Size (%)",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R1")
plot(100*e[1,],median61[colnames(fc6c)],xlab="Effect Size (%)",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R1")
textxy(100*e[1,],median61[colnames(fc6c)],sub("eaf","",colnames(fc6c)))
plot(100*e[2,],median62[colnames(fc6c)],xlab="Effect Size (%)",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R2")
plot(100*e[2,],median62[colnames(fc6c)],xlab="Effect Size (%)",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R2")
textxy(100*e[2,],median62[colnames(fc6c)],sub("eaf","",colnames(fc6c)))
plot(100*e[3,],median6162[colnames(fc6c)],xlab="Effect Size (%)",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="Combined")
plot(100*e[3,],median6162[colnames(fc6c)],xlab="Effect Size (%)",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="Combined")
textxy(100*e[3,],median6162[colnames(fc6c)],sub("eaf","",colnames(fc6c)))
plot(mediani,median53,xlab="Inoculum Normalized Median Relative Abundance",ylab="Control Normalized Median Relative Abundance",pch=19,col=2)
plot(mediani,median53,xlab="Inoculum Normalized Median Relative Abundance",ylab="Control Normalized Median Relative Abundance",pch=19,col=2)
textxy(mediani,median53,sub("eaf","",names(median53)))
# Network-degree vs effect-size regressions (total/out/in degree).
y = igraph::degree(net6)[colnames(e)]
yo = igraph::degree(net6,mode="out")[colnames(e)]
yi = igraph::degree(net6,mode="in")[colnames(e)]
x = 100*e[3,]
#plot(100*e[3,],igraph::degree(net6)[colnames(e)],xlab="Effect Size (%)",ylab="Node Degree",pch=19,col=2,main="Combined",sub=summary(lm(y~x))$r.squared)
#abline(lm(y~x))
#plot(100*e[3,],igraph::degree(net6)[colnames(e)],xlab="Effect Size (%)",ylab="Node Degree",pch=19,col=2,main="Combined")
#textxy(100*e[3,],igraph::degree(net6)[colnames(e)],sub("eaf","",rownames(leafTaxonomy)[match(colnames(e),leafTaxonomy$Name)]))
#abline(lm(y~x))
plot(x,yo,xlab="Effect Size (%)",ylab="Node Out Degree",pch=19,col=2,main="Combined",sub=summary(lm(yo~x))$r.squared)
textxy(x,yo,sub("eaf","",rownames(leafTaxonomy)[match(colnames(e),leafTaxonomy$Name)]))
abline(lm(yo~x))
#plot(100*e[3,],yi,xlab="Effect Size (%)",ylab="Node In Degree",pch=19,col=2,main="Combined",sub=summary(lm(yi~x))$r.squared)
#abline(lm(yi~x))
dev.off()
# Output some data to file
write.table(cbind(NMRA=median6162[colnames(fc6c)],ESize=100*e[3,]),"NMRA-ES.txt")
fc61 <- fcc[,seq(1,ncol(fcc),2)]
fc62 <- fcc[,1+seq(1,ncol(fcc),2)]
colnames(fc61) <- colnames(fc6c)
colnames(fc62) <- colnames(fc6c)
for(x in colnames(fc6c)){
fc6c[x,x] <- 0
fc61[x,x] <- 0
fc62[x,x] <- 0
}
fccsums <- apply(fc6c,1,function(x) sum(abs(x)))
fc61sums <- apply(fc61,1,function(x) sum(abs(x)))
fc62sums <- apply(fc62,1,function(x) sum(abs(x)))
# Total absolute fold change vs. normalized median abundance, per replicate
# and combined. Each scatter is drawn twice (multi-page PDF) so the second
# page carries the textxy() strain labels while the first stays clean.
# BUG FIX: the two R2 panels (fc62sums/median62) were titled main="R1"
# (copy-paste from the R1 panels); corrected to "R2".
cairo_pdf("figures/correlation2.pdf",width=7,height=7,onefile=T,family="Arial")
par(cex=1)
plot(fc61sums,median61,xlab="Total Fold Changes",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R1")
plot(fc61sums,median61,xlab="Total Fold Changes",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R1")
textxy(fc61sums,median61,sub("eaf","",names(median61)))
plot(fc62sums,median62,xlab="Total Fold Changes",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R2")
plot(fc62sums,median62,xlab="Total Fold Changes",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R2")
textxy(fc62sums,median62,sub("eaf","",names(median62)))
plot(fccsums,median6162,xlab="Total Fold Changes",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="Combined")
plot(fccsums,median6162,xlab="Total Fold Changes",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="Combined")
textxy(fccsums,median6162,sub("eaf","",names(median6162)))
dev.off()
# Plot the fancy figure
# Bipartite summary of the combined drop-out contrasts, once clustered by the
# fold-change matrix and once ordered by the 16S phylogeny.
# NOTE(review): cds6c and tree are defined elsewhere in the full script —
# assumes they are already present in the session when this runs.
summary <- summariseResults(cds6c)
# Trim the last row of both matrices (presumably the Unclassified bin — TODO confirm)
summary$fcMatrix <- summary$fcMatrix[-nrow(summary$fcMatrix),]
summary$pvMatrix <- summary$pvMatrix[-nrow(summary$pvMatrix),]
cairo_pdf("figures/fancy.pdf",width=14,height=14,family="Arial")
plotBipartiteSummary(summary$fcMatrix,summary$pvMatrix,leftPhylo=as.phylo(hclust(dist(t(summary$fcMatrix)),method="ward.D")),rightPhylo=as.phylo(hclust(dist(summary$fcMatrix),method="ward.D")),leftLabs=leafTaxonomy[colnames(summary$fcMatrix),]$Name,rightLabs=leafTaxonomy[rownames(summary$fcMatrix),]$Name,leftCols=leafTaxonomy[colnames(summary$fcMatrix),]$Color,rightCols=leafTaxonomy[rownames(summary$fcMatrix),]$Color,cutoff=0.01,tip.label.width=0.3)
plotBipartiteSummary(summary$fcMatrix,summary$pvMatrix,leftLabs=leafTaxonomy[colnames(summary$fcMatrix),]$Name,rightLabs=leafTaxonomy[rownames(summary$fcMatrix),]$Name,leftPhylo=keep.tip(tree,colnames(summary$fcMatrix)),rightPhylo=tree,leftCols=leafTaxonomy[colnames(summary$fcMatrix),]$Color,rightCols=leafTaxonomy[rownames(summary$fcMatrix),]$Color,cutoff=0.01,tip.label.width=0.3)
dev.off()
| /analysis_publication.r | no_license | cmfield/carlstrom2019 | R | false | false | 42,272 | r | # This R package and dependencies will need to be installed:
library(devtools)
# NOTE(review): installs phylloR from GitHub on every run — consider guarding
# with requireNamespace() so repeated sourcing does not hit the network.
install_github("cmfield/phylloR")
library(phylloR)
# Import and format all of the data
# Raw OTU tables (rows = OTUs/strains, columns = samples)
expt53raw <- read.delim("expt53/otutab_final_classified.txt",stringsAsFactors=F,header=T,row.names=1,comment.char="")
expt61raw <- read.delim("expt61/otutab_final_classified.txt",stringsAsFactors=F,header=T,row.names=1,comment.char="")
expt62raw <- read.delim("expt62/otutab_final_classified.txt",stringsAsFactors=F,header=T,row.names=1,comment.char="")
# Prefix sample names with the experiment ID so the tables can be combined
colnames(expt53raw) <- sub("X","E53S",colnames(expt53raw))
colnames(expt61raw) <- sub("X","E61S",colnames(expt61raw))
colnames(expt62raw) <- sub("X","E62S",colnames(expt62raw))
meta53raw <- read.csv("metadata53.csv",sep=";",header=T,row.names=1)
meta61raw <- read.csv("metadata61.csv",sep=";",header=T,row.names=1)
meta62raw <- read.csv("metadata62.csv",sep=";",header=T,row.names=1)
rownames(meta53raw) <- paste("E53S",rownames(meta53raw),sep="")
rownames(meta61raw) <- paste("E61S",rownames(meta61raw),sep="")
rownames(meta62raw) <- paste("E62S",rownames(meta62raw),sep="")
# The data frame will be row-wise, ie: each row is an experiment, each column a phylogroup
# Remove the inoculum and axenic data, combine unclassified otus
# Experiment 53: drop axenic/inoculum samples, sort rows, collapse all
# "Unclass*" OTUs into a single Unclassified row, and tidy the metadata.
expt53 <- expt53raw[,which(meta53raw$Treatment!="Ax" & meta53raw$Spray!="Inoc")]
expt53 <- expt53[order(rownames(expt53)),]
expt53 <- rbind(expt53[!grepl("Unclass",rownames(expt53)),],Unclassified=apply(expt53[grepl("Unclass",rownames(expt53)),],2,sum))
meta53 <- meta53raw[which(meta53raw$Treatment!="Ax" & meta53raw$Spray!="Inoc"),]
meta53 <- cbind(experiment=53,meta53[,1:4])
colnames(meta53) <- c("experiment","initial","spray","repeat","time")
meta53$initial <- factor(meta53$initial,c("ALL","Ax","No A","No B","No G","No P"))
meta53$spray <- factor(meta53$spray,c("U","Mg","A","B","G","P"))
# Experiment 61: drop axenic samples and the t0 timepoint, same Unclassified
# collapse; factor levels reordered to match the leafTaxonomy row order.
expt61 <- expt61raw[,which(meta61raw$Treatment!="Axenic" & meta61raw$Time!="t0")]
expt61 <- expt61[order(rownames(expt61)),]
expt61 <- rbind(expt61[!grepl("Unclass",rownames(expt61)),],Unclassified=apply(expt61[grepl("Unclass",rownames(expt61)),],2,sum))
# Leaf281 excluded from experiment 61 — presumably absent from this community; TODO confirm
expt61 <- expt61[rownames(expt61)!="Leaf281",]
meta61 <- meta61raw[which(meta61raw$Treatment!="Axenic" & meta61raw$Time!="t0"),]
meta61 <- cbind(experiment=61,meta61[,1],"U",meta61[,2:3])
colnames(meta61) <- c("experiment","initial","spray","repeat","time")
levels(meta61$initial) <- sub("-","Leaf",levels(meta61$initial))
meta61$initial <- factor(meta61$initial,c("ALL","Ax",levels(meta61$initial)[!levels(meta61$initial)%in%c("ALL","Ax")]))
meta61$initial <- factor(meta61$initial,c("ALL","Ax",levels(meta61$initial)[grepl("Leaf",levels(meta61$initial))][order(match(levels(meta61$initial)[grepl("Leaf",levels(meta61$initial))],rownames(leafTaxonomy)))]))
meta61$initial <- droplevels(meta61$initial)
# Experiment 62: drop axenic samples and the t0 timepoint, collapse the
# "Unclass*" OTUs into one Unclassified row, and tidy the metadata to match
# experiments 53/61 (same column names and taxonomy-ordered factor levels).
expt62 <- expt62raw[,which(meta62raw$Treatment!="Ax" & meta62raw$Time!="t0")]
expt62 <- expt62[order(rownames(expt62)),]
expt62 <- rbind(expt62[!grepl("Unclass",rownames(expt62)),],Unclassified=apply(expt62[grepl("Unclass",rownames(expt62)),],2,sum))
meta62 <- meta62raw[which(meta62raw$Treatment!="Ax" & meta62raw$Time!="t0"),]
meta62 <- cbind(experiment=62,meta62[,1],"U",meta62[,2:3])
colnames(meta62) <- c("experiment","initial","spray","repeat","time")
levels(meta62$initial) <- sub("-","Leaf",levels(meta62$initial))
meta62$initial <- factor(meta62$initial,c("ALL","Ax",levels(meta62$initial)[!levels(meta62$initial)%in%c("ALL","Ax")]))
meta62$initial <- factor(meta62$initial,c("ALL","Ax",levels(meta62$initial)[grepl("Leaf",levels(meta62$initial))][order(match(levels(meta62$initial)[grepl("Leaf",levels(meta62$initial))],rownames(leafTaxonomy)))]))
# BUG FIX: the original line was `meta62$initial <- droplevels(meta61$initial)`,
# a copy-paste from the expt61 section that overwrote experiment 62's factor
# values with experiment 61's. Use meta62's own column, mirroring L4823.
meta62$initial <- droplevels(meta62$initial)
# Bundle each experiment's count table with its metadata in the list layout
# the phylloR analysis helpers expect, plus a combined R1+R2 dataset.
#ds51 <- list(counts=expt51,meta=meta51)
ds53 <- list(counts = expt53, meta = meta53)
ds61 <- list(counts = expt61, meta = meta61)
ds62 <- list(counts = expt62, meta = meta62)
ds6c <- list(counts = cbind(expt61, expt62), meta = rbind(meta61, meta62))
# Control-only ("ALL" inoculum) count tables, written out for network inference
control53 <- expt53[, meta53$initial == "ALL"]
control61 <- expt61[, meta61$initial == "ALL"]
control62 <- expt62[, meta62$initial == "ALL"]
for (nm in c("control53", "control61", "control62")) {
  write.table(get(nm), paste0("network/", nm, ".tsv"), sep = "\t")
}
cat("Data imported\n")
# Diverging blue-white-red palette used for signed heatmaps
zcols <- c("#313695","#4575b4","#74add1","#abd9e9","#e0f3f8","#ffffbf","#fee090","#fdae61","#f46d43","#d73027","#a50026")
# Make handy strain sets: every strain except the trailing "Unclassified"
# row, with taxonomy-derived subsets and their display names for plotting.
strains <- head(rownames(ds53$counts), -1)
subTax <- leafTaxonomy[strains, ]
sNames <- subTax$Name
isAlpha <- subTax$Class == "Alphaproteobacteria"
isBeta <- subTax$Class == "Betaproteobacteria"
isGamma <- subTax$Class == "Gammaproteobacteria"
isProteo <- subTax$Phylum == "Proteobacteria"
alpha <- rownames(subTax)[isAlpha]
aNames <- subTax$Name[isAlpha]
beta <- rownames(subTax)[isBeta]
bNames <- subTax$Name[isBeta]
gamma <- rownames(subTax)[isGamma]
gNames <- subTax$Name[isGamma]
proteo <- rownames(subTax)[isProteo]
pNames <- subTax$Name[isProteo]
# Complements of each group ("rest of the community" sets)
notalpha <- rownames(subTax)[!isAlpha]
naNames <- subTax$Name[!isAlpha]
notbeta <- rownames(subTax)[!isBeta]
nbNames <- subTax$Name[!isBeta]
notgamma <- rownames(subTax)[!isGamma]
ngNames <- subTax$Name[!isGamma]
notproteo <- rownames(subTax)[!isProteo]
npNames <- subTax$Name[!isProteo]
# Build all pairwise contrast datasets (CDS) unless the session sets `nocalc`,
# which skips this expensive step when re-sourcing the script.
if(!exists("nocalc")){
# Experiment 53 contrasts: controls (spray/time effects), class drop-outs,
# reintroductions ("back"), and late arrivals, one CDS per comparison.
cds53 <- list(conspray=makeCDS(ds53,include=list(initial="ALL"),foi="spray",title="Effect of Mock Spray\n(all strains)",legend=c("Unsprayed","Mock Sprayed")),
contime=makeCDS(ds53,include=list(initial="ALL"),foi="time",title="Effect of Time\n(all strains)",legend=c("Timepoint 1","Timepoint 2")),
noalpha=makeCDS(ds53,include=list(initial=c("No A","ALL"),spray=c("U","Mg")),foi="initial",title="",legend=c("Control","Absence")),
alphaback=makeCDS(ds53,include=list(initial="No A",time="t2"),foi="spray",title="",legend=c("Mock Spray","Reintroduction")),
latealpha=makeCDS(ds53,include=list(initial=c("No A","ALL"),spray=c("U","Mg","A"),time="t2"),exclude=list(initial="No A",spray="Mg"),foi="initial",title="",legend=c("Control","Late Arrival")),
nobeta=makeCDS(ds53,include=list(initial=c("No B","ALL"),spray=c("U","Mg")),foi="initial",title="",legend=c("Control","Absence")),
betaback=makeCDS(ds53,include=list(initial="No B",time="t2"),foi="spray",title="",legend=c("Mock Spray","Reintroduction")),
latebeta=makeCDS(ds53,include=list(initial=c("No B","ALL"),spray=c("U","Mg","B"),time="t2"),exclude=list(initial="No B",spray="Mg"),foi="initial",title="",legend=c("Control","Late Arrival")),
nogamma=makeCDS(ds53,include=list(initial=c("No G","ALL"),spray=c("U","Mg")),foi="initial",title="",legend=c("Control","Absence")),
gammaback=makeCDS(ds53,include=list(initial="No G",time="t2"),foi="spray",title="",legend=c("Mock Spray","Reintroduction")),
lategamma=makeCDS(ds53,include=list(initial=c("No G","ALL"),spray=c("U","Mg","G"),time="t2"),exclude=list(initial="No G",spray="Mg"),foi="initial",title="",legend=c("Control","Late Arrival")),
noproteo=makeCDS(ds53,include=list(initial=c("No P","ALL"),spray=c("U","Mg")),foi="initial",title="",legend=c("Control","Absence")),
proteoback=makeCDS(ds53,include=list(initial="No P",time="t2"),foi="spray",title="",legend=c("Mock Spray","Reintroduction")),
lateproteo=makeCDS(ds53,include=list(initial=c("No P","ALL"),spray=c("U","Mg","P"),time="t2"),exclude=list(initial="No P",spray="Mg"),foi="initial",title="",legend=c("Control","Late Arrival"))
)
# Experiment 6 contrasts: replicate batch effect, an R1-control vs. R2-Leaf33
# drop-out check, then per-strain drop-out vs. control for R1 ("…1"),
# R2 ("…2") and combined with experiment as covariate ("…c").
cds6 <- list()
cds6[["batch"]] <- makeCDS(ds6c,include=list(initial="ALL"),foi="experiment",title="R1 vs. R2",legend=c("R1","R2"))
ds633 <- ds6c
ds633$counts <- ds633$counts[,(ds633$meta$experiment=="62" & ds633$meta$initial=="Leaf33") | (ds633$meta$experiment=="61" & ds633$meta$initial=="ALL")]
ds633$meta <- ds633$meta[(ds633$meta$experiment=="62" & ds633$meta$initial=="Leaf33") | (ds633$meta$experiment=="61" & ds633$meta$initial=="ALL"),]
cds6[["dropout"]] <- makeCDS(ds633,foi="experiment",title="R1 Control vs.\nR2 L-33 Sphingomonas Drop-out",legend=c("R1 Control","R2 Drop-out"))
for(x in levels(ds6c$meta$initial)[-1]){
cds6[[paste(x,1,sep="")]] <- makeCDS(ds61,include=list(initial=c(x,"ALL")),foi="initial",title="")
cds6[[paste(x,2,sep="")]] <- makeCDS(ds62,include=list(initial=c(x,"ALL")),foi="initial",title="")
cds6[[paste(x,"c",sep="")]] <- makeCDS(ds6c,include=list(initial=c(x,"ALL")),foi="initial",ftc="experiment",title="")
}
}
# Shared plotting parameters: pm = PERMANOVA permutations, cf = significance
# cutoff (presumably adjusted p-value — TODO confirm in phylloR), bicols =
# two-group color scheme.
pm = 10000
cf = 0.01
bicols <- c("#214478","#A1D662")
adn53 <- list()
# Control PCAs for experiment 53 (spray and time effects); PERMANOVA results
# are accumulated in adn53 for the summary plots below.
cairo_pdf("figures/class_dropout_control_spraytime.pdf",width=16,height=7,family="Arial")
par(mfrow=c(1,2),cex=1.5,cex.lab=1,cex.sub=1.2)
adn53$conspray <- plotPCA(cds53$conspray,soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,showArrows=F)
adn53$contime <- plotPCA(cds53$contime,soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,showArrows=F)
dev.off()
# 5x4 grid of PCAs for the class drop-out experiment: rows = comparison type
# (absence, reintroduction, late arrival x3 strain subsets), columns = class.
# soi restricts each PCA to the relevant strain subset.
cairo_pdf("figures/class_dropout_pca.pdf",width=28,height=35,family="Arial")
par(mfrow=c(5,4),cex=1.5,oma=c(0,6,6,0),cex.lab=1,cex.sub=1.2)
adn53$noalpha <- plotPCA(cds53$noalpha,soi=notalpha,perm=pm,cutoff=cf,rowLabs=naNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$nobeta <- plotPCA(cds53$nobeta,soi=notbeta,perm=pm,cutoff=cf,rowLabs=nbNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$nogamma <- plotPCA(cds53$nogamma,soi=notgamma,perm=pm,cutoff=cf,rowLabs=ngNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$noproteo <- plotPCA(cds53$noproteo,soi=notproteo,perm=pm,cutoff=cf,rowLabs=npNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$alphaback <- plotPCA(cds53$alphaback,soi=notalpha,perm=pm,cutoff=cf,rowLabs=naNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$betaback <- plotPCA(cds53$betaback,soi=notbeta,perm=pm,cutoff=cf,rowLabs=nbNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$gammaback <- plotPCA(cds53$gammaback,soi=notgamma,perm=pm,cutoff=cf,rowLabs=ngNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$proteoback <- plotPCA(cds53$proteoback,soi=notproteo,perm=pm,cutoff=cf,rowLabs=npNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$latealphanot <- plotPCA(cds53$latealpha,soi=notalpha,perm=pm,cutoff=cf,rowLabs=naNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$latebetanot <- plotPCA(cds53$latebeta,soi=notbeta,perm=pm,cutoff=cf,rowLabs=nbNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$lategammanot <- plotPCA(cds53$lategamma,soi=notgamma,perm=pm,cutoff=cf,rowLabs=ngNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$lateproteonot <- plotPCA(cds53$lateproteo,soi=notproteo,perm=pm,cutoff=cf,rowLabs=npNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$latealpha <- plotPCA(cds53$latealpha,soi=alpha,perm=pm,cutoff=cf,rowLabs=aNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$latebeta <- plotPCA(cds53$latebeta,soi=beta,perm=pm,cutoff=cf,rowLabs=bNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$lategamma <- plotPCA(cds53$lategamma,soi=gamma,perm=pm,cutoff=cf,rowLabs=gNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$lateproteo <- plotPCA(cds53$lateproteo,soi=proteo,perm=pm,cutoff=cf,rowLabs=pNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$latealphaall <- plotPCA(cds53$latealpha,soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$latebetaall <- plotPCA(cds53$latebeta,soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$lategammaall <- plotPCA(cds53$lategamma,soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn53$lateproteoall <- plotPCA(cds53$lateproteo,soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
# Outer-margin row/column headers for the 5x4 grid
mtext("Drop-Out Condition",side=3,line=3,at=0.515,outer=T,cex=3)
mtext("Alphaproteobacteria",side=3,line=0,at=0.14,outer=T,cex=3)
mtext("Betaproteobacteria",side=3,line=0,at=0.39,outer=T,cex=3)
mtext("Gammaproteobacteria",side=3,line=0,at=0.64,outer=T,cex=3)
mtext("Proteobacteria",side=3,line=0,at=0.89,outer=T,cex=3)
mtext("Group Absent vs. Control (a)",side=2,line=1,at=0.93,outer=T,cex=3)
mtext("Late Arrival vs. Mock (b)",side=2,line=1,at=0.73,outer=T,cex=3)
mtext("Late Arrival vs. Control (c)",side=2,line=1,at=0.53,outer=T,cex=3)
mtext("Late Arrival vs. Control (c)",side=2,line=1,at=0.33,outer=T,cex=3)
mtext("Late Arrival vs. Control (c)",side=2,line=1,at=0.13,outer=T,cex=3)
mtext("Effect on Rest of the Community",side=2,line=4,at=0.73,outer=T,cex=3)
mtext("Effect on Invading Group",side=2,line=4,at=0.33,outer=T,cex=3)
mtext("Effect on Whole Community",side=2,line=4,at=0.13,outer=T,cex=3)
mtext(bquote(underline(" ")),side=2,line=3,at=0.73,outer=T,cex=3)
dev.off()
# Fold-change bar plots for the control comparisons (time and spray effects)
cairo_pdf("figures/class_dropout_control_spraytime_bar.pdf",width=16,height=7,family="Arial")
par(mfrow=c(1,2),cex=1)
plotCommunityChanges(cds53$contime,soi=strains,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$conspray,soi=strains,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,nBars=18)
dev.off()
# 3x4 grid of fold-change bar plots mirroring the PCA grid above
cairo_pdf("figures/class_dropout_bar.pdf",width=28,height=21,family="Arial")
par(mfrow=c(3,4),cex=1,oma=c(0,6,4,0))
plotCommunityChanges(cds53$noalpha,soi=notalpha,cutoff=cf,rowLabs=naNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$nobeta,soi=notbeta,cutoff=cf,rowLabs=nbNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$nogamma,soi=notgamma,cutoff=cf,rowLabs=ngNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$noproteo,soi=notproteo,cutoff=cf,rowLabs=npNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$alphaback,soi=notalpha,cutoff=cf,rowLabs=naNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$betaback,soi=notbeta,cutoff=cf,rowLabs=nbNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$gammaback,soi=notgamma,cutoff=cf,rowLabs=ngNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$proteoback,soi=notproteo,cutoff=cf,rowLabs=npNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$latealpha,soi=strains,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$latebeta,soi=strains,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$lategamma,soi=strains,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,nBars=18)
plotCommunityChanges(cds53$lateproteo,soi=strains,cutoff=cf,rowLabs=sNames,subtitle="",cols=bicols,nBars=18)
mtext("Alphaproteobacteria",side=3,line=0,at=0.14,outer=T,cex=3)
mtext("Betaproteobacteria",side=3,line=0,at=0.39,outer=T,cex=3)
mtext("Gammaproteobacteria",side=3,line=0,at=0.64,outer=T,cex=3)
mtext("Proteobacteria",side=3,line=0,at=0.89,outer=T,cex=3)
mtext("Group Absent vs. Control (a)",side=2,line=1,at=0.853,outer=T,cex=3)
mtext("Late Arrival vs. Mock (b)",side=2,line=1,at=0.52,outer=T,cex=3)
mtext("Late Arrival vs. Control (c)",side=2,line=1,at=0.187,outer=T,cex=3)
mtext("Effect on Rest of the Community",side=2,line=4,at=0.853,outer=T,cex=3)
mtext("Effect on Invading Group",side=2,line=4,at=0.52,outer=T,cex=3)
mtext("Effect on Whole Community",side=2,line=4,at=0.187,outer=T,cex=3)
dev.off()
adn6 <- list()
# Batch-effect and Leaf33 drop-out sanity-check PCAs for experiment 6
cairo_pdf("figures/single_dropout_batch_pca.pdf",width=14,height=7,family="Arial")
par(mfrow=c(1,2),cex=1.5,cex.lab=1,cex.sub=1.2)
adn6[["batch"]] <- plotPCA(cds6[["batch"]],soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,cols=bicols,showArrows=F)
adn6[["dropout"]] <- plotPCA(cds6[["dropout"]],soi=strains,perm=pm,cutoff=cf,rowLabs=sNames,cols=bicols,showArrows=F)
dev.off()
# 25x3 grid of per-strain drop-out PCAs (R1, R2, combined); the dropped
# strain itself is excluded from each PCA via soi.
cairo_pdf("figures/single_dropout_pca.pdf",width=21,height=175,family="Arial")
par(mfrow=c(25,3),cex=1.5,oma=c(0,8,6,0),cex.lab=1,cex.sub=1.2)
for(x in levels(ds6c$meta$initial)[-1]){
adn6[[paste(x,1,sep="")]] <- plotPCA(cds6[[paste(x,1,sep="")]],soi=strains[strains!=x],perm=pm,cutoff=cf,rowLabs=sNames[strains!=x],subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn6[[paste(x,2,sep="")]] <- plotPCA(cds6[[paste(x,2,sep="")]],soi=strains[strains!=x],perm=pm,cutoff=cf,rowLabs=sNames[strains!=x],subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
adn6[[paste(x,"c",sep="")]] <- plotPCA(cds6[[paste(x,"c",sep="")]],soi=strains[strains!=x],perm=pm,cutoff=cf,rowLabs=sNames[strains!=x],subtitle="",cols=bicols,showLegend=F,showArrows=F,showTitle=F)
# NOTE: subsetting a factor does NOT drop levels, so
# levels(ds6c$meta$initial[-1]) is the FULL level set including "ALL"; the
# resulting index runs 2..26, placing the first row label at 0.985 (the top
# panel row). Do not "fix" the bracket placement without re-deriving the
# label positions.
mtext(leafTaxonomy[x,]$Name,side=2,line=1,at=1.065-which(levels(ds6c$meta$initial[-1])==x)/25,outer=T,cex=3)
}
mtext("Replicate 1",side=3,line=0,at=0.187,outer=T,cex=3)
mtext("Replicate 2",side=3,line=0,at=0.52,outer=T,cex=3)
mtext("Combined",side=3,line=0,at=0.853,outer=T,cex=3)
mtext("Drop-Out Condition",side=2,line=5,at=0.5,outer=T,cex=4)
mtext("Experiment",side=3,line=4,at=0.52,outer=T,cex=4)
dev.off()
# Four highlighted drop-outs (combined contrasts) on a single row
cairo_pdf("figures/single_dropout_highlights_pca.pdf",width=28,height=7,family="Arial")
par(mfrow=c(1,4),cex=1,cex.main=1.5,cex.sub=1.5)
for(x in c("Leaf203","Leaf231","Leaf233","Leaf262")){
adn6[[paste(x,"c",sep="")]] <- plotPCA(cds6[[paste(x,"c",sep="")]],soi=strains[strains!=x],perm=pm,cutoff=cf,rowLabs=sNames[strains!=x],cols=bicols,showLegend=F,showArrows=F,subtitle=leafTaxonomy[x,]$Name)
}
dev.off()
# Bar plots for the batch and Leaf33 drop-out comparisons
cairo_pdf("figures/single_dropout_batch_bar.pdf",width=14,height=7,family="Arial")
par(mfrow=c(1,2),cex=1)
plotCommunityChanges(cds6[["batch"]],soi=strains,cutoff=cf,rowLabs=sNames,cols=bicols,nBars=12)
plotCommunityChanges(cds6[["dropout"]],soi=strains,cutoff=cf,rowLabs=sNames,cols=bicols,nBars=12)
dev.off()
# 25x3 grid of per-strain drop-out bar plots (R1, R2, combined).
# NOTE(review): sub("no","Leaf",x) looks like a leftover from an older naming
# scheme — x is already a "Leaf…" level here, so the substitution is a no-op.
cairo_pdf("figures/single_dropout_bar.pdf",width=42,height=175,family="Arial")
par(mfrow=c(25,3),cex=1,oma=c(0,4,4,0))
for(x in levels(ds6c$meta$initial)[-1]){
plotCommunityChanges(cds6[[paste(x,1,sep="")]],soi=strains[strains!=sub("no","Leaf",x)],cutoff=cf,rowLabs=sNames[strains!=sub("no","Leaf",x)],subtitle="",cols=bicols,nBars=34)
plotCommunityChanges(cds6[[paste(x,2,sep="")]],soi=strains[strains!=sub("no","Leaf",x)],cutoff=cf,rowLabs=sNames[strains!=sub("no","Leaf",x)],subtitle="",cols=bicols,nBars=34)
plotCommunityChanges(cds6[[paste(x,"c",sep="")]],soi=strains[strains!=sub("no","Leaf",x)],cutoff=cf,rowLabs=sNames[strains!=sub("no","Leaf",x)],subtitle="",cols=bicols,nBars=34)
# Same factor-levels quirk as in the PCA loop above (index includes "ALL")
mtext(leafTaxonomy[x,]$Name,side=2,line=1,at=1.065-which(levels(ds6c$meta$initial[-1])==x)/25,outer=T,cex=3)
}
mtext("Replicate 1",side=3,line=0,at=0.187,outer=T,cex=3)
mtext("Replicate 2",side=3,line=0,at=0.52,outer=T,cex=3)
mtext("Combined",side=3,line=0,at=0.853,outer=T,cex=3)
dev.off()
# Summary plot for expt53
library(corrplot)
library(plotrix)
library(circlize)
# PERMANOVA effect sizes (R2) and p-values collected from the PCA calls above;
# the first two entries (conspray, contime controls) are dropped, leaving a
# 5x4 grid of comparison-type x class.
effects53 <- unlist(lapply(adn53,function(x) x$stats$aov.tab$R2[1]))
pvalues53 <- unlist(lapply(adn53,function(x) x$stats$aov.tab$Pr[1]))
e <- t(matrix(effects53[c(-1,-2)],4))
p <- t(matrix(pvalues53[c(-1,-2)],4))
rownames(e) <- c("Group Absent\nvs. Control (a)","Late Arrival\nvs. Mock (b)","Late Arrival\nvs. Control (c)","Late Arrival\nvs. Control (c)","Late Arrival\nvs. Control (c)")
colnames(e) <- c("Alphaproteobacteria","Betaproteobacteria","Gammaproteobacteria","Proteobacteria")
cairo_pdf("figures/class_dropout_summary.pdf",width=6,height=6,family="Arial")
par(cex=1,xpd=T)
# Significance stars overlaid via p.mat/insig; annotations placed manually in
# the device margin (xpd=T allows drawing outside the plot region)
corrplot(e,is.corr=F,cl.lim=c(0,0.7),cl.ratio=0.4,p.mat=p,insig="label_sig",sig=c(0.0001,0.001,0.01,0.05),tl.col=1,pch.cex=1,tl.srt=45,mar=c(0,10,2,2),col=zcols[6:1])
text(-4,4.1,"Effect on Rest\nof the Community",font=2)
text(-4,2.1,"Effect on\nInvading Group",font=2)
text(-4,1.1,"Effect on\nWhole Community",font=2)
lines(c(-2.1,-2.1),c(2.5,5.5))
text(2.5,8.5,"Drop-Out Condition",cex=1,font=2)
text(6,3,"Effect Size",cex=1,srt=-90)
dev.off()
# Summary plot 2 for expt53
library(ape)
library(apextra)
# 16S phylogeny of the community strains; root node 69 is hard-coded for this
# particular tree file — re-check if the alignment/tree is regenerated.
tree <- read.tree("../leafGenomes/16s_full_align.phy_phyml_tree.txt")
tree <- drop.tip(tree,tree$tip.label[!tree$tip.label%in%strains])
tree <- root(tree,node=69,resolve.root=T)
tree <- as.ultrametric(tree)
strainOrder <- tree$tip.label[tipOrder(tree)]
strainCols <- subTax[,'Color',F]
strainCols <- strainCols[strainOrder,]
# Fold-change and adjusted-p matrices across the 12 expt53 contrasts;
# NAs (untestable strains) become 0 / 1 respectively.
fc53 <- do.call(cbind,lapply(cds53[names(cds53)[-1:-2]],function(x) x$results[,"log2FoldChange"]))
fc53[is.na(fc53)] <- 0
pv53 <- do.call(cbind,lapply(cds53[names(cds53)[-1:-2]],function(x) x$results[,"padj"]))
pv53[is.na(pv53)] <- 1
rownames(fc53) <- rownames(cds53[[1]]$results)
colnames(fc53) <- rep(c("(a)","(b)","(c)"),4)
# Keep only the 62 community strains (drops the Unclassified row)
fc53 <- fc53[1:62,]
pv53 <- pv53[1:62,]
# Mask the dropped group's own cells for the absence/reintroduction columns;
# the late-arrival column (3rd of each triplet) keeps them since the group is
# present there.
mask <- fc53==fc53
mask[alpha,1:2] <- FALSE
mask[beta,4:5] <- FALSE
mask[gamma,7:8] <- FALSE
mask[proteo,10:11] <- FALSE
overlay <- matrix(cut(pv53,c(0,0.0001,0.001,0.01,0.05,1),c("****","***","**","*","")),nrow(pv53))
cairo_pdf("figures/class_dropout_details.pdf",width=10,height=15,family="Arial")
par(cex=1)
treatmap(tree,fc53,mask=mask,overlay=overlay,tip.labels=leafTaxonomy[tree$tip.label,]$Name,tip.colors=strainCols,aspect.ratio=0.2,tip.label.width=8,z.cols=zcols)
dev.off()
# Summary plot for expt6
# PERMANOVA effect sizes/p-values per drop-out (3 rows: R1, R2, combined),
# with columns reordered to follow the phylogeny for the header tree.
effects6 <- unlist(lapply(adn6,function(x) x$stats$aov.tab$R2[1]))
pvalues6 <- unlist(lapply(adn6,function(x) x$stats$aov.tab$Pr[1]))
e <- matrix(effects6[-c(1,2)],3)
rownames(e) <- c("Replicate 1","Replicate 2","Combined")
shortNames <- levels(ds6c$meta$initial)[-1]
colnames(e) <- leafTaxonomy[shortNames,]$Name
p <- matrix(pvalues6[-c(1,2)],3)
e <- e[,order(match(shortNames,tree$tip.label))]
p <- p[,order(match(shortNames,tree$tip.label))]
shortNames <- shortNames[order(match(shortNames,tree$tip.label))]
subtree <- keep.tip(tree,shortNames)
cairo_pdf("figures/single_dropout_summary.pdf",width=14,height=7,family="Arial")
par(cex=1,xpd=T)
# Row labels drawn manually below so corrplot's own labels are blanked
rownames(e) <- rep("",3)
corrplot(e,is.corr=F,cl.lim=c(0,0.25),cl.ratio=0.1,tl.col=c(leafTaxonomy[shortNames,]$Color,"black"),p.mat=p,insig="label_sig",sig=c(0.0001,0.001,0.01,0.05),pch.cex=1,mar=c(0,8,0,2),col=zcols[6:1])
draw.phylo(1,8.5,25,11,subtree,direction="d")
text(0.25,1,"Combined",pos=2)
text(0.25,2,"Replicate 2",pos=2)
text(0.25,3,"Replicate 1",pos=2)
text(12.5,11.5,"Drop-Out Condition",cex=1.5)
text(28,2,"Effect Size",cex=1,srt=-90)
legend(0.5,3.6,legend=names(phylumColors[-4]),fill=phylumColors[-4],xjust=1,yjust=0,cex=0.8)
dev.off()
# Summary plot 2 for expt6 combined
# Fold-change/p matrices from the combined ("…c") contrasts only
cds6c <- cds6[-c(1,2)][grepl("c",names(cds6)[-c(1,2)])]
fc6c <- do.call(cbind,lapply(cds6c,function(x) x$results$log2FoldChange))
fc6c[is.na(fc6c)] <- 0
pv6c <- do.call(cbind,lapply(cds6c,function(x) x$results$padj))
pv6c[is.na(pv6c)] <- 1
rownames(fc6c) <- rownames(cds6c[[1]]$results)
colnames(fc6c) <- sub("c","",colnames(fc6c))
fc6c <- fc6c[1:62,]
pv6c <- pv6c[1:62,]
# All-TRUE mask (NAs were already replaced above), then hide each strain's
# self-comparison cell
mask <- fc6c==fc6c
for(col in colnames(fc6c)){
mask[col,col] <- FALSE
}
overlay <- matrix(cut(pv6c,c(0,0.0001,0.001,0.01,0.05,1),c("****","***","**","*","")),nrow(pv6c))
hc <- as.phylo(hclust(dist(fc6c)))
hc <- reorderTips(hc)
cairo_pdf("figures/single_dropout_details_phylo.pdf",width=15,height=20,family="Arial")
par(cex=1)
treatmap(tree,fc6c,mask=mask,overlay=overlay,tip.labels=leafTaxonomy[tree$tip.label,]$Name,tip.colors=leafTaxonomy[tree$tip.label,]$Color,tip.label.width=10,mat.label.height=24,mat.labels=leafTaxonomy[colnames(fc6c),]$Name,mat.label.color=leafTaxonomy[colnames(fc6c),]$Color,mat.hclust=T,z.cols=zcols)
dev.off()
cairo_pdf("figures/single_dropout_details_hclust.pdf",width=15,height=20,family="Arial")
par(cex=1)
treatmap(hc,fc6c,mask=mask,overlay=overlay,tip.labels=leafTaxonomy[hc$tip.label,]$Name,tip.colors=leafTaxonomy[hc$tip.label,]$Color,tip.label.width=10,mat.label.height=24,mat.labels=leafTaxonomy[colnames(fc6c),]$Name,mat.label.color=leafTaxonomy[colnames(fc6c),]$Color,mat.hclust=T,z.cols=zcols)
dev.off()
# Summary plot with 6-1 and 6-2 separate
# NOTE: fc61/fc62 here overwrite any earlier objects of the same name
cds61 <- cds6[grepl("1$",names(cds6))]
fc61 <- do.call(cbind,lapply(cds61,function(x) x$results$log2FoldChange))
fc61[is.na(fc61)] <- 0
pv61 <- do.call(cbind,lapply(cds61,function(x) x$results$padj))
pv61[is.na(pv61)] <- 1
shortNames1 <- sub("1$","",sub("no","Leaf",names(cds61)))
rownames(fc61) <- rownames(cds61[[1]]$results)
colnames(fc61) <- paste("R1",leafTaxonomy[shortNames1,]$Name,sep="-")
fc61 <- fc61[1:62,]
pv61 <- pv61[1:62,]
cds62 <- cds6[grepl("2$",names(cds6))]
fc62 <- do.call(cbind,lapply(cds62,function(x) x$results$log2FoldChange))
fc62[is.na(fc62)] <- 0
pv62 <- do.call(cbind,lapply(cds62,function(x) x$results$padj))
pv62[is.na(pv62)] <- 1
shortNames2 <- sub("2$","",sub("no","Leaf",names(cds62)))
rownames(fc62) <- rownames(cds62[[1]]$results)
colnames(fc62) <- paste("R2",leafTaxonomy[shortNames2,]$Name,sep="-")
fc62 <- fc62[1:62,]
pv62 <- pv62[1:62,]
fcc <- cbind(fc61,fc62)
pvc <- cbind(pv61,pv62)
shortNames <- c(shortNames1,shortNames2)
# Hide each strain's self-comparison cell in both replicate blocks
mask <- fcc==fcc
for(i in 1:(length(shortNames))){
mask[shortNames[i],i] <- FALSE
}
overlay <- matrix(cut(pvc,c(0,0.0001,0.001,0.01,0.05,1),c("****","***","**","*","")),nrow(pvc))
cairo_pdf("figures/single_dropout_separate_details_phylo.pdf",width=20,height=20,family="Arial")
par(cex=1)
treatmap(tree,fcc,mask=mask,overlay=overlay,tip.labels=leafTaxonomy[tree$tip.label,]$Name,tip.colors=leafTaxonomy[tree$tip.label,]$Color,tip.label.width=10,mat.label.height=24,mat.labels=colnames(fcc),mat.label.color=leafTaxonomy[shortNames,]$Color,mat.col.order=order(match(shortNames,tree$tip.label),decreasing=T),z.cols=zcols)
dev.off()
# Community plots
# NOTE: these control* objects replace the ones exported earlier; here the
# Unclassified row is removed and control53 uses the conspray CDS counts.
control53 <- cds53$conspray$counts
control53 <- control53[rownames(control53)!="Unclassified",]
control61 <- ds61$counts
control61 <- control61[,ds61$meta$initial=="ALL"]
control61 <- control61[rownames(control61)!="Unclassified",]
control62 <- ds62$counts
control62 <- control62[,ds62$meta$initial=="ALL"]
control62 <- control62[rownames(control62)!="Unclassified",]
cairo_pdf("figures/class_dropout_control_community.pdf",width=20,height=10,family="Arial")
par(cex=1)
comm53 <- plotCommunity(control53,type="violinswarm",xlabels=leafTaxonomy[rownames(control53),]$Name,xcols=leafTaxonomy[rownames(control53),]$Color)
dev.off()
cairo_pdf("figures/single_dropout_control_community.pdf",width=20,height=10,family="Arial")
par(cex=1)
comm61 <- plotCommunity(control61,type="violinswarm",xlabels=leafTaxonomy[rownames(control61),]$Name,xcols=leafTaxonomy[rownames(control61),]$Color)
comm62 <- plotCommunity(control62,type="violinswarm",xlabels=leafTaxonomy[rownames(control62),]$Name,xcols=leafTaxonomy[rownames(control61),]$Color)
dev.off()
# PCA of controls
# Cross-experiment comparisons of the control ("ALL") communities.
# BUG FIX: cds6162 originally indexed ds61$meta with ds62's mask
# (ds61$meta[ds62$meta$initial=="ALL",]); corrected to use ds61's own mask so
# the metadata rows match the control61 count columns.
# Also fixed the "Droupout" typo in the legends.
cds5361 <- makeCDS(counts=cbind(control53,control61),meta=rbind(cds53$conspray$meta,ds61$meta[ds61$meta$initial=="ALL",]),foi="experiment",legend=c("Class Dropout","Single Dropout R1"))
cds5362 <- makeCDS(counts=cbind(control53,control62),meta=rbind(cds53$conspray$meta,ds62$meta[ds62$meta$initial=="ALL",]),foi="experiment",legend=c("Class Dropout","Single Dropout R2"))
cds6162 <- makeCDS(counts=cbind(control61,control62),meta=rbind(ds61$meta[ds61$meta$initial=="ALL",],ds62$meta[ds62$meta$initial=="ALL",]),foi="experiment",legend=c("Single Dropout R1","Single Dropout R2"))
cdsControl <- makeCDS(counts=cbind(control53,control61,control62),meta=rbind(cds53$conspray$meta,ds61$meta[ds61$meta$initial=="ALL",],ds62$meta[ds62$meta$initial=="ALL",]),foi="experiment",legend=c("Class Dropout","Single Dropout R1","Single Dropout R2"))
cairo_pdf("figures/controls_comparison_pca.pdf",width=14,height=14,family="Arial")
par(mfrow=c(2,2),cex=1)
plotPCA(cds5361,cols=bicols)
plotPCA(cds6162,cols=bicols)
plotPCA(cds5362,cols=bicols)
plotPCA(cdsControl,cols=c("red",bicols))
dev.off()
# Make tables for SparCC: all controls, expt61+expt62, noalpha, nobeta, nogamma, noproteo, alpharestore, betarestore, gammarestore, proteorestore
# NOTE(review): sparTabs stores transposed counts but the write below
# transposes back (t(t(x))), so the files contain the original orientation and
# overwrite the network/control*.tsv files exported earlier with equivalent
# content (these versions retain the Unclassified row).
sparTabs <- list(control53=t(ds53$counts[,ds53$meta$initial=="ALL"]),
control61=t(ds61$counts[,ds61$meta$initial=="ALL"]),
control62=t(ds62$counts[,ds62$meta$initial=="ALL"])
)
for(name in names(sparTabs)){
write.table(t(sparTabs[[name]]),paste("network/",name,".tsv",sep=""),sep="\t")
}
# Pie chart of control community
# Strains are colored/aggregated by phylum via the phylloR phylumColors map
cairo_pdf("figures/class_dropout_pie.pdf",width=7,height=7,family="Arial")
par(cex=1)
plotCommunityPie(control53,strainTaxa=names(phylumColors)[match(leafTaxonomy[rownames(control53),]$Color,phylumColors)],cols=phylumColors,taxLabels=names(phylumColors),sort=F)
dev.off()
cairo_pdf("figures/single_dropout_pie.pdf",width=7,height=7,family="Arial")
par(cex=1)
plotCommunityPie(control61,strainTaxa=names(phylumColors)[match(leafTaxonomy[rownames(control61),]$Color,phylumColors)],cols=phylumColors,taxLabels=names(phylumColors),sort=F)
plotCommunityPie(control62,strainTaxa=names(phylumColors)[match(leafTaxonomy[rownames(control62),]$Color,phylumColors)],cols=phylumColors,taxLabels=names(phylumColors),sort=F)
dev.off()
# Network analysis based on experiment 6 knockouts: 61, 62 and 6c
# Keep only the combined ("c"-suffixed) contrasts and rename them back to
# their Leaf strain identifiers.
cds6c <- cds6[grepl("c$",names(cds6))]
names(cds6c) <- substr(names(cds6c),1,nchar(names(cds6c))-1)
names(cds6c) <- sub("no","Leaf",names(cds6c))
summary6 <- summariseResults(cds6c)
# Restrict both matrices to the strains of interest.
# BUGFIX: the subset p-value matrix was previously assigned to a new element
# 'pcMatrix', leaving 'pvMatrix' unsubset and misaligned with 'fcMatrix'.
summary6$fcMatrix <- summary6$fcMatrix[strains,]
summary6$pvMatrix <- summary6$pvMatrix[strains,]
# Build the significance network (p < 0.01) and add display-name attributes.
net6 <- igraphFromSummary(summary6$fcMatrix,summary6$pvMatrix,cutoff=0.01)
vertex_attr(net6,"shortName") <- sub("-.*","",vertex_attr(net6,"name"))
vertex_attr(net6,"twoLineName") <- sub("-","\n",vertex_attr(net6,"name"))
# NOTE(review): file extension is .gml but the format written is GraphML --
# confirm the downstream consumer expects GraphML content.
write.graph(net6,"results/network_001.gml","graphml")
# Including the inocula for time series comparison
# Build inoculum ("ALL"-treatment) count tables and matching metadata for the
# three experiments, then assemble single and combined datasets (counts + meta).
inoc53 <- expt53raw[,which(meta53raw$Treatment=="ALL")]
inoc53 <- inoc53[order(rownames(inoc53)),]
# collapse all "Unclass*" rows into a single "Unclassified" row
inoc53 <- rbind(inoc53[!grepl("Unclass",rownames(inoc53)),],Unclassified=apply(inoc53[grepl("Unclass",rownames(inoc53)),],2,sum))
inocmeta53 <- meta53raw[which(meta53raw$Treatment=="ALL"),]
inocmeta53 <- cbind(experiment=53,inocmeta53[,1:4])
colnames(inocmeta53) <- c("experiment","initial","spray","repeat","time")
inocmeta53$initial <- factor(inocmeta53$initial,c("ALL"))
inocmeta53$spray <- factor(inocmeta53$spray,c("U","Mg","Inoc"))
# Turn t1 into t2
# (relabel experiment 53 time points -- presumably to align with the other
# experiments' time labels; confirm against the sampling design)
inocmeta53$time[inocmeta53$time=="t1"] <- "t2"
# Experiment 61: same construction; a constant "U" spray column is inserted
# (no spray column in the raw metadata here -- TODO confirm)
inoc61 <- expt61raw[,which(meta61raw$Treatment=="ALL")]
inoc61 <- inoc61[order(rownames(inoc61)),]
inoc61 <- rbind(inoc61[!grepl("Unclass",rownames(inoc61)),],Unclassified=apply(inoc61[grepl("Unclass",rownames(inoc61)),],2,sum))
# Leaf281 is dropped from experiment 61 only -- presumably absent elsewhere;
# confirm against the strain lists
inoc61 <- inoc61[rownames(inoc61)!="Leaf281",]
inocmeta61 <- meta61raw[which(meta61raw$Treatment=="ALL"),]
inocmeta61 <- cbind(experiment=61,inocmeta61[,1],"U",inocmeta61[,2:3])
colnames(inocmeta61) <- c("experiment","initial","spray","repeat","time")
inocmeta61$initial <- factor(inocmeta61$initial,c("ALL"))
inocmeta61$spray <- factor(inocmeta61$spray,c("U"))
# Experiment 62: same construction as 61
inoc62 <- expt62raw[,which(meta62raw$Treatment=="ALL")]
inoc62 <- inoc62[order(rownames(inoc62)),]
inoc62 <- rbind(inoc62[!grepl("Unclass",rownames(inoc62)),],Unclassified=apply(inoc62[grepl("Unclass",rownames(inoc62)),],2,sum))
inocmeta62 <- meta62raw[which(meta62raw$Treatment=="ALL"),]
inocmeta62 <- cbind(experiment=62,inocmeta62[,1],"U",inocmeta62[,2:3])
colnames(inocmeta62) <- c("experiment","initial","spray","repeat","time")
inocmeta62$initial <- factor(inocmeta62$initial,c("ALL"))
inocmeta62$spray <- factor(inocmeta62$spray,c("U"))
# Single and combined inoculum datasets (counts + metadata)
ids53 <- list(counts=inoc53,meta=inocmeta53)
ids61 <- list(counts=inoc61,meta=inocmeta61)
ids62 <- list(counts=inoc62,meta=inocmeta62)
ids5361 <- list(counts=cbind(inoc53,inoc61),meta=rbind(inocmeta53,inocmeta61))
ids5362 <- list(counts=cbind(inoc53,inoc62),meta=rbind(inocmeta53,inocmeta62))
ids6162 <- list(counts=cbind(inoc61,inoc62),meta=rbind(inocmeta61,inocmeta62))
ids536162 <- list(counts=cbind(inoc53,inoc61,inoc62),meta=rbind(inocmeta53,inocmeta61,inocmeta62))
# Variance-stabilized per-strain medians of the experiment-53 control and
# inoculum (t0) communities ("Unclassified" row excluded; design ~1).
dds53 <- DESeqDataSetFromMatrix(ds53$counts[rownames(ds53$counts)!="Unclassified",ds53$meta$initial=="ALL"],ds53$meta[ds53$meta$initial=="ALL",],~1)
ddsi <- DESeqDataSetFromMatrix(ids53$counts[rownames(ids53$counts)!="Unclassified",ids53$meta$time=="t0"],ids53$meta[ids53$meta$time=="t0",],~1)
vsti <- assay(varianceStabilizingTransformation(ddsi))
vst53 <- assay(varianceStabilizingTransformation(dds53))
# per-strain median VST abundance (established community vs. inoculum)
median53 <- apply(vst53,1,median)
mediani <- apply(vsti,1,median)
# Winners/losers: strains changing between inoculum (t0) and established
# community (t2) in experiment 53.
cairo_pdf("figures/class_dropout_winners_losers.pdf",width=40,height=10,family="Arial")
par(cex=1)
icds53 <- makeCDS(ids53,include=list(time=c("t0","t2")),foi="time",legend=c("Inoculum","Established Community"))
plotCommunityChanges(icds53,cutoff=cf,rowLabs=leafTaxonomy[rownames(ids53$counts),]$Name,subtitle="",cols=bicols,nBars=54)
dev.off()
# Inoculum community plots; rows 1:62 select the strain rows (presumably
# excluding the trailing "Unclassified" row -- confirm row count).
cairo_pdf("figures/class_dropout_inocula.pdf",width=20,height=10,family="Arial")
par(cex=1)
plotCommunity(ids53$counts[1:62,ids53$meta$spray=="Inoc"],xlabels=leafTaxonomy[rownames(ids53$counts)[1:62],"Name"],xcols=leafTaxonomy[rownames(ids53$counts)[1:62],"Color"],type="points")
dev.off()
write.table(ids53$counts[1:62,ids53$meta$spray=="Inoc"],"results/inocula53.txt")
cairo_pdf("figures/single_dropout_inocula.pdf",width=20,height=10,onefile=T,family="Arial")
par(cex=1)
plotCommunity(ids61$counts[1:62,ids61$meta$time=="t0"],xlabels=leafTaxonomy[rownames(ids61$counts)[1:62],"Name"],xcols=leafTaxonomy[rownames(ids61$counts)[1:62],"Color"],type="points")
plotCommunity(ids62$counts[1:62,ids62$meta$time=="t0"],xlabels=leafTaxonomy[rownames(ids62$counts)[1:62],"Name"],xcols=leafTaxonomy[rownames(ids62$counts)[1:62],"Color"],type="points")
dev.off()
write.table(ids61$counts[1:62,ids61$meta$time=="t0"],"results/inocula61.txt")
write.table(ids62$counts[1:62,ids62$meta$time=="t0"],"results/inocula62.txt")
# PCA comparisons of the inocula (t0 samples) across the three experiments,
# pairwise and combined; plotPCA return values are captured into x to
# suppress printed output.
icds5361 <- makeCDS(ids5361,include=list(time=c("t0")),foi="experiment",title="",legend=c("Class Dropout Inoculum","Single Dropout R1 Inoculum"))
icds5362 <- makeCDS(ids5362,include=list(time=c("t0")),foi="experiment",title="",legend=c("Class Dropout Inoculum","Single Dropout R2 Inoculum"))
icds6162 <- makeCDS(ids6162,include=list(time=c("t0")),foi="experiment",title="",legend=c("Single Dropout R1 Inoculum","Single Dropout R2 Inoculum"))
icds536162 <- makeCDS(ids536162,include=list(time=c("t0")),foi="experiment",title="",legend=c("Class Dropout Inoculum","Single Dropout R1 Inoculum","Single Dropout R2 Inoculum"))
cairo_pdf("figures/inocula_comparison.pdf",width=14,height=14,family="Arial")
par(mfrow=c(2,2),cex=1)
x <- plotPCA(icds5361,cols=bicols)
x <- plotPCA(icds6162,cols=bicols)
x <- plotPCA(icds5362,cols=bicols)
x <- plotPCA(icds536162,cols=c("red",bicols))
dev.off()
# Function to permute a pearson correlation
# One-sided permutation test for a Pearson correlation.
# x, y: numeric vectors of equal length; n: number of permutations.
# Returns the fraction of permuted correlations that exceed the observed one
# (the observed value is prepended to the list but never counts, since
# observed < observed is FALSE).
permute.cor <- function(x, y, n) {
  observed <- cor(x, y)
  permuted <- vapply(
    seq_len(n),
    function(i) cor(sample(x, length(x)), sample(y, length(y))),
    numeric(1)
  )
  sum(observed < c(observed, permuted)) / n
}
# Miscellaneous Correlations
# Strain abundance vs. effect size of single dropout
# adonis R2 / p-values per dropout; first two entries are skipped and the
# rest arranged as a 3 x strain matrix (R1, R2, combined rows).
effects6 <- unlist(lapply(adn6,function(x) x$stats$aov.tab$R2[1]))
pvalues6 <- unlist(lapply(adn6,function(x) x$stats$aov.tab$Pr[1]))
e <- matrix(effects6[-c(1,2)],3)
rownames(e) <- c("Replicate 1","Replicate 2","Combined")
shortNames <- levels(ds6c$meta$initial)[-1]
colnames(e) <- leafTaxonomy[shortNames,]$Name
p <- matrix(pvalues6[-c(1,2)],3)
# VST medians of the experiment-61/62 controls (separately and combined)
dds61 <- DESeqDataSetFromMatrix(ds61$counts[rownames(ds61$counts)!="Unclassified",ds61$meta$initial=="ALL"],ds61$meta[ds61$meta$initial=="ALL",],~1)
dds62 <- DESeqDataSetFromMatrix(ds62$counts[rownames(ds62$counts)!="Unclassified",ds62$meta$initial=="ALL"],ds62$meta[ds62$meta$initial=="ALL",],~1)
dds6162 <- DESeqDataSetFromMatrix(cbind(ds61$counts[rownames(ds61$counts)!="Unclassified",ds61$meta$initial=="ALL"],ds62$counts[rownames(ds62$counts)!="Unclassified",ds62$meta$initial=="ALL"]),rbind(ds61$meta[ds61$meta$initial=="ALL",],ds62$meta[ds62$meta$initial=="ALL",]),~1)
vst61 <- assay(varianceStabilizingTransformation(dds61))
vst62 <- assay(varianceStabilizingTransformation(dds62))
vst6162 <- assay(varianceStabilizingTransformation(dds6162))
median61 <- apply(vst61,1,median)
median62 <- apply(vst62,1,median)
median6162 <- apply(vst6162,1,median)
library(calibrate)
# Each scatter plot is drawn twice into the multi-page PDF: a clean page
# followed by a page annotated with strain labels via textxy().
cairo_pdf("figures/correlations.pdf",width=7,height=7,onefile=T,family="Arial")
par(cex=1)
plot(100*e[1,],median61[colnames(fc6c)],xlab="Effect Size (%)",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R1")
plot(100*e[1,],median61[colnames(fc6c)],xlab="Effect Size (%)",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R1")
textxy(100*e[1,],median61[colnames(fc6c)],sub("eaf","",colnames(fc6c)))
plot(100*e[2,],median62[colnames(fc6c)],xlab="Effect Size (%)",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R2")
plot(100*e[2,],median62[colnames(fc6c)],xlab="Effect Size (%)",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R2")
textxy(100*e[2,],median62[colnames(fc6c)],sub("eaf","",colnames(fc6c)))
plot(100*e[3,],median6162[colnames(fc6c)],xlab="Effect Size (%)",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="Combined")
plot(100*e[3,],median6162[colnames(fc6c)],xlab="Effect Size (%)",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="Combined")
textxy(100*e[3,],median6162[colnames(fc6c)],sub("eaf","",colnames(fc6c)))
plot(mediani,median53,xlab="Inoculum Normalized Median Relative Abundance",ylab="Control Normalized Median Relative Abundance",pch=19,col=2)
plot(mediani,median53,xlab="Inoculum Normalized Median Relative Abundance",ylab="Control Normalized Median Relative Abundance",pch=19,col=2)
textxy(mediani,median53,sub("eaf","",names(median53)))
# Node degrees of the significance network vs. combined effect size
y = igraph::degree(net6)[colnames(e)]
yo = igraph::degree(net6,mode="out")[colnames(e)]
yi = igraph::degree(net6,mode="in")[colnames(e)]
x = 100*e[3,]
#plot(100*e[3,],igraph::degree(net6)[colnames(e)],xlab="Effect Size (%)",ylab="Node Degree",pch=19,col=2,main="Combined",sub=summary(lm(y~x))$r.squared)
#abline(lm(y~x))
#plot(100*e[3,],igraph::degree(net6)[colnames(e)],xlab="Effect Size (%)",ylab="Node Degree",pch=19,col=2,main="Combined")
#textxy(100*e[3,],igraph::degree(net6)[colnames(e)],sub("eaf","",rownames(leafTaxonomy)[match(colnames(e),leafTaxonomy$Name)]))
#abline(lm(y~x))
plot(x,yo,xlab="Effect Size (%)",ylab="Node Out Degree",pch=19,col=2,main="Combined",sub=summary(lm(yo~x))$r.squared)
textxy(x,yo,sub("eaf","",rownames(leafTaxonomy)[match(colnames(e),leafTaxonomy$Name)]))
abline(lm(yo~x))
#plot(100*e[3,],yi,xlab="Effect Size (%)",ylab="Node In Degree",pch=19,col=2,main="Combined",sub=summary(lm(yi~x))$r.squared)
#abline(lm(yi~x))
dev.off()
# Output some data to file
write.table(cbind(NMRA=median6162[colnames(fc6c)],ESize=100*e[3,]),"NMRA-ES.txt")
# Per-strain total absolute fold change vs. median abundance.
# fcc holds per-replicate fold-change columns; odd columns are taken as R1,
# even columns as R2 (TODO confirm layout against fcc construction).
fc61 <- fcc[,seq(1,ncol(fcc),2)]
fc62 <- fcc[,1+seq(1,ncol(fcc),2)]
colnames(fc61) <- colnames(fc6c)
colnames(fc62) <- colnames(fc6c)
# zero the diagonal: a strain's fold change in its own dropout is excluded
for(x in colnames(fc6c)){
fc6c[x,x] <- 0
fc61[x,x] <- 0
fc62[x,x] <- 0
}
fccsums <- apply(fc6c,1,function(x) sum(abs(x)))
fc61sums <- apply(fc61,1,function(x) sum(abs(x)))
fc62sums <- apply(fc62,1,function(x) sum(abs(x)))
# Each scatter is drawn twice: a clean page followed by a labeled page.
cairo_pdf("figures/correlation2.pdf",width=7,height=7,onefile=T,family="Arial")
par(cex=1)
plot(fc61sums,median61,xlab="Total Fold Changes",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R1")
plot(fc61sums,median61,xlab="Total Fold Changes",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R1")
textxy(fc61sums,median61,sub("eaf","",names(median61)))
# BUGFIX: the next two plots show replicate-2 data but were titled "R1"
plot(fc62sums,median62,xlab="Total Fold Changes",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R2")
plot(fc62sums,median62,xlab="Total Fold Changes",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="R2")
textxy(fc62sums,median62,sub("eaf","",names(median62)))
plot(fccsums,median6162,xlab="Total Fold Changes",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="Combined")
plot(fccsums,median6162,xlab="Total Fold Changes",ylab="Normalized Median Relative Abundance",pch=19,col=2,main="Combined")
textxy(fccsums,median6162,sub("eaf","",names(median6162)))
dev.off()
# Plot the fancy figure
# Bipartite summary of fold changes at p < 0.01: once with Ward-clustering
# dendrograms on both sides, once with the phylogenetic tree on the right.
# The last row of each matrix is dropped first (presumably "Unclassified" --
# confirm against summariseResults output).
summary <- summariseResults(cds6c)
summary$fcMatrix <- summary$fcMatrix[-nrow(summary$fcMatrix),]
summary$pvMatrix <- summary$pvMatrix[-nrow(summary$pvMatrix),]
# NOTE(review): cairo_pdf defaults to onefile=FALSE; with two plots to one
# fixed filename, confirm both pages are actually kept.
cairo_pdf("figures/fancy.pdf",width=14,height=14,family="Arial")
plotBipartiteSummary(summary$fcMatrix,summary$pvMatrix,leftPhylo=as.phylo(hclust(dist(t(summary$fcMatrix)),method="ward.D")),rightPhylo=as.phylo(hclust(dist(summary$fcMatrix),method="ward.D")),leftLabs=leafTaxonomy[colnames(summary$fcMatrix),]$Name,rightLabs=leafTaxonomy[rownames(summary$fcMatrix),]$Name,leftCols=leafTaxonomy[colnames(summary$fcMatrix),]$Color,rightCols=leafTaxonomy[rownames(summary$fcMatrix),]$Color,cutoff=0.01,tip.label.width=0.3)
plotBipartiteSummary(summary$fcMatrix,summary$pvMatrix,leftLabs=leafTaxonomy[colnames(summary$fcMatrix),]$Name,rightLabs=leafTaxonomy[rownames(summary$fcMatrix),]$Name,leftPhylo=keep.tip(tree,colnames(summary$fcMatrix)),rightPhylo=tree,leftCols=leafTaxonomy[colnames(summary$fcMatrix),]$Color,rightCols=leafTaxonomy[rownames(summary$fcMatrix),]$Color,cutoff=0.01,tip.label.width=0.3)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ruin_probability.R
\name{ruin_probability}
\alias{ruin_probability}
\title{Estimate a ruin probability for a finite time horizon}
\usage{
ruin_probability(model, time_horizon, simulation_number = NULL,
ci_level = NULL, parallel = NULL, return_paths = NULL)
}
\arguments{
\item{model}{an S4 object indicating a risk model (e.g.,
\linkS4class{CramerLundberg}).}
\item{time_horizon}{a length one numeric finite vector specifying the time at
which the ruin probability should be estimated.}
\item{simulation_number}{a length one numeric vector giving the number of
simulations that should be performed. Default: \code{10000}.}
\item{ci_level}{a length one numeric vector between 0 and 1 indicating the
level of the confidence interval of the ruin probability. Default:
\code{0.95}.}
\item{parallel}{a length one logical vector indicating whether the parallel
computing should be used. Default: \code{TRUE}.}
\item{return_paths}{a length one logical vector indicating whether a list of
simulated paths should be returned. Default: \code{FALSE}.}
}
\value{
A list of two elements: a numeric vector of lower bound of CI,
estimate, and upper bound of CI of the ruin probability; and optionally the
list of simulated paths.
}
\description{
\code{ruin_probability} simulates paths for a given risk \code{model} and
returns a crude Monte-Carlo estimate of the ruin probability for the finite
time horizon.
}
\details{
The function uses parallel computing from the package
\code{\link{parallel}} (if \code{parallel} is TRUE). The package sets up
\code{\link{RNGkind}} to \code{"L'Ecuyer-CMRG"} for safe seeding (see
\code{\link{nextRNGStream}}) when it is loaded, so that the user does not
have to take care of seeds / RNGs. Further, the function allows computing
the confidence interval, assuming approximate normality of the estimated
ruin probability (by the CLT).
}
\examples{
model <- CramerLundberg(initial_capital = 0,
premium_rate = 1,
claim_poisson_arrival_rate = 1,
claim_size_generator = rexp,
claim_size_parameters = list(rate = 1))
ruin_probability(model = model,
time_horizon = 10,
simulation_number = 100,
return_paths = TRUE,
parallel = FALSE)
}
| /man/ruin_probability.Rd | no_license | irudnyts/ruin | R | false | true | 2,395 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ruin_probability.R
\name{ruin_probability}
\alias{ruin_probability}
\title{Estimate a ruin probability for a finite time horizon}
\usage{
ruin_probability(model, time_horizon, simulation_number = NULL,
ci_level = NULL, parallel = NULL, return_paths = NULL)
}
\arguments{
\item{model}{an S4 object indicating a risk model (e.g.,
\linkS4class{CramerLundberg}).}
\item{time_horizon}{a length one numeric finite vector specifying the time at
which the ruin probability should be estimated.}
\item{simulation_number}{a length one numeric vector giving the number of
simulations that should be performed. Default: \code{10000}.}
\item{ci_level}{a length one numeric vector between 0 and 1 indicating the
level of the confidence interval of the ruin probability. Default:
\code{0.95}.}
\item{parallel}{a length one logical vector indicating whether the parallel
computing should be used. Default: \code{TRUE}.}
\item{return_paths}{a length one logical vector indicating whether a list of
simulated paths should be returned. Default: \code{FALSE}.}
}
\value{
A list of two elements: a numeric vector of lower bound of CI,
estimate, and upper bound of CI of the ruin probability; and optionally the
list of simulated paths.
}
\description{
\code{ruin_probability} simulates paths for a given risk \code{model} and
returns a crude Monte-Carlo estimate of the ruin probability for the finite
time horizon.
}
\details{
The function uses parallel computing from the package
\code{\link{parallel}} (if \code{parallel} is TRUE). The package sets up
\code{\link{RNGkind}} to \code{"L'Ecuyer-CMRG"} for safe seeding (see
\code{\link{nextRNGStream}}) when it is loaded, so that the user does not
have to take care of seeds / RNGs. Further, the function allows computing
the confidence interval, assuming approximate normality of the estimated
ruin probability (by the CLT).
}
\examples{
model <- CramerLundberg(initial_capital = 0,
premium_rate = 1,
claim_poisson_arrival_rate = 1,
claim_size_generator = rexp,
claim_size_parameters = list(rate = 1))
ruin_probability(model = model,
time_horizon = 10,
simulation_number = 100,
return_paths = TRUE,
parallel = FALSE)
}
|
#***********************************************
# parts of algorithm:
# * draw Z
# * draw theta2
# * draw theta1 (centering!)
# * draw sigma2
# * draw sigma1
# * draw b_class
# * draw b
# * draw variance(b_class)
# * draw a_class
# * draw a
# * draw variance(a_class)
#********************************************
#############################################
# compute group mean by the rowsum function
# Compute per-group means of the rows of a matrix via rowsum().
#   matr: numeric matrix (rows = observations)
#   group: grouping index, one entry per row
#   groupsize: optional precomputed group sizes; counted from 'group' if NULL
.mcmc.groupmean <- function( matr , group , groupsize=NULL ){
    group.sums <- rowsum( matr , group )
    if ( is.null(groupsize) ){
        # 1 + 0*matr[,1] yields a vector of ones, one per row of matr
        ones <- 1 + 0 * matr[,1]
        groupsize <- rowsum( ones , group )[,1]
    }
    group.sums / groupsize
}
############################################
####################################################
# draw latent responses Z
# Draw latent continuous responses Z by inverse-CDF sampling from a unit-
# variance normal truncated to [threshlow, threshupp] around the model mean.
#   aM, bM: N x I matrices of discriminations / difficulties
#   theta: person parameters (length N); thresh*: N x I truncation bounds
# Consumes exactly N*I uniforms, so the RNG stream matches the original.
.draw.Z.2pno.ml <- function( aM , bM, theta , N , I , threshlow , threshupp ){
    # model-implied mean for every person x item cell
    mean.mat <- aM * theta - bM
    # one uniform per cell drives the inverse-CDF draw
    u <- matrix( stats::runif( N*I ) , nrow=N , ncol=I )
    # cumulative probabilities at the truncation bounds
    p.low <- stats::pnorm( threshlow , mean=mean.mat )
    p.upp <- stats::pnorm( threshupp , mean=mean.mat )
    # map the uniform into [p.low, p.upp] and invert the CDF
    stats::qnorm( p.low + ( p.upp - p.low ) * u , mean=mean.mat )
}
#############################################
########################################
# draw theta = theta.L2 + theta.L1
# here, the "total" theta is sampled!
# Draw person abilities theta from their normal full conditional.
# The "total" theta (level-1 + level-2) is sampled: the data part comes from
# the latent responses Z, the prior part from the group mean theta2 with
# level-1 SD sigma1; the draw is centered to mean 0 before returning.
#   aM, bM ... N x I matrices of discriminations / difficulties
#   Z      ... N x I latent response matrix
#   sigma.res ... item residual SDs (used only for link="normal")
#   theta2 ... group-level means; idgroup maps persons to groups
# (Arguments b and sigma2 are not used in the active code, only in the
# commented-out alternatives.)
.draw.theta.2pno.ml <- function( aM , b , bM , N , I , Z ,
sigma1 , sigma2 , sigma.res , link , theta2 , idgroup ){
#**************************************
# link = logit
# Z = aM * theta - bM + eps
# Z + bM = aM * theta + eps
# bM <- matrix(b,N,I,byrow=TRUE )
if (link=="logit"){
# vtheta <- 1 / ( rowSums( aM^2 ) + ( sigma1^2+sigma2^2 ) )
# vtheta <- 1 / ( rowSums( aM^2 ) + 1/( sigma1^2+sigma2^2 ) )
# mtheta <- rowSums( aM * ( Z + bM ) ) * vtheta
Zres <- Z + bM
# least-squares estimate of theta from the responses; precision prec1
m1ij <- rowSums( aM * Zres )
prec1 <- rowSums(aM^2 )
m1ij <- m1ij / prec1
# prior mean: group-level theta with precision 1/sigma1^2
m2ij <- theta2[ idgroup ]
# prec2 <- 1 / ( sigma1^2 + sigma2^2 )
prec2 <- 1 / ( sigma1^2)
} # end logit
#***************************************
# link = normal
if (link == "normal"){
# print(sigma.res)
sigma.resM <- matrix( sigma.res , N , I , byrow=TRUE)
Zres <- Z + bM
# correct precision and estimator
# something seems to go wrong here
# m1ij <- rowSums( aM * Zres * sigma.resM)
# weighted least squares: items weighted by inverse residual variance
m1ij <- rowSums( aM * Zres / sigma.resM^2 )
m2ij <- theta2[ idgroup ]
# prec1 <- rowSums( aM^2 * sigma.resM^2 )
prec1 <- rowSums( aM^2 / sigma.resM^2 )
m1ij <- m1ij / prec1
prec2 <- 1 / ( sigma1^2 )
} # end normal
# precision-weighted posterior mean/variance, then sample and center
prectotal <- prec1 + prec2
mtheta <- ( prec1*m1ij + prec2 * m2ij ) / prectotal
vtheta <- 1 / prectotal
theta <- stats::rnorm( N , mean=mtheta , sd = sqrt( vtheta ) )
theta <- theta - mean(theta)
return(theta)
}
#########################################
# draw level 2 latent class mean
# Draw the G group-level means theta2 from their normal full conditional:
# the group mean of theta (data precision groupsize/sigma1^2) is shrunk
# towards 0 (prior precision 1/sigma2^2); the result is centered to mean 0.
.draw.theta2.2pno.ml <- function( theta , idgroup , groupsize ,
sigma1 , sigma2 , G ){
# compute latent mean
mij1 <- .mcmc.groupmean( theta , idgroup , groupsize)[,1]
# data precision: groupsize observations with level-1 variance sigma1^2
prec.ij1 <- 1 / ( sigma1^2 / groupsize )
prec.ij2 <- 1 / sigma2^2
prec.tot <- prec.ij1 + prec.ij2
vtheta <- 1 / prec.tot
# posterior mean (the prior mean is 0, so only the data term remains)
mtheta <- mij1 * prec.ij1 / prec.tot
theta2 <- stats::rnorm( G , mean=mtheta , sd = sqrt( vtheta ))
theta2 <- theta2 - mean(theta2)
return(theta2)
}
######################################
############################################################
# draw level 1 and level 2 variances
# Draw level-1 and level-2 standard deviations of theta via the file-internal
# helper .mcmc.draw.variance. The level-1 variance uses a fixed prior
# (w0=1, sig02=.7); the level-2 variance uses the user-supplied prior.sigma2.
.draw.sigma12.2pno.ml <- function( theta , theta2 , idgroup , N ,
G , prior.sigma2 ){
# level 1 variance
# sig2 = mean squared deviation of theta from its group mean
sig2 <- sum( (theta - theta2[ idgroup ])^2 ) / N
sigma1 <- sqrt( .mcmc.draw.variance( 1 , w0= 1 , sig02= .7 , n=N , sig2=sig2 ) )
# level 2 variance
sig2 <- sum( theta2^2 ) / G
sigma2 <- sqrt( .mcmc.draw.variance( 1 ,
w0= prior.sigma2[1] , sig02= prior.sigma2[2]^2 , n=G , sig2=sig2 ) )
res <- list( "sigma1"=sigma1 , "sigma2" = sigma2 )
return(res)
}
######################################################
# b item parameter single level case
# Draw item difficulties b in the single-level model from their normal full
# conditional: the negated column means of (Z - aM*theta) are combined with
# the N(mu.b, omega.b^2) prior. Under link="normal" the data precision is
# additionally scaled by the item residual variances sigma.res^2.
# Consumes exactly one rnorm(I) draw, matching the original RNG stream.
.draw.est.b.sl <- function( Z , aM , theta , N , I , omega.b , mu.b ,
            sigma.res , link ){
    # Z = aM * theta - b + eps  =>  -b is the column mean of (Z - aM * theta)
    data.mean <- -colMeans( Z - aM * theta )
    # data precision: N observations per item; under the normal link each
    # item is down-weighted by its residual variance
    if ( link == "logit" ){
        prec.data <- N
    }
    if ( link == "normal" ){
        prec.data <- N / sigma.res^2
    }
    prec.prior <- 1 / omega.b^2
    prec.post <- prec.data + prec.prior
    # precision-weighted combination of data estimate and prior mean mu.b
    post.mean <- ( data.mean * prec.data + mu.b * prec.prior ) / prec.post
    stats::rnorm( I , mean=post.mean , sd=sqrt( 1 / prec.post ) )
}
####################################################
# draw hyperparameters of b
# Draw the hyperparameters (mu.b, omega.b) of the item difficulty
# distribution, but only under hierarchical estimation (est.b.M=="h");
# otherwise the incoming values are returned unchanged.
.draw.est.b.hyperpars <- function( b, mu.b , omega.b , I ,
prior.omega.b , est.b.M ){
if ( est.b.M=="h"){
# sample mu.b
# normal full conditional: mean of b with variance omega.b^2 / I
mij <- mean(b)
vij <- omega.b^2 / I
mu.b <- stats::rnorm(1 , mean=mij , sd = sqrt(vij) )
# sample omega.b
sig2 <- sum( ( b - mu.b )^2 ) / I
omega.b <- sqrt( .mcmc.draw.variance( 1 ,
w0= prior.omega.b[1] , sig02= prior.omega.b[2]^2 ,
n=I , sig2=sig2 ) )
}
res <- list( "mu.b"=mu.b , "omega.b" = omega.b )
return(res)
}
#####################################################
# sampling of b in case of multilevel DIF
# Draw the fixed (common) item difficulties b in the multilevel-DIF case:
# the group deviations bG are added back onto Z so only the common b remains
# in the residual; the rest mirrors the single-level sampler .draw.est.b.sl.
# (Arguments b, groupsize, G and sigma.b are not used in the body.)
.mcmc.est.b.2pno.ml.v2 <- function( N , Z , aM , theta , idgroup , groupsize ,
b , bG , G , I , sigma.b , omega.b , mu.b , sigma.res , link ){
# sampling of b (fixed item difficulties)
# Z = aM * theta - b - bG + eps
# Z - aM * theta + bG = - b + eps
Zres <- Z - aM * theta + bG[ idgroup , ]
# Zres <- Z - aM * theta
mij1 <- -colMeans(Zres)
#****
# link = logit
if (link=="logit"){
prec1ij <- N
prec2ij <- 1 / omega.b^2
prectotal <- prec1ij + prec2ij
# precision-weighted combination of data mean and prior mean mu.b
mij <- ( mij1 * prec1ij + mu.b * prec2ij ) / prectotal
vij <- 1 / prectotal
}
#***
# link = normal
if (link=="normal"){
# observations down-weighted by the item residual variances
prec1ij <- N / sigma.res^2
prec2ij <- 1 / omega.b^2
prectotal <- prec1ij + prec2ij
mij <- ( mij1 * prec1ij + mu.b * prec2ij ) / prectotal
vij <- 1 / prectotal
} # end link=normal
#*** sample b parameter
b <- stats::rnorm( I , mean=mij , sd = sqrt( vij ) )
return(b)
}
####################################################################
#########################################################
# estimation of b for clusters
# Draw group-specific difficulty deviations bG (G x I): for each group/item
# cell the group-mean residual is combined with a N(0, sigma.b^2) prior;
# the sampled matrix is then column-centered so deviations sum to zero
# across groups for every item.
.mcmc.est.b.group.2pno.ml <- function( Z , aM , theta , idgroup , groupsize ,
b , G , I , sigma.b , sigma.res , link , N ){
#*****
# Z = a * theta - b - bG + eps
# => - b = Z - a * theta + bG + eps
bM1 <- matrix( b , nrow=N , ncol=I , byrow=TRUE )
Zres <- Z - aM * theta + bM1
# compute means
mij1 <- - .mcmc.groupmean( matr=Zres , group=idgroup , groupsize )
mij2 <- matrix( 0 , G , I , byrow=TRUE )
# compute precisions
# data precision per cell = group size; divided by the item residual
# variance under the normal link
prec1 <- matrix( groupsize , G , I , byrow=FALSE) / 1
if (link == "normal"){
prec1 <- prec1 / matrix( sigma.res^2 , G , I , byrow=TRUE )
}
prec2 <- 1 / matrix( sigma.b^2 , G , I , byrow=TRUE )
prectot <- prec1 + prec2
# compute total means
mtot <- ( mij1*prec1 + mij2*prec2 ) / prectot
vtot <- 1 / prectot
# sampling of bG
bG <- matrix( stats::rnorm( G*I , mean=mtot , sd = sqrt(vtot) ) , G , I )
# adjustment
# bG1 <- rowMeans( bG )
# bG1 <- bG1 - mean(bG1)
# bG <- bG - bG1
# bG1 <- colMeans( bG )
# center each column (item) of bG at zero
bG <- as.matrix( base::scale( bG , scale=FALSE ) )
return(bG)
}
##################################################################
# estimation of hierarchical distribution
# Draw the standard deviations sigma.b of the group-specific difficulty
# deviations bG: either one SD per item (est.b.Var=="i") or one pooled SD
# replicated across items (est.b.Var=="j"); for any other value sigma.b is
# returned unchanged. (mu.b and omega.b are not used in the body.)
.mcmc.sigma.b.2pno.ml <- function( bG , mu.b , omega.b , G , I ,
est.b.Var , prior.sigma.b , sigma.b ){
#*****
# draw item group standard deviations
bresG <- bG
if ( est.b.Var == "i"){
# item-wise variances based on the G group deviations per item
sig2b <- colSums( bresG^2 ) / G
sigma.b <- sqrt( .mcmc.draw.variance( I ,
w0= prior.sigma.b[1] , sig02= prior.sigma.b[2]^2 ,
n=G , sig2=sig2b ) )
}
if ( est.b.Var == "j"){
# one pooled variance over all G*I deviations
sig2b <- sum( bresG^2 ) / (G*I)
sigma.b <- sqrt( .mcmc.draw.variance( 1 ,
w0= prior.sigma.b[1] , sig02= prior.sigma.b[2]^2 ,
n=G*I , sig2=sig2b ) )
sigma.b <- rep( sigma.b , I )
}
return(sigma.b)
}
##################################################################
# sampling of a parameters
# Draw item discriminations a in the single-level model: a is the regression
# coefficient of (Z + b) on theta, combined with the N(mu.a, omega.a^2)
# prior. Negative draws are truncated at eps, and the vector is normalized
# so that log(a) has mean zero (i.e. prod(a) = 1, for identification).
.draw.est.a.sl <- function( Z , bM , theta , mu.a , omega.a , I ,
sigma.res , link ){
# Z = a * theta - b + eps
# => Z + b = a*theta + eps
# a is obtained as a regression estimate
eps <- .01
Zres <- Z + bM
h1 <- sum( theta^2 )
h2 <- colSums( Zres * theta )
# calculate means
m1ij <- h2 / h1
m2ij <- mu.a
# calculate precisions
prec1 <- h1 * 1 # (X'X)^(-1) * sigma^2_{res}
if ( link == "normal"){
prec1 <- prec1 / sigma.res^2
}
prec2 <- 1 / omega.a^2
prectotal <- prec1 + prec2
# define mean and variance of posterior
m1 <- ( m1ij * prec1 + m2ij * prec2 ) / prectotal
# sampling of a
a <- stats::rnorm( I , mean=m1 , sd = 1/sqrt(prectotal) )
# a <- a - ( mean(a) - 1 )
# truncate negative discriminations at eps, then center on the log scale
a[ a < 0 ] <- eps
a <- exp( log(a) - mean( log( a ) ) )
# a <- a / prod(a)
return(a)
}
######################################################################
# draw hyperparameters of a
# Draw the hyperparameters of the discrimination distribution under
# hierarchical estimation (est.a.M=="h"): mu.a is fixed at 1 (identification)
# and omega.a is sampled; otherwise both are returned unchanged.
.draw.est.a.hyperpars <- function( a, mu.a , omega.a , I ,
prior.omega.a , est.a.M ){
if ( est.a.M=="h"){
# set mu.a to one
mu.a <- 1
# sample omega.a
sig2 <- sum( ( a - mu.a )^2 ) / I
omega.a <- sqrt( .mcmc.draw.variance( 1 ,
w0= prior.omega.a[1] , sig02= prior.omega.a[2]^2 ,
n=I , sig2=sig2 ) )
}
res <- list( "mu.a"=mu.a , "omega.a" = omega.a )
return(res)
}
###################################################################
# sampling of a parameters
# Draw the common item discriminations a in the multilevel model: the
# group-specific slope contributions aG*theta are subtracted from Z first,
# then a is sampled as a regression coefficient (as in the single-level
# case) and shifted so its mean equals 1 (identification).
.mcmc.a.est.a.2pno.ml <- function( Z , bM , aG , idgroup , theta ,
mu.a , omega.a , I , link , sigma.res ){
# Z = a * theta + aG*theta - bM + eps
# => Z + bM - aG * theta = a*theta + eps
# a is obtained as a regression estimate
Zres <- Z + bM - aG[ idgroup , ] * theta
h1 <- sum( theta^2 )
h2 <- colSums( Zres * theta )
# calculate means
m1ij <- h2 / h1
m2ij <- mu.a
# calculate precisions
prec1 <- h1 * 1 # (X'X)^(-1) * sigma^2_{res}
if (link=="normal"){
prec1 <- prec1 / sigma.res^2
}
prec2 <- 1 / omega.a^2
prectotal <- prec1 + prec2
# define mean and variance of posterior
m1 <- ( m1ij * prec1 + m2ij * prec2 ) / prectotal
# sampling of a
a <- stats::rnorm( I , mean=m1 , sd = 1/sqrt(prectotal) )
# recenter so the mean discrimination equals 1
a <- a - ( mean(a) - 1 )
return(a)
}
###################################################################
########################################################################
# draw a parameters group wise
# Draw group-specific discrimination deviations aG (G x I): a per-group
# regression of the residual on theta, shrunk towards 0 with prior variance
# sigma.a^2; the sampled matrix is centered afterwards.
.mcmc.est.aG.2pno.ml.v2 <- function( Z , bM , theta , idgroup , G , I ,
a , sigma.a , N , link , sigma.res ){
#****
# Z = a * theta + aG*theta - bM + eps
# Z + bM - a*theta = aG * theta + eps
aM1 <- matrix( a , N , I , byrow=TRUE )
Zres <- Z + bM - aM1*theta
# calculate means of a parameters
# per-group regression building blocks: sum(theta^2) and sum(Zres*theta)
theta2l <- rowsum( theta^2 , idgroup )[,1]
Zrestheta <- rowsum( Zres*theta , idgroup )
m1ij <- Zrestheta / theta2l
m2ij <- matrix( 0 , G , I , byrow=TRUE )
# calculate precisions
prec1 <- matrix( theta2l , G , I )
if (link=="normal"){
prec1 <- prec1 / sigma.res^2
}
# take sigma.res into account!!
prec2 <- matrix( 1 / sigma.a^2 , G , I , byrow=TRUE )
prectotal <- prec1 + prec2
m1 <- ( m1ij*prec1 + m2ij * prec2 ) / prectotal
aG <- matrix( stats::rnorm(G*I , mean = m1 , sd = sqrt( 1 / prectotal )) , G , I )
# center aG parameters within each group
# aG <- aG - ( rowMeans( aG ) - 0 )
# NOTE(review): scale() centers COLUMNS (per item across groups), not rows
# per group as the comment above suggests -- confirm intended behavior
aG <- scale( aG , scale=FALSE)
return(aG)
}
#########################################################
# sampling from hierarchical a distribution
# Draw the standard deviations sigma.a of the group-specific discrimination
# deviations aG: item-wise (est.a.Var=="i") or one pooled SD replicated per
# item (est.a.Var=="j"); for any other value sigma.a is returned unchanged.
# (mu.a and omega.a are not used in the body.)
.mcmc.a.grouphier.2pno.ml <- function( aG , mu.a , G , omega.a , I ,
prior.sigma.a , est.a.Var , sigma.a ){
#***
# draw item group standard deviations
aresG <- aG
if ( est.a.Var == "i"){
# item-wise variances based on the G group deviations per item
sig2b <- colSums( aresG^2 ) / G
sigma.a <- sqrt( .mcmc.draw.variance( I ,
w0= prior.sigma.a[1] , sig02= prior.sigma.a[2]^2 ,
n=G , sig2=sig2b ) )
}
if ( est.a.Var == "j"){
# one pooled variance over all G*I deviations
sig2b <- sum( aresG^2 ) / (G*I)
sigma.a <- sqrt( .mcmc.draw.variance( 1 ,
w0= prior.sigma.a[1] , sig02= prior.sigma.a[2]^2 ,
n=G*I , sig2=sig2b ) )
sigma.a <- rep( sigma.a , I )
}
return(sigma.a)
}
####################################################################
# draw residual standard deviations
# Draw item-wise residual standard deviations (normal link) from the squared
# residuals of Z given the current discriminations, difficulties and theta.
.draw.sigma.res.2pno.ml <- function( Z , aM , bM , theta , N , I ){
# Z = a * theta - b + eps
Zres <- Z - aM * theta + bM
sig2 <- colSums( Zres^2 ) / N
# near-noninformative prior (w0=.001, sig02=1)
sigma.res <- sqrt( .mcmc.draw.variance( I ,
w0= .001 , sig02= 1,
n=N , sig2=sig2 ) )
return(sigma.res)
}
| /sirt/R/mcmc.2pno.ml_alg.R | no_license | ingted/R-Examples | R | false | false | 13,541 | r |
#***********************************************
# parts of algorithm:
# * draw Z
# * draw theta2
# * draw theta1 (centering!)
# * draw sigma2
# * draw sigma1
# * draw b_class
# * draw b
# * draw variance(b_class)
# * draw a_class
# * draw a
# * draw variance(a_class)
#********************************************
#############################################
# compute group mean by the rowsum function
# Compute per-group means of the rows of 'matr' via rowsum(); group sizes
# are counted from 'group' when not supplied.
.mcmc.groupmean <- function( matr , group , groupsize=NULL ){
r1 <- rowsum( matr , group )
if ( is.null(groupsize) ){
# 1 + 0*matr[,1] yields a vector of ones, one per row of matr
groupsize <- rowsum( 1+0*matr[,1] , group )[,1]
}
r1 / groupsize
}
############################################
####################################################
# draw latent responses Z
# Draw latent responses Z from a unit-variance normal truncated to
# [threshlow, threshupp] around mean aM*theta - bM, via inverse-CDF
# sampling with one uniform per person x item cell.
.draw.Z.2pno.ml <- function( aM , bM, theta , N , I , threshlow , threshupp ){
# calculate means
mij <- aM * theta - bM
# simulate uniform data
rij <- matrix( stats::runif( N*I ) , nrow=N , ncol=I )
# calculate corresponding value
# CDF values at the lower/upper truncation bounds
pl <- stats::pnorm( threshlow , mean=mij)
pu <- stats::pnorm( threshupp , mean=mij)
pij <- pl + (pu-pl)*rij
# simulate Z
Zij <- stats::qnorm( pij , mean = mij )
return(Zij)
}
#############################################
########################################
# draw theta = theta.L2 + theta.L1
# here, the "total" theta is sampled!
# Draw person abilities theta from their normal full conditional.
# The "total" theta (level-1 + level-2) is sampled: the data part comes from
# the latent responses Z, the prior part from the group mean theta2 with
# level-1 SD sigma1; the draw is centered to mean 0 before returning.
# (Arguments b and sigma2 are not used in the active code, only in the
# commented-out alternatives.)
.draw.theta.2pno.ml <- function( aM , b , bM , N , I , Z ,
sigma1 , sigma2 , sigma.res , link , theta2 , idgroup ){
#**************************************
# link = logit
# Z = aM * theta - bM + eps
# Z + bM = aM * theta + eps
# bM <- matrix(b,N,I,byrow=TRUE )
if (link=="logit"){
# vtheta <- 1 / ( rowSums( aM^2 ) + ( sigma1^2+sigma2^2 ) )
# vtheta <- 1 / ( rowSums( aM^2 ) + 1/( sigma1^2+sigma2^2 ) )
# mtheta <- rowSums( aM * ( Z + bM ) ) * vtheta
Zres <- Z + bM
# least-squares estimate of theta from the responses; precision prec1
m1ij <- rowSums( aM * Zres )
prec1 <- rowSums(aM^2 )
m1ij <- m1ij / prec1
# prior mean: group-level theta with precision 1/sigma1^2
m2ij <- theta2[ idgroup ]
# prec2 <- 1 / ( sigma1^2 + sigma2^2 )
prec2 <- 1 / ( sigma1^2)
} # end logit
#***************************************
# link = normal
if (link == "normal"){
# print(sigma.res)
sigma.resM <- matrix( sigma.res , N , I , byrow=TRUE)
Zres <- Z + bM
# correct precision and estimator
# something seems to go wrong here
# m1ij <- rowSums( aM * Zres * sigma.resM)
# weighted least squares: items weighted by inverse residual variance
m1ij <- rowSums( aM * Zres / sigma.resM^2 )
m2ij <- theta2[ idgroup ]
# prec1 <- rowSums( aM^2 * sigma.resM^2 )
prec1 <- rowSums( aM^2 / sigma.resM^2 )
m1ij <- m1ij / prec1
prec2 <- 1 / ( sigma1^2 )
} # end normal
# precision-weighted posterior mean/variance, then sample and center
prectotal <- prec1 + prec2
mtheta <- ( prec1*m1ij + prec2 * m2ij ) / prectotal
vtheta <- 1 / prectotal
theta <- stats::rnorm( N , mean=mtheta , sd = sqrt( vtheta ) )
theta <- theta - mean(theta)
return(theta)
}
#########################################
# draw level 2 latent class mean
# Draw the G group-level means theta2 from their normal full conditional:
# the group mean of theta (data precision groupsize/sigma1^2) is shrunk
# towards 0 (prior precision 1/sigma2^2); the result is centered to mean 0.
.draw.theta2.2pno.ml <- function( theta , idgroup , groupsize ,
sigma1 , sigma2 , G ){
# compute latent mean
mij1 <- .mcmc.groupmean( theta , idgroup , groupsize)[,1]
# data precision: groupsize observations with level-1 variance sigma1^2
prec.ij1 <- 1 / ( sigma1^2 / groupsize )
prec.ij2 <- 1 / sigma2^2
prec.tot <- prec.ij1 + prec.ij2
vtheta <- 1 / prec.tot
# posterior mean (the prior mean is 0, so only the data term remains)
mtheta <- mij1 * prec.ij1 / prec.tot
theta2 <- stats::rnorm( G , mean=mtheta , sd = sqrt( vtheta ))
theta2 <- theta2 - mean(theta2)
return(theta2)
}
######################################
############################################################
# draw level 1 and level 2 variances
.draw.sigma12.2pno.ml <- function( theta , theta2 , idgroup , N ,
G , prior.sigma2 ){
# Draw the level-1 (within-group) and level-2 (between-group) SDs from
# their full conditionals via the package-internal variance sampler
# .mcmc.draw.variance, where sig2 is the observed mean squared deviation.
# NOTE(review): prior.sigma2 appears to be c(prior weight, prior SD),
# inferred from its usage below -- confirm against the sampler's docs.
# level 1 variance
sig2 <- sum( (theta - theta2[ idgroup ])^2 ) / N
# fixed weak prior (w0=1, sig02=.7) for the level-1 variance
sigma1 <- sqrt( .mcmc.draw.variance( 1 , w0= 1 , sig02= .7 , n=N , sig2=sig2 ) )
# level 2 variance
sig2 <- sum( theta2^2 ) / G
sigma2 <- sqrt( .mcmc.draw.variance( 1 ,
w0= prior.sigma2[1] , sig02= prior.sigma2[2]^2 , n=G , sig2=sig2 ) )
res <- list( "sigma1"=sigma1 , "sigma2" = sigma2 )
return(res)
}
######################################################
# b item parameter single level case
.draw.est.b.sl <- function( Z , aM , theta , N , I , omega.b , mu.b ,
sigma.res , link ){
# Draw item difficulties b (single-level model) from their normal full
# conditional.
#
# Model: Z = aM * theta - b + eps, so -b is estimated by the column
# means of Z - aM * theta.
#
# Z         N x I matrix of latent responses
# aM        N x I matrix of item discriminations
# theta     length-N vector of person abilities
# N, I      number of persons / items
# omega.b   prior SD of the difficulties
# mu.b      prior mean of the difficulties
# sigma.res length-I residual SDs (used only for link == "normal")
# link      "logit" (unit residual variance) or "normal"
#           (item-specific residual variances sigma.res^2)
#
# Returns a length-I vector of sampled item difficulties.
Zres <- Z - aM * theta
# data-based estimate of b per item (sign flip because Z carries -b)
mij1 <- -colMeans(Zres)
# The two links differ only in the residual variance entering the data
# precision; everything downstream is shared.
if (link=="logit"){
prec1ij <- N
} else if (link=="normal"){
prec1ij <- N / sigma.res^2
} else {
stop("link must be 'logit' or 'normal'")
}
# prior precision
prec2ij <- 1 / omega.b^2
prectotal <- prec1ij + prec2ij
# precision-weighted combination of data estimate and prior mean
mij <- ( mij1 * prec1ij + mu.b * prec2ij ) / prectotal
vij <- 1 / prectotal
#*** sample b parameter
b <- stats::rnorm( I , mean=mij , sd = sqrt( vij ) )
return(b)
}
####################################################
# draw hyperparameters of b
.draw.est.b.hyperpars <- function( b, mu.b , omega.b , I ,
prior.omega.b , est.b.M ){
# Draw hyperparameters (mu.b, omega.b) of the item difficulty
# distribution b ~ N(mu.b, omega.b^2).  They are only updated under the
# hierarchical setting est.b.M == "h"; otherwise the incoming values are
# passed through unchanged.
if ( est.b.M=="h"){
# sample mu.b
# posterior of the mean given a flat prior: N(mean(b), omega.b^2 / I)
mij <- mean(b)
vij <- omega.b^2 / I
mu.b <- stats::rnorm(1 , mean=mij , sd = sqrt(vij) )
# sample omega.b
# mean squared deviation around the freshly sampled mu.b
sig2 <- sum( ( b - mu.b )^2 ) / I
omega.b <- sqrt( .mcmc.draw.variance( 1 ,
w0= prior.omega.b[1] , sig02= prior.omega.b[2]^2 ,
n=I , sig2=sig2 ) )
}
res <- list( "mu.b"=mu.b , "omega.b" = omega.b )
return(res)
}
#####################################################
# sampling of b in case of multilevel DIF
.mcmc.est.b.2pno.ml.v2 <- function( N , Z , aM , theta , idgroup , groupsize ,
b , bG , G , I , sigma.b , omega.b , mu.b , sigma.res , link ){
# Draw fixed item difficulties b in the multilevel DIF model.
# Model: Z = aM * theta - b - bG + eps, so after adding the group
# effects bG back onto the residual, -b is the per-item column mean.
resid_mat <- Z - aM * theta + bG[ idgroup , ]
b_hat <- -colMeans(resid_mat)
# data precision: the two links differ only in the residual variance
# (1 for "logit", item-specific sigma.res^2 for "normal")
if (link=="logit"){
data_prec <- N
}
if (link=="normal"){
data_prec <- N / sigma.res^2
}
# prior precision from b ~ N(mu.b, omega.b^2)
prior_prec <- 1 / omega.b^2
total_prec <- data_prec + prior_prec
# precision-weighted posterior mean and variance
post_mean <- ( b_hat * data_prec + mu.b * prior_prec ) / total_prec
post_var <- 1 / total_prec
# one normal draw per item
stats::rnorm( I , mean=post_mean , sd = sqrt( post_var ) )
}
####################################################################
#########################################################
# estimation of b for clusters
.mcmc.est.b.group.2pno.ml <- function( Z , aM , theta , idgroup , groupsize ,
b , G , I , sigma.b , sigma.res , link , N ){
# Draw group-specific item difficulty deviations bG (G x I) in the
# multilevel DIF model.  After removing the fixed difficulties b, the
# group means of the residual estimate -bG; draws are shrunk toward 0
# with item-specific precision 1/sigma.b^2.
# The sampled matrix is column-centered so that, for each item, group
# deviations sum to zero (identification).
#*****
# Z = a * theta - b - bG + eps
# => - b = Z - a * theta + bG + eps
bM1 <- matrix( b , nrow=N , ncol=I , byrow=TRUE )
Zres <- Z - aM * theta + bM1
# compute means
# per-group, per-item residual means via package-internal helper
mij1 <- - .mcmc.groupmean( matr=Zres , group=idgroup , groupsize )
# prior mean of the deviations is zero
mij2 <- matrix( 0 , G , I , byrow=TRUE )
# compute precisions
# data precision: number of observations per group (unit variance link)
prec1 <- matrix( groupsize , G , I , byrow=FALSE) / 1
if (link == "normal"){
# rescale by the item-specific residual variances
prec1 <- prec1 / matrix( sigma.res^2 , G , I , byrow=TRUE )
}
prec2 <- 1 / matrix( sigma.b^2 , G , I , byrow=TRUE )
prectot <- prec1 + prec2
# compute total means
mtot <- ( mij1*prec1 + mij2*prec2 ) / prectot
vtot <- 1 / prectot
# sampling of bG
bG <- matrix( stats::rnorm( G*I , mean=mtot , sd = sqrt(vtot) ) , G , I )
# adjustment
# bG1 <- rowMeans( bG )
# bG1 <- bG1 - mean(bG1)
# bG <- bG - bG1
# bG1 <- colMeans( bG )
# center each column (item) of bG at zero
bG <- as.matrix( base::scale( bG , scale=FALSE ) )
return(bG)
}
##################################################################
# estimation of hierarchical distribution
.mcmc.sigma.b.2pno.ml <- function( bG , mu.b , omega.b , G , I ,
est.b.Var , prior.sigma.b , sigma.b ){
# Draw the SDs sigma.b of the group-level difficulty deviations bG.
#   est.b.Var == "i": one SD per item (based on the columns of bG)
#   est.b.Var == "j": a single pooled SD, replicated to length I
# For any other value the incoming sigma.b is returned unchanged.
# NOTE(review): mu.b and omega.b are accepted but unused here.
#*****
# draw item group standard deviations
bresG <- bG
if ( est.b.Var == "i"){
# per-item mean square across groups
sig2b <- colSums( bresG^2 ) / G
sigma.b <- sqrt( .mcmc.draw.variance( I ,
w0= prior.sigma.b[1] , sig02= prior.sigma.b[2]^2 ,
n=G , sig2=sig2b ) )
}
if ( est.b.Var == "j"){
# pooled mean square over all groups and items
sig2b <- sum( bresG^2 ) / (G*I)
sigma.b <- sqrt( .mcmc.draw.variance( 1 ,
w0= prior.sigma.b[1] , sig02= prior.sigma.b[2]^2 ,
n=G*I , sig2=sig2b ) )
sigma.b <- rep( sigma.b , I )
}
return(sigma.b)
}
##################################################################
# sampling of a parameters
.draw.est.a.sl <- function( Z , bM , theta , mu.a , omega.a , I ,
sigma.res , link ){
# Draw item discriminations a (single-level model).
# a is the per-item least-squares slope of Z + bM on theta, combined
# with the prior N(mu.a, omega.a^2) via precision weighting.
# After sampling, negative draws are truncated to eps and the vector is
# rescaled so that log-discriminations have mean zero (prod(a) == 1),
# which identifies the scale.
# NOTE(review): only strictly negative draws are reset to eps; a draw in
# (0, eps) is kept and a draw of exactly 0 would make log(a) infinite --
# confirm whether a[ a < eps ] was intended.
# Z = a * theta - b + eps
# => Z + b = a*theta + eps
# a is obtained as a regression estimate
eps <- .01
Zres <- Z + bM
h1 <- sum( theta^2 )
h2 <- colSums( Zres * theta )
# calculate means
m1ij <- h2 / h1
m2ij <- mu.a
# calculate precisions
prec1 <- h1 * 1 # (X'X)^(-1) * sigma^2_{res}
if ( link == "normal"){
# item-specific residual variances rescale the data precision
prec1 <- prec1 / sigma.res^2
}
prec2 <- 1 / omega.a^2
prectotal <- prec1 + prec2
# define mean and variance of posterior
m1 <- ( m1ij * prec1 + m2ij * prec2 ) / prectotal
# sampling of a
a <- stats::rnorm( I , mean=m1 , sd = 1/sqrt(prectotal) )
# a <- a - ( mean(a) - 1 )
a[ a < 0 ] <- eps
# normalize on the log scale: geometric mean of a becomes 1
a <- exp( log(a) - mean( log( a ) ) )
# a <- a / prod(a)
return(a)
}
######################################################################
# draw hyperparameters of a
.draw.est.a.hyperpars <- function( a, mu.a , omega.a , I ,
prior.omega.a , est.a.M ){
# Draw the hyperparameter omega.a of the discrimination distribution
# a ~ N(mu.a, omega.a^2).  Under est.a.M == "h" the mean mu.a is fixed
# at 1 (identification) and only the SD omega.a is sampled; for any
# other value the incoming hyperparameters are passed through unchanged.
if ( est.a.M=="h"){
# set mu.a to one
mu.a <- 1
# sample omega.a
# mean squared deviation of a around the fixed mean of 1
sig2 <- sum( ( a - mu.a )^2 ) / I
omega.a <- sqrt( .mcmc.draw.variance( 1 ,
w0= prior.omega.a[1] , sig02= prior.omega.a[2]^2 ,
n=I , sig2=sig2 ) )
}
res <- list( "mu.a"=mu.a , "omega.a" = omega.a )
return(res)
}
###################################################################
# sampling of a parameters
.mcmc.a.est.a.2pno.ml <- function( Z , bM , aG , idgroup , theta ,
mu.a , omega.a , I , link , sigma.res ){
# Draw fixed item discriminations a in the multilevel model.
# Model: Z = a * theta + aG * theta - bM + eps; removing the group
# slopes aG leaves a simple per-item regression of Z + bM on theta.
resid_mat <- Z + bM - aG[ idgroup , ] * theta
ss_theta <- sum( theta^2 )
# least-squares slope estimate per item
a_hat <- colSums( resid_mat * theta ) / ss_theta
# data precision is (X'X), divided by the residual variance when
# link == "normal" (unit variance for "logit")
data_prec <- if (link=="normal") ss_theta / sigma.res^2 else ss_theta
prior_prec <- 1 / omega.a^2
total_prec <- data_prec + prior_prec
# precision-weighted posterior mean
post_mean <- ( a_hat * data_prec + mu.a * prior_prec ) / total_prec
a <- stats::rnorm( I , mean=post_mean , sd = 1/sqrt(total_prec) )
# identification: shift the draws so that mean(a) == 1
a - ( mean(a) - 1 )
}
###################################################################
########################################################################
# draw a parameters group wise
.mcmc.est.aG.2pno.ml.v2 <- function( Z , bM , theta , idgroup , G , I ,
a , sigma.a , N , link , sigma.res ){
# Draw group-specific discrimination deviations aG (G x I).
# theta2l holds the within-group sums of squares of theta and the
# rowsum of Zres*theta the group-wise regression numerators, so m1ij is
# the group-level least-squares slope estimate per item.
# Draws are shrunk toward 0 with precision 1/sigma.a^2 and the matrix
# is column-centered so deviations sum to zero per item.
# NOTE(review): unlike the bG sampler, the scaled result is not wrapped
# in as.matrix(), so "scaled:center" attributes remain on the returned
# matrix -- confirm callers are unaffected.
#****
# Z = a * theta + aG*theta - bM + eps
# Z + bM - a*theta = aG * theta + eps
aM1 <- matrix( a , N , I , byrow=TRUE )
Zres <- Z + bM - aM1*theta
# calculate means of a parameters
theta2l <- rowsum( theta^2 , idgroup )[,1]
Zrestheta <- rowsum( Zres*theta , idgroup )
m1ij <- Zrestheta / theta2l
# prior mean of the deviations is zero
m2ij <- matrix( 0 , G , I , byrow=TRUE )
# calculate precisions
prec1 <- matrix( theta2l , G , I )
if (link=="normal"){
prec1 <- prec1 / sigma.res^2
}
# take sigma.res into account!!
prec2 <- matrix( 1 / sigma.a^2 , G , I , byrow=TRUE )
prectotal <- prec1 + prec2
# precision-weighted posterior mean, then one normal draw per cell
m1 <- ( m1ij*prec1 + m2ij * prec2 ) / prectotal
aG <- matrix( stats::rnorm(G*I , mean = m1 , sd = sqrt( 1 / prectotal )) , G , I )
# center aG parameters within each group
# aG <- aG - ( rowMeans( aG ) - 0 )
aG <- scale( aG , scale=FALSE)
return(aG)
}
#########################################################
# sampling from hierarchical a distribution
.mcmc.a.grouphier.2pno.ml <- function( aG , mu.a , G , omega.a , I ,
prior.sigma.a , est.a.Var , sigma.a ){
# Draw the SDs sigma.a of the group-level discrimination deviations aG;
# mirrors .mcmc.sigma.b.2pno.ml:
#   est.a.Var == "i": one SD per item; "j": pooled SD replicated to I.
# For any other value the incoming sigma.a is returned unchanged.
# NOTE(review): mu.a and omega.a are accepted but unused here.
#***
# draw item group standard deviations
aresG <- aG
if ( est.a.Var == "i"){
# per-item mean square across groups
sig2b <- colSums( aresG^2 ) / G
sigma.a <- sqrt( .mcmc.draw.variance( I ,
w0= prior.sigma.a[1] , sig02= prior.sigma.a[2]^2 ,
n=G , sig2=sig2b ) )
}
if ( est.a.Var == "j"){
# pooled mean square over all groups and items
sig2b <- sum( aresG^2 ) / (G*I)
sigma.a <- sqrt( .mcmc.draw.variance( 1 ,
w0= prior.sigma.a[1] , sig02= prior.sigma.a[2]^2 ,
n=G*I , sig2=sig2b ) )
sigma.a <- rep( sigma.a , I )
}
return(sigma.a)
}
####################################################################
# draw residual standard deviations
.draw.sigma.res.2pno.ml <- function( Z , aM , bM , theta , N , I ){
# Draw item-specific residual SDs (used with link == "normal") from the
# per-item mean squares of the residual Z - aM*theta + bM, using a very
# weak prior (w0=.001, sig02=1) in the package-internal variance sampler.
# Returns a length-I vector of residual SDs.
# Z = a * theta - b + eps
Zres <- Z - aM * theta + bM
sig2 <- colSums( Zres^2 ) / N
sigma.res <- sqrt( .mcmc.draw.variance( I ,
w0= .001 , sig02= 1,
n=N , sig2=sig2 ) )
return(sigma.res)
}
|
## Dimension reduction via PCA (Seurat) for a reference + query data set.
##
## Usage: Rscript s5_dr_pca.r <sample>
##   Reads data_ref/expr.txt (reference expression, cells x genes),
##   data_obj/<sample>.txt (query expression) and data_ref/gene.txt
##   (gene annotation; column 2 holds gene names), stacks reference and
##   query cells, runs PCA on all genes, and writes the first 30 PC
##   embeddings (one row per cell, tab-separated, no headers) to
##   temp_out/s5_embedding_<sample>.txt.
suppressMessages(library(Seurat))
suppressMessages(library(dplyr))
suppressMessages(library(Matrix))
suppressMessages(library(methods))
suppressMessages(library(scales))
suppressMessages(library(ggplot2))
# command-line arguments: args[1] is the query sample name
args <- commandArgs(trailingOnly = TRUE)
sample <- args[1]
setwd("./")
path_out <- "./"
data1 <- read.table("data_ref/expr.txt")
data2 <- read.table(file.path("data_obj", paste0(sample, ".txt")))
gene1 <- read.table("data_ref/gene.txt")
# stack reference and query cells, then transpose to genes x cells as
# expected by CreateSeuratObject
data3 <- rbind(data1, data2)
data4 <- t(data3)
cell_all <- nrow(data3)
colnames(data4) <- paste0("cell", seq_len(cell_all))
rownames(data4) <- gene1[, 2]
suppressWarnings(suppressMessages(bmmc <- CreateSeuratObject(counts = data4, project = "test")))
suppressWarnings(suppressMessages(bmmc <- ScaleData(object = bmmc)))
# PCA over all genes (no variable-feature selection), 30 components
suppressWarnings(suppressMessages(bmmc <- RunPCA(bmmc, features = rownames(bmmc), npcs = 30, weight.by.var = TRUE)))
write.table(bmmc@reductions$pca@cell.embeddings[, 1:30],
file = paste0(path_out, "temp_out/s5_embedding_", sample, ".txt"),
sep = "\t", row.names = FALSE, col.names = FALSE)
| /script/s5_dr_pca.r | permissive | hengwu0929/cci | R | false | false | 1,097 | r |
## Dimension reduction via PCA (Seurat) for a reference + query data set.
##
## Usage: Rscript s5_dr_pca.r <sample>
##   Reads data_ref/expr.txt (reference expression, cells x genes),
##   data_obj/<sample>.txt (query expression) and data_ref/gene.txt
##   (gene annotation; column 2 holds gene names), stacks reference and
##   query cells, runs PCA on all genes, and writes the first 30 PC
##   embeddings (one row per cell, tab-separated, no headers) to
##   temp_out/s5_embedding_<sample>.txt.
suppressMessages(library(Seurat))
suppressMessages(library(dplyr))
suppressMessages(library(Matrix))
suppressMessages(library(methods))
suppressMessages(library(scales))
suppressMessages(library(ggplot2))
# command-line arguments: args[1] is the query sample name
args <- commandArgs(trailingOnly = TRUE)
sample <- args[1]
setwd("./")
path_out <- "./"
data1 <- read.table("data_ref/expr.txt")
data2 <- read.table(file.path("data_obj", paste0(sample, ".txt")))
gene1 <- read.table("data_ref/gene.txt")
# stack reference and query cells, then transpose to genes x cells as
# expected by CreateSeuratObject
data3 <- rbind(data1, data2)
data4 <- t(data3)
cell_all <- nrow(data3)
colnames(data4) <- paste0("cell", seq_len(cell_all))
rownames(data4) <- gene1[, 2]
suppressWarnings(suppressMessages(bmmc <- CreateSeuratObject(counts = data4, project = "test")))
suppressWarnings(suppressMessages(bmmc <- ScaleData(object = bmmc)))
# PCA over all genes (no variable-feature selection), 30 components
suppressWarnings(suppressMessages(bmmc <- RunPCA(bmmc, features = rownames(bmmc), npcs = 30, weight.by.var = TRUE)))
write.table(bmmc@reductions$pca@cell.embeddings[, 1:30],
file = paste0(path_out, "temp_out/s5_embedding_", sample, ".txt"),
sep = "\t", row.names = FALSE, col.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitness_landscape_linear.R
\name{curve_maker_logistic}
\alias{curve_maker_logistic}
\title{Creates a logistic curve between two points}
\usage{
curve_maker_logistic(x1, y1, x2, y2, curve_scale = 0.07, ...)
}
\arguments{
\item{x1, y1, x2, y2}{the beginning and ending co-ordinates}
\item{curve_scale}{defines 'steepness' of the logistic function. Higher values are less steep}
}
\value{
a logistic line on the current graph between the given points
}
\description{
Creates a logistic curve between two points
}
| /packages/XGeneAnalysis/man/curve_maker_logistic.Rd | no_license | a3cel2/xga | R | false | true | 588 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitness_landscape_linear.R
\name{curve_maker_logistic}
\alias{curve_maker_logistic}
\title{Creates a logistic curve between two points}
\usage{
curve_maker_logistic(x1, y1, x2, y2, curve_scale = 0.07, ...)
}
\arguments{
\item{x1, y1, x2, y2}{the beginning and ending co-ordinates}
\item{curve_scale}{defines 'steepness' of the logistic function. Higher values are less steep}
}
\value{
a logistic line on the current graph between the given points
}
\description{
Creates a logistic curve between two points
}
|
# Shiny UI: interactive power calculation for a Gaussian one-sample
# test.  The null mean is fixed at 30; the user chooses the alternative
# mean, the SD, the sample size, the type I error and the test
# direction.  Output tabs are rendered by the matching server.R.
library(shiny)
library(ggplot2)
shinyUI(pageWithSidebar(
headerPanel("Calculating power for Gaussian Data"),
sidebarPanel(
p(strong('Null hypothesis mean : 30')),
# alternative-hypothesis mean (null mean fixed at 30 above)
sliderInput('mu', 'True mean (mu_a)', value = 34, min = 30, max = 38, step = 0.1),
sliderInput('sigma', 'Sample variance (sigma)', value = 10, min = 5, max = 20, step = 0.1),
sliderInput('n', 'Sample size (n)', value = 30, min = 10, max = 100, step = 5),
sliderInput('alpha', 'Type I error (alpha)', value = 0.05, min = 0.01, max = 0.1, step = 0.01),
# one- vs two-sided test ("U" = unilateral, "B" = bilateral)
radioButtons("htype", "Test type:", c("Unilateral" = "U", "Bilateral" = "B"), inline = TRUE)
),
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Documentation",
htmlOutput("documentation")
),
tabPanel("Usage",
htmlOutput("usage")
),
tabPanel("Plot",
withMathJax(),
h3(align = "center", "Gaussian distributions"),
uiOutput("hypothesis"),
plotOutput('powerplot'),
uiOutput("formula"),
tableOutput("values"))
)
)
))
| /ui.R | no_license | fab64443/ShinyPower | R | false | false | 1,376 | r | library(shiny)
library(ggplot2)
shinyUI(pageWithSidebar(
headerPanel("Calculating power for Gaussian Data"),
sidebarPanel(
p(strong('Null hypothesis mean : 30')),
sliderInput('mu', 'True mean (mu_a)',value = 34, min = 30, max = 38, step = 0.1),
sliderInput('sigma', 'Sample variance (sigma)',value = 10, min = 5, max = 20, step = 0.1),
sliderInput('n', 'Sample size (n)',value = 30, min = 10, max = 100, step = 5),
sliderInput('alpha', 'Type I error (alpha)',value = 0.05, min = 0.01, max = 0.1, step = 0.01),
radioButtons("htype", "Test type:", c("Unilateral"="U","Bilateral"="B"), inline=T)
),
# numericInput(inputId, label, value, min = NA, max = NA, step = NA, width = NULL)
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Documentation",
htmlOutput("documentation")
),
tabPanel("Usage",
htmlOutput("usage")
),
tabPanel("Plot",
withMathJax(),
h3(align="center","Gaussian distributions"),
uiOutput("hypothesis"),
plotOutput('powerplot'),
uiOutput("formula"),
tableOutput("values"))
)
)
))
|
\name{syms.pfunc}
\alias{syms.pfunc}
\title{ Function to Demonstrate the Effect of Different Values of p }
\description{
This function displays a plot demonstrating the effect of varying the value of p, for a range of p values from 0.2 to 5, on the 0 to 1 normalized values of a variable in order to compute corresponding circular symbol diameters.
}
\usage{
syms.pfunc()
}
\author{ Robert G. Garrett }
\keyword{ hplot }
| /man/syms.pfunc.Rd | no_license | cran/rgr | R | false | false | 437 | rd | \name{syms.pfunc}
\alias{syms.pfunc}
\title{ Function to Demonstrate the Effect of Different Values of p }
\description{
This function displays a plot demonstrating the effect of varying the value of p, for a range of p values from 0.2 to 5, on the 0 to 1 normalized values of a variable in order to compute corresponding circular symbol diameters.
}
\usage{
syms.pfunc()
}
\author{ Robert G. Garrett }
\keyword{ hplot }
|
# instructions for use:
# ensure downloaded files are in the same directory as this script
# metaMIR_analytical.R
# metaMIR_subs.R
# betafit_mv.RData
# refposneg.RData
# Run the entire script by e.g. using
# R --slave --vanilla --file=metaMIR_cl.R --args <ARGUMENTS_SEE_BELOW>
# The result is the creation of a function called
# batch_metaMIR, which takes the following arguments:
# inputfile - name of file to be analyzed, which is a
# list of genes (one per line) provided via a plain text file
# mincomb - minimum gene combination size to include in results (default=5)
# maxclust - maximum gene combination size (default=15)
# Zthresh - minimum threshold for individual combination scores (default=1)
# outPath - (optional) the path for the output files
# list of genes to be analyzed should be at least 5 genes long. Up to
# 15 genes will be simultaneously analyzed. Longer lists will be partitioned
# according to the nearest neighbours (according to miRNA co-regulation)
# for a provided input file <input_name>.txt the following output files are
# generated:
# <input_name>_cluster_<date_time>.txt
# - tab-delimited file with the clustering results for genelists longer than
# the maximum size, showing the frequency of gene-pair occurrence in the
# resulting lists
# <input_name>_rep_<date_time>.txt
# - tab-delimited file describing the percentage representation of each
# provided gene in the resulting lists, when more genes than the max
# cluster size are provided.
# <input_name>_params_<date_time>.txt
# - the parameters applied during execution.
# <input_name>_output_<date_time>.txt
# - resulting output of the script, with miRNA, number of genes predicted
# to be simultaneously targeted, the standardized score for the
# corresponding combination, an aggregate score, incorporating standard
# score and group size, the list from which the combination was derived,
# and the genes predicted to be targeted.
# output files are written to a "results" folder under the current
# folder, which will be created if it does not exist.
# set flag to indicate script being run in command line mode
comline <- TRUE
# Locate the directory containing this script, using the --file=
# argument that Rscript / R --file= puts into commandArgs(); stops with
# an instructive error when run without --file (e.g. interactively).
scriptPath <- function() {
cmdArgs <- commandArgs(trailingOnly = FALSE);
needle <- "--file=";
# NOTE(review): `match` here shadows base::match inside this function
match <- grep(needle, cmdArgs);
if (length(match) > 0) {
# Rscript
return(normalizePath(dirname(sub(needle, "", cmdArgs[match]))));
} else {
stop("run with argument --file=metaMIR_cl.R");
}
}
#################
# read commandline args
args <- commandArgs(trailingOnly=TRUE)
# ensure enough arguments
# expected: <inputfile> <mincomb> <maxclust> <zthresh> [optOutPath]
if (length(args)<4 ) {
stop("no arguments given!\n\n\t-> use <inputfile> <mincomb> <maxclust> <zthresh> [optOutPath]");
}
# output path defaults to the current directory
outPath <- "."
if (length(args)>=5) {
outPath <- args[5];
}
# get the absolute path of this script
metaMirPath <- scriptPath();
# check for installation status of required packages
# any missing CRAN dependencies are installed next to the script, which
# is also used as an extra library location below
pkgs <- c("data.table","stringr","reshape2")
if(length(new.pkgs <- setdiff(pkgs, rownames(installed.packages(lib.loc=c(.libPaths(),metaMirPath)))))>0) install.packages(new.pkgs, lib=metaMirPath, repos="http://cran.us.r-project.org", destdir=metaMirPath)
rm(pkgs,new.pkgs)
suppressPackageStartupMessages(library(data.table,quietly=T,lib.loc=c(.libPaths(),metaMirPath)))
suppressPackageStartupMessages(library(stringr,quietly=T,lib.loc=c(.libPaths(),metaMirPath)))
suppressPackageStartupMessages(library(reshape2,quietly=T,lib.loc=c(.libPaths(),metaMirPath)))
# load reference database of predictions and beta dist fitting parameters
# (kept in a separate environment so they do not clutter the global one)
refspace.env <- new.env()
load( paste(metaMirPath,"refposneg.RData",sep="/"), envir=refspace.env)
load( paste(metaMirPath,"betafit_mv.RData",sep="/"), envir=refspace.env)
# # load parameters for score standardization. Contains the mean and variance
# # calculated from the shape1, shape2 fitting parameters determined for each miRNA
# sourcing this file defines batch_metaMIR, called below
source( paste(metaMirPath,"metaMIR_analytical.R",sep="/") )
#################
# call metaMIR using the command line arguments
batch_metaMIR( inputfile=args[1], mincomb=as.numeric(args[2]), maxclust=as.numeric(args[3]), Zthresh=as.numeric(args[4]), outPath=outPath);
| /R/metaMIR_cl.R | no_license | laixn/metaMIR | R | false | false | 4,341 | r |
# instructions for use:
# ensure downloaded files are in the same directory as this script
# metaMIR_analytical.R
# metaMIR_subs.R
# betafit_mv.RData
# refposneg.RData
# Run the entire script by e.g. using
# R --slave --vanilla --file=metaMIR_cl.R --args <ARGUMENTS_SEE_BELOW>
# The result is the creation of a function called
# batch_metaMIR, which takes the following arguments:
# inputfile - name of file to be analyzed, which is a
# list of genes (one per line) provided via a plain text file
# mincomb - minimum gene combination size to include in results (default=5)
# maxclust - maximum gene combination size (default=15)
# Zthresh - minimum threshold for individual combination scores (default=1)
# outPath - (optional) the path for the output files
# list of genes to be analyzed should be at least 5 genes long. Up to
# 15 genes will be simultaneously analyzed. Longer lists will be partitioned
# according to the nearest neighbours (according to miRNA co-regulation)
# for a provided input file <input_name>.txt the following output files are
# generated:
# <input_name>_cluster_<date_time>.txt
# - tab-delimited file with the clustering results for genelists longer than
# the maximum size, showing the frequency of gene-pair occurrence in the
# resulting lists
# <input_name>_rep_<date_time>.txt
# - tab-delimited file describing the percentage representation of each
# provided gene in the resulting lists, when more genes than the max
# cluster size are provided.
# <input_name>_params_<date_time>.txt
# - the parameters applied during execution.
# <input_name>_output_<date_time>.txt
# - resulting output of the script, with miRNA, number of genes predicted
# to be simultaneously targeted, the standardized score for the
# corresponding combination, an aggregate score, incorporating standard
# score and group size, the list from which the combination was derived,
# and the genes predicted to be targeted.
# output files are written to a "results" folder under the current
# folder, which will be created if it does not exist.
# set flag to indicate script being run in command line mode
comline <- TRUE
# Locate the directory containing this script, using the --file=
# argument that Rscript / R --file= puts into commandArgs(); stops with
# an instructive error when run without --file (e.g. interactively).
scriptPath <- function() {
cmdArgs <- commandArgs(trailingOnly = FALSE);
needle <- "--file=";
# NOTE(review): `match` here shadows base::match inside this function
match <- grep(needle, cmdArgs);
if (length(match) > 0) {
# Rscript
return(normalizePath(dirname(sub(needle, "", cmdArgs[match]))));
} else {
stop("run with argument --file=metaMIR_cl.R");
}
}
#################
# read commandline args
args <- commandArgs(trailingOnly=TRUE)
# ensure enough arguments
# expected: <inputfile> <mincomb> <maxclust> <zthresh> [optOutPath]
if (length(args)<4 ) {
stop("no arguments given!\n\n\t-> use <inputfile> <mincomb> <maxclust> <zthresh> [optOutPath]");
}
# output path defaults to the current directory
outPath <- "."
if (length(args)>=5) {
outPath <- args[5];
}
# get the absolute path of this script
metaMirPath <- scriptPath();
# check for installation status of required packages
# any missing CRAN dependencies are installed next to the script, which
# is also used as an extra library location below
pkgs <- c("data.table","stringr","reshape2")
if(length(new.pkgs <- setdiff(pkgs, rownames(installed.packages(lib.loc=c(.libPaths(),metaMirPath)))))>0) install.packages(new.pkgs, lib=metaMirPath, repos="http://cran.us.r-project.org", destdir=metaMirPath)
rm(pkgs,new.pkgs)
suppressPackageStartupMessages(library(data.table,quietly=T,lib.loc=c(.libPaths(),metaMirPath)))
suppressPackageStartupMessages(library(stringr,quietly=T,lib.loc=c(.libPaths(),metaMirPath)))
suppressPackageStartupMessages(library(reshape2,quietly=T,lib.loc=c(.libPaths(),metaMirPath)))
# load reference database of predictions and beta dist fitting parameters
# (kept in a separate environment so they do not clutter the global one)
refspace.env <- new.env()
load( paste(metaMirPath,"refposneg.RData",sep="/"), envir=refspace.env)
load( paste(metaMirPath,"betafit_mv.RData",sep="/"), envir=refspace.env)
# # load parameters for score standardization. Contains the mean and variance
# # calculated from the shape1, shape2 fitting parameters determined for each miRNA
# sourcing this file defines batch_metaMIR, called below
source( paste(metaMirPath,"metaMIR_analytical.R",sep="/") )
#################
# call metaMIR using the command line arguments
batch_metaMIR( inputfile=args[1], mincomb=as.numeric(args[2]), maxclust=as.numeric(args[3]), Zthresh=as.numeric(args[4]), outPath=outPath);
|
#' Video Duration
#'
#' \code{mp4_duration} - Reports the length of a video in seconds.
#'
#' @param path Path to the input .mp4 file.
#' @return \code{mp4_duration} - A numeric value giving length of video in
#' seconds.
#' @keywords duration
#' @export
#' @rdname mp4_duration
#' @examples
#' \dontrun{
#' mp4_duration("foo.mp4")
#' n_img("foo.mp4", 4)
#' mp4_to_times("foo.mp4", 4)
#' }
mp4_duration <- function(path) {
# Runs `ffmpeg -i <path>`, captures the console output in a temp file,
# and parses the "Duration: HH:MM:SS.xx," field into seconds.
# Requires `ffmpeg` on the PATH; returns NULL (with a warning) when it
# is unavailable.  Relies on helpers defined elsewhere in the package
# (delete, Trim, genXtract, hms2sec) -- not visible in this file.
## check if path exists
if (!file.exists(path)) stop(path, " does not exist")
## Detect OS and use shell on Windows or system else
fun <- ifelse(Sys.info()["sysname"] == "Windows", "shell", "system")
fun <- match.fun(fun)
## Check if ffmpeg is available
# NOTE(review): system() may signal a warning rather than an error when
# the command is missing, so the try-error check may not always trip --
# confirm on the target platforms.
version <- try(fun("ffmpeg -version", intern = TRUE))
if (inherits(version, 'try-error')) {
warning("The command `ffmpeg`",
" is not available in your system. Please install ffmpeg first:\n",
"http://www.ffmpeg.org/download.html")
return()
}
# redirect ffmpeg's combined output into a temp file and read it back
y <- tempdir()
dur <- file.path(y, "out.txt")
suppressWarnings(fun(sprintf("ffmpeg -i %s > %s 2>&1", shQuote(path),
shQuote(dur))))
durin <- readLines(dur)
delete(dur)
# keep the line carrying the Duration field and convert it to seconds
durline <- durin[grepl("Duration", durin)]
hms2sec(Trim(genXtract(durline, "Duration:", ",")))
}
#' Video Duration
#'
#' \code{n_img} - Reports the approximate number of images based on duration and
#' frames per second.
#'
#' @param fps The number of image frames per second to output. Generally the
#' fps used to deconstruct a video into images will be used to reconstruct the
#' images back to video.
#' @return \code{n_img} - A numeric value giving the number of images created
#' from the video.
#' @export
#' @rdname mp4_duration
n_img <- function(path, fps) {
# number of frames = fps * duration, rounded up to whole images
ceiling(fps * mp4_duration(path))
}
#' Video Duration
#'
#' \code{mp4_to_times} - Generate a sequence of times corresponding to
#' \code{fps} and video duration.
#'
#' @return \code{mp4_to_times} - A sequence of times corresponding to
#' \code{fps} and video duration
#' @export
#' @rdname mp4_duration
mp4_to_times <- function(path, fps = 4) {
# Build a regular grid of frame times (spacing 1/fps) covering the
# video, formatted as HH:MM:SS via the package helper sec_to_hms.
tot <- mp4_duration(path)
# fractional part of the duration in seconds
part <- tot - floor(tot)
# candidate frame offsets within one second
vals <- seq(0, 1, by = 1/fps)
difs <- vals - part
# first frame offset not less than the fractional part; the sequence
# end is padded past the integer duration by this offset
# NOTE(review): confirm the intended end-padding -- this can extend the
# grid more than one frame beyond the video's end.
minval <- vals[difs >= 0][1]
maxtime <- ceiling(tot) + minval
sec_to_hms(seq(0, maxtime, by = 1/fps))
}
| /R/mp4_duration.R | no_license | trinker/embodied | R | false | false | 2,363 | r | #' Video Duration
#'
#' \code{mp4_duration} - Reports the length of a video in seconds.
#'
#' @param path Path to the input .mp4 file.
#' @return \code{mp4_duration} - A numeric value giving length of video in
#' seconds.
#' @keywords duration
#' @export
#' @rdname mp4_duration
#' @examples
#' \dontrun{
#' mp4_duration("foo.mp4")
#' n_img("foo.mp4", 4)
#' mp4_to_times("foo.mp4", 4)
#' }
mp4_duration <- function(path) {
# Runs `ffmpeg -i <path>`, captures the console output in a temp file,
# and parses the "Duration: HH:MM:SS.xx," field into seconds.
# Requires `ffmpeg` on the PATH; returns NULL (with a warning) when it
# is unavailable.  Relies on helpers defined elsewhere in the package
# (delete, Trim, genXtract, hms2sec) -- not visible in this file.
## check if path exists
if (!file.exists(path)) stop(path, " does not exist")
## Detect OS and use shell on Windows or system else
fun <- ifelse(Sys.info()["sysname"] == "Windows", "shell", "system")
fun <- match.fun(fun)
## Check if ffmpeg is available
# NOTE(review): system() may signal a warning rather than an error when
# the command is missing, so the try-error check may not always trip --
# confirm on the target platforms.
version <- try(fun("ffmpeg -version", intern = TRUE))
if (inherits(version, 'try-error')) {
warning("The command `ffmpeg`",
" is not available in your system. Please install ffmpeg first:\n",
"http://www.ffmpeg.org/download.html")
return()
}
# redirect ffmpeg's combined output into a temp file and read it back
y <- tempdir()
dur <- file.path(y, "out.txt")
suppressWarnings(fun(sprintf("ffmpeg -i %s > %s 2>&1", shQuote(path),
shQuote(dur))))
durin <- readLines(dur)
delete(dur)
# keep the line carrying the Duration field and convert it to seconds
durline <- durin[grepl("Duration", durin)]
hms2sec(Trim(genXtract(durline, "Duration:", ",")))
}
#' Video Duration
#'
#' \code{n_img} - Reports the approximate number of images based on duration and
#' frames per second.
#'
#' @param fps The number of image frames per second to output. Generally the
#' fps used to deconstruct a video into images will be used to reconstruct the
#' images back to video.
#' @return \code{n_img} - A numeric value giving the number of images created
#' from the video.
#' @export
#' @rdname mp4_duration
n_img <- function(path, fps) {
# number of frames = fps * duration, rounded up to whole images
ceiling(fps * mp4_duration(path))
}
#' Video Duration
#'
#' \code{mp4_to_times} - Generate a sequence of times corresponding to
#' \code{fps} and video duration.
#'
#' @return \code{mp4_to_times} - A sequence of times corresponding to
#' \code{fps} and video duration
#' @export
#' @rdname mp4_duration
mp4_to_times <- function(path, fps = 4) {
# Build a regular grid of frame times (spacing 1/fps) covering the
# video, formatted as HH:MM:SS via the package helper sec_to_hms.
tot <- mp4_duration(path)
# fractional part of the duration in seconds
part <- tot - floor(tot)
# candidate frame offsets within one second
vals <- seq(0, 1, by = 1/fps)
difs <- vals - part
# first frame offset not less than the fractional part; the sequence
# end is padded past the integer duration by this offset
# NOTE(review): confirm the intended end-padding -- this can extend the
# grid more than one frame beyond the video's end.
minval <- vals[difs >= 0][1]
maxtime <- ceiling(tot) + minval
sec_to_hms(seq(0, maxtime, by = 1/fps))
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/flux_stats.R
\name{flux_stats}
\alias{flux_stats}
\title{Compute error statistics of load estimates}
\usage{
flux_stats(x, model, n.coeff, method)
}
\arguments{
\item{x}{data frame containing observed, predicted and residual loads}
\item{model}{linear model object of type lm}
\item{n.coeff}{number of coefficients used to make predictions (for computing degrees of freedom)}
\item{method}{name of method for labelling results}
}
\value{
list of error statistics
}
\description{
Compute error statistics of load estimates
}
| /man/flux_stats.Rd | no_license | walkerjeffd/fluxr | R | false | false | 614 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/flux_stats.R
\name{flux_stats}
\alias{flux_stats}
\title{Compute error statistics of load estimates}
\usage{
flux_stats(x, model, n.coeff, method)
}
\arguments{
\item{x}{data frame containing observed, predicted and residual loads}
\item{model}{linear model object of type lm}
\item{n.coeff}{number of coefficients used to make predictions (for computing degrees of freedom)}
\item{method}{name of method for labelling results}
}
\value{
list of error statistics
}
\description{
Compute error statistics of load estimates
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in
% R/isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull.R
\name{isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull}
\alias{isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull}
\title{Wrapper for the checkarg function, using specific parameter settings.}
\usage{
isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull(argument, default = NULL,
stopIfNot = FALSE, message = NULL, argumentName = NULL)
}
\arguments{
\item{argument}{See checkarg function.}
\item{default}{See checkarg function.}
\item{stopIfNot}{See checkarg function.}
\item{message}{See checkarg function.}
\item{argumentName}{See checkarg function.}
}
\value{
See checkarg function.
}
\description{
This function can be used in 3 ways:\enumerate{
\item Return TRUE or FALSE depending on whether the argument checks are
passed. This is suitable e.g. for if statements that take further action
if the argument does not pass the checks.\cr
\item Throw an exception if the argument does not pass the checks. This is
suitable e.g. when no further action needs to be taken other than
throwing an exception if the argument does not pass the checks.\cr
\item Same as (2) but by supplying a default value, a default can be assigned
in a single statement, when the argument is NULL. The checks are still
performed on the returned value, and an exception is thrown when not
passed.\cr
}
}
\details{
Actual call to checkarg: checkarg(argument, "N", default = default, stopIfNot = stopIfNot, nullAllowed = TRUE, n = 1, zeroAllowed = FALSE, negativeAllowed = FALSE, positiveAllowed = TRUE, nonIntegerAllowed = FALSE, naAllowed = TRUE, nanAllowed = TRUE, infAllowed = TRUE, message = message, argumentName = argumentName)
}
\examples{
isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull(2)
# returns TRUE (argument is valid)
isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull("X")
# returns FALSE (argument is invalid)
#isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull("X", stopIfNot = TRUE)
# throws exception with message defined by message and argumentName parameters
isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull(2, default = 1)
# returns 2 (the argument, rather than the default, since it is not NULL)
#isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull("X", default = 1)
# throws exception with message defined by message and argumentName parameters
isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull(NULL, default = 1)
# returns 1 (the default, rather than the argument, since it is NULL)
}
| /man/isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull.Rd | no_license | cran/checkarg | R | false | true | 2,659 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in
% R/isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull.R
\name{isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull}
\alias{isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull}
\title{Wrapper for the checkarg function, using specific parameter settings.}
\usage{
isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull(argument, default = NULL,
stopIfNot = FALSE, message = NULL, argumentName = NULL)
}
\arguments{
\item{argument}{See checkarg function.}
\item{default}{See checkarg function.}
\item{stopIfNot}{See checkarg function.}
\item{message}{See checkarg function.}
\item{argumentName}{See checkarg function.}
}
\value{
See checkarg function.
}
\description{
This function can be used in 3 ways:\enumerate{
\item Return TRUE or FALSE depending on whether the argument checks are
passed. This is suitable e.g. for if statements that take further action
if the argument does not pass the checks.\cr
\item Throw an exception if the argument does not pass the checks. This is
suitable e.g. when no further action needs to be taken other than
throwing an exception if the argument does not pass the checks.\cr
\item Same as (2) but by supplying a default value, a default can be assigned
in a single statement, when the argument is NULL. The checks are still
performed on the returned value, and an exception is thrown when not
passed.\cr
}
}
\details{
Actual call to checkarg: checkarg(argument, "N", default = default, stopIfNot = stopIfNot, nullAllowed = TRUE, n = 1, zeroAllowed = FALSE, negativeAllowed = FALSE, positiveAllowed = TRUE, nonIntegerAllowed = FALSE, naAllowed = TRUE, nanAllowed = TRUE, infAllowed = TRUE, message = message, argumentName = argumentName)
}
\examples{
isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull(2)
# returns TRUE (argument is valid)
isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull("X")
# returns FALSE (argument is invalid)
#isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull("X", stopIfNot = TRUE)
# throws exception with message defined by message and argumentName parameters
isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull(2, default = 1)
# returns 2 (the argument, rather than the default, since it is not NULL)
#isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull("X", default = 1)
# throws exception with message defined by message and argumentName parameters
isStrictlyPositiveIntegerOrNaOrNanOrInfScalarOrNull(NULL, default = 1)
# returns 1 (the default, rather than the argument, since it is NULL)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{ggplot.eeg_lst}
\alias{ggplot.eeg_lst}
\title{Create an ERP plot}
\usage{
\method{ggplot}{eeg_lst}(data = NULL, mapping = ggplot2::aes(), ..., .max_sample = 64000)
}
\arguments{
\item{data}{An \code{eeg_lst} object.}
\item{mapping}{Default list of aesthetic mappings to use for plot.
If not specified, must be supplied in each layer added to the plot.}
\item{...}{Other arguments passed on to methods. Not currently used.}
\item{.max_sample}{Downsample to approximately 64000 samples by default.}
}
\value{
A ggplot object
}
\description{
\code{ggplot} initializes a ggplot object which takes an \code{eeg_lst} object as
its input data. Layers can then be added in the same way as for a
\link[ggplot2:ggplot]{ggplot2::ggplot} object.
}
\details{
If necessary, it will first downsample the \code{eeg_lst} object so that there is a
maximum of 64000 samples. The \code{eeg_lst} object is then converted to a long-format
tibble via \link{as_tibble}. In this tibble, the \code{.key} variable is the
channel/component name and \code{.value} its respective amplitude. The sample
number (\code{.sample} in the \code{eeg_lst} object) is automatically converted to milliseconds
to create the variable \code{.time}. By default, time is plotted on the
x-axis and amplitude on the y-axis.
To add additional components to the plot such as titles and annotations, simply
use the \code{+} symbol and add layers exactly as you would for \link[ggplot2:ggplot]{ggplot2::ggplot}.
}
\examples{
library(ggplot2)
library(dplyr)
# Plot grand averages for selected channels
data_faces_ERPs \%>\%
# select the desired electrodes
select(O1, O2, P7, P8) \%>\%
ggplot(aes(x = .time, y = .key)) +
# add a grand average wave
stat_summary(
fun.y = "mean", geom = "line", alpha = 1, size = 1.5,
aes(color = condition)
) +
# facet by channel
facet_wrap(~.key) +
theme(legend.position = "bottom")
}
\seealso{
Other plotting functions:
\code{\link{annotate_events}()},
\code{\link{annotate_head}()},
\code{\link{eeg_downsample}()},
\code{\link{plot.eeg_lst}()},
\code{\link{plot_components}()},
\code{\link{plot_in_layout}()},
\code{\link{plot_topo}()},
\code{\link{theme_eeguana}()}
}
\concept{plotting functions}
| /man/ggplot.eeg_lst.Rd | permissive | bnicenboim/eeguana | R | false | true | 2,301 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{ggplot.eeg_lst}
\alias{ggplot.eeg_lst}
\title{Create an ERP plot}
\usage{
\method{ggplot}{eeg_lst}(data = NULL, mapping = ggplot2::aes(), ..., .max_sample = 64000)
}
\arguments{
\item{data}{An \code{eeg_lst} object.}
\item{mapping}{Default list of aesthetic mappings to use for plot.
If not specified, must be supplied in each layer added to the plot.}
\item{...}{Other arguments passed on to methods. Not currently used.}
\item{.max_sample}{Downsample to approximately 64000 samples by default.}
}
\value{
A ggplot object
}
\description{
\code{ggplot} initializes a ggplot object which takes an \code{eeg_lst} object as
its input data. Layers can then be added in the same way as for a
\link[ggplot2:ggplot]{ggplot2::ggplot} object.
}
\details{
If necessary, it will first downsample the \code{eeg_lst} object so that there is a
maximum of 64000 samples. The \code{eeg_lst} object is then converted to a long-format
tibble via \link{as_tibble}. In this tibble, the \code{.key} variable is the
channel/component name and \code{.value} its respective amplitude. The sample
number (\code{.sample} in the \code{eeg_lst} object) is automatically converted to milliseconds
to create the variable \code{.time}. By default, time is plotted on the
x-axis and amplitude on the y-axis.
To add additional components to the plot such as titles and annotations, simply
use the \code{+} symbol and add layers exactly as you would for \link[ggplot2:ggplot]{ggplot2::ggplot}.
}
\examples{
library(ggplot2)
library(dplyr)
# Plot grand averages for selected channels
data_faces_ERPs \%>\%
# select the desired electrodes
select(O1, O2, P7, P8) \%>\%
ggplot(aes(x = .time, y = .key)) +
# add a grand average wave
stat_summary(
fun.y = "mean", geom = "line", alpha = 1, size = 1.5,
aes(color = condition)
) +
# facet by channel
facet_wrap(~.key) +
theme(legend.position = "bottom")
}
\seealso{
Other plotting functions:
\code{\link{annotate_events}()},
\code{\link{annotate_head}()},
\code{\link{eeg_downsample}()},
\code{\link{plot.eeg_lst}()},
\code{\link{plot_components}()},
\code{\link{plot_in_layout}()},
\code{\link{plot_topo}()},
\code{\link{theme_eeguana}()}
}
\concept{plotting functions}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hla_apply_zigosity_threshold.R
\name{hla_apply_zigosity_threshold}
\alias{hla_apply_zigosity_threshold}
\title{Apply zigosity threshold to expression estimates}
\usage{
hla_apply_zigosity_threshold(hla_quants, th = 0.1)
}
\arguments{
\item{hla_quants}{data.frame.}
\item{th}{numeric.}
}
\value{
A data.frame
}
\description{
Apply zigosity threshold to expression estimates
}
| /man/hla_apply_zigosity_threshold.Rd | no_license | genevol-usp/hlaseqlib | R | false | true | 454 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hla_apply_zigosity_threshold.R
\name{hla_apply_zigosity_threshold}
\alias{hla_apply_zigosity_threshold}
\title{Apply zigosity threshold to expression estimates}
\usage{
hla_apply_zigosity_threshold(hla_quants, th = 0.1)
}
\arguments{
\item{hla_quants}{data.frame.}
\item{th}{numeric.}
}
\value{
A data.frame
}
\description{
Apply zigosity threshold to expression estimates
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{plate165_mean_raw_mfi_df}
\alias{plate165_mean_raw_mfi_df}
\title{plate165_mean_raw_mfi_df is a loadable version of the mean_raw_mfi_df dataframe
created by `get_mean_raw_mfi_from_xlsx` or `get_mean_raw_mfi_from_lxd` and
used by `get_low_positive_controls` and `subtract_background`.}
\format{
An object of class \code{data.frame} with 1092 rows and 8 columns.
}
\usage{
plate165_mean_raw_mfi_df
}
\description{
plate165_mean_raw_mfi_df is a loadable version of the mean_raw_mfi_df dataframe
created by `get_mean_raw_mfi_from_xlsx` or `get_mean_raw_mfi_from_lxd` and
used by `get_low_positive_controls` and `subtract_background`.
}
\examples{
\dontrun{
plate165_mean_raw_mfi_df <- snprcspf::plate165_mean_raw_mfi_df
}
}
\keyword{datasets}
| /man/plate165_mean_raw_mfi_df.Rd | permissive | rmsharp/snprcspf | R | false | true | 845 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{plate165_mean_raw_mfi_df}
\alias{plate165_mean_raw_mfi_df}
\title{plate165_mean_raw_mfi_df is a loadable version of the mean_raw_mfi_df dataframe
created by `get_mean_raw_mfi_from_xlsx` or `get_mean_raw_mfi_from_lxd` and
used by `get_low_positive_controls` and `subtract_background`.}
\format{
An object of class \code{data.frame} with 1092 rows and 8 columns.
}
\usage{
plate165_mean_raw_mfi_df
}
\description{
plate165_mean_raw_mfi_df is a loadable version of the mean_raw_mfi_df dataframe
created by `get_mean_raw_mfi_from_xlsx` or `get_mean_raw_mfi_from_lxd` and
used by `get_low_positive_controls` and `subtract_background`.
}
\examples{
\dontrun{
plate165_mean_raw_mfi_df <- snprcspf::plate165_mean_raw_mfi_df
}
}
\keyword{datasets}
|
## Exploratory Data Analysis
## Week 1 Project
## Plot 1 of 4
## Brent Poliquin
## 1/10/16

## Read data from the raw file. This dataset marks missing readings with "?";
## declaring na.strings keeps the numeric columns numeric instead of character,
## so the as.numeric() coercion below no longer emits NA-with-warning.
## (as.numeric() is retained as a no-op safety net on already-numeric input.)
dat <- read.table(file = "./data/household_power_consumption.txt",
                  header = TRUE,
                  sep = ";",
                  na.strings = "?",
                  stringsAsFactors = FALSE)

## Filter the data to the date range 2007-02-01 .. 2007-02-02.
## Date values are stored as [D]D/[M]M/YYYY.
filt_dat <- dat[dat$Date %in% c("1/2/2007", "2/2/2007"), ]

## Draw the Global Active Power histogram. One shared helper is used for both
## the on-screen preview and the PNG export so the two plots cannot drift apart.
draw_plot1 <- function() {
  hist(as.numeric(filt_dat$Global_active_power),
       col = "red",
       main = "Global Active Power",
       xlab = "Global Active Power (kilowatts)",
       xlim = c(0, 6),
       ylim = c(0, 1200))
}

## Visualize histogram within RStudio
draw_plot1()

## Create histogram and export to PNG (480 x 480)
png(filename = "plot1.png", width = 480, height = 480)
draw_plot1()
dev.off()
| /plot1.R | no_license | wilby31/ExData_Plotting1 | R | false | false | 1,015 | r | ## Exploratory Data Analysis
## Week 1 Project
## Plot 1 of 4
## Brent Poliquin
## 1/10/16

## Load the raw household power consumption data (semicolon-delimited).
power_data <- read.table("./data/household_power_consumption.txt",
                         header = TRUE, sep = ";",
                         stringsAsFactors = FALSE)

## Restrict to 2007-02-01 and 2007-02-02; dates are stored as [D]D/[M]M/YYYY.
target_dates <- c("1/2/2007", "2/2/2007")
filt_dat <- power_data[power_data$Date %in% target_dates, ]

## Coerce the power column to numeric once; reused by both plots below.
gap <- as.numeric(filt_dat$Global_active_power)

## Shared plot styling for the on-screen preview and the PNG export.
plot_args <- list(col = "red",
                  main = "Global Active Power",
                  xlab = "Global Active Power (kilowatts)",
                  xlim = c(0, 6),
                  ylim = c(0, 1200))

## On-screen preview.
do.call(hist, c(list(gap), plot_args))

## Export the same histogram to a 480 x 480 PNG.
png(filename = "plot1.png", width = 480, height = 480)
do.call(hist, c(list(gap), plot_args))
dev.off()
|
#####################################################################
# This list has not been updated since 2014
#
# All of these packages are not necessary for the 2015 classes
#
# If you are taking the one of the 2015 classes, it might
# be better to install as you go using the scripts which are
# provided for each lecture.
######################################################################
# Install CRAN tooling plus the course repositories from GitHub.
install.packages("devtools")
library(devtools)
install_github("ririzarr/rafalib")
install_github("genomicsclass/dagdata")

# Bootstrap Bioconductor (pre-BiocManager era) and its base packages.
source("http://bioconductor.org/biocLite.R")
biocLite()

# Small wrappers so each week's package list reads as a single statement.
# Installs run one package at a time, in exactly the original order.
bioc_install <- function(pkgs) invisible(lapply(pkgs, biocLite))
cran_install <- function(pkgs) invisible(lapply(pkgs, install.packages))

bioc_install(c("Biobase", "GenomicRanges"))

# Week 1
cran_install("dplyr")
bioc_install("Biobase")

# Week 2
bioc_install(c("GEOquery", "parathyroidSE"))
cran_install(c("matrixStats", "RColorBrewer"))
bioc_install(c("affy", "oligo"))

# Week 3
bioc_install(c("genefilter", "limma"))
cran_install("animation")

# Week 4
bioc_install("SpikeInSubset")
library(devtools); install_github("BackgroundExperimentYeast", "stephaniehicks")
bioc_install("SpikeIn")
cran_install("MASS")
bioc_install("parathyroidSE")
cran_install("matrixStats")
bioc_install(c("limma", "affy"))
cran_install("animation")
bioc_install(c("preprocessCore", "vsn"))
library(devtools); install_github("mycAffyData", "stephaniehicks")
cran_install("SQN")

# Week 5
cran_install(c("class", "caret", "RColorBrewer"))
bioc_install("genefilter")
cran_install(c("gplots", "matrixStats", "animation"))
bioc_install("yeastCC")
cran_install("UsingR")

# Week 6
bioc_install("genefilter")
cran_install("MASS")
bioc_install(c("hgfocus.db", "sva", "limma"))

# Week 7
bioc_install(c("GEOquery", "limma", "org.Hs.eg.db", "GO.db", "SpikeInSubset",
               "genefilter", "rae230a.db", "AnnotationDbi", "biomaRt"))

# Week 8
bioc_install(c("pasillaBamSubset", "TxDb.Dmelanogaster.UCSC.dm3.ensGene",
               "Rsamtools", "GenomicAlignments", "DESeq2", "biomaRt",
               "Gviz", "ggbio"))

# For building data objects
bioc_install(c("GEOquery", "affy", "simpleaffy", "SpikeIn"))
| /packages.R | permissive | miroslavas/labs | R | false | false | 2,249 | r |
#####################################################################
# This list has not been updated since 2014
#
# All of these packages are not necessary for the 2015 classes
#
# If you are taking the one of the 2015 classes, it might
# be better to install as you go using the scripts which are
# provided for each lecture.
######################################################################
# CRAN tooling plus the course repositories from GitHub.
install.packages("devtools")
library(devtools)
install_github("ririzarr/rafalib")
install_github("genomicsclass/dagdata")
# Bioconductor bootstrap script.
# NOTE(review): biocLite() was retired in later Bioconductor releases in
# favor of BiocManager::install(); this manifest is kept for historical use.
source("http://bioconductor.org/biocLite.R")
biocLite()
biocLite("Biobase")
biocLite("GenomicRanges")
# Week 1
install.packages("dplyr")
biocLite("Biobase")
# Week 2
biocLite("GEOquery")
biocLite("parathyroidSE")
install.packages("matrixStats")
install.packages("RColorBrewer")
biocLite("affy")
biocLite("oligo")
# Week 3
biocLite("genefilter")
biocLite("limma")
install.packages("animation")
# Week 4
biocLite("SpikeInSubset")
library(devtools);install_github("BackgroundExperimentYeast","stephaniehicks")
biocLite("SpikeIn")
install.packages("MASS")
biocLite("parathyroidSE")
install.packages("matrixStats")
biocLite("limma")
biocLite("affy")
install.packages("animation")
biocLite("preprocessCore")
biocLite("vsn")
library(devtools);install_github("mycAffyData","stephaniehicks")
install.packages("SQN")
# Week 5
install.packages("class")
install.packages("caret")
install.packages("RColorBrewer")
biocLite("genefilter")
install.packages("gplots")
install.packages("matrixStats")
install.packages("animation")
biocLite("yeastCC")
install.packages("UsingR")
# Week 6
biocLite("genefilter")
install.packages("MASS")
biocLite("hgfocus.db")
biocLite("sva")
biocLite("limma")
# Week 7
biocLite("GEOquery")
biocLite("limma")
biocLite("org.Hs.eg.db")
biocLite("GO.db")
biocLite("SpikeInSubset")
biocLite("genefilter")
biocLite("rae230a.db")
biocLite("AnnotationDbi")
biocLite("biomaRt")
# Week 8
biocLite("pasillaBamSubset")
biocLite("TxDb.Dmelanogaster.UCSC.dm3.ensGene")
biocLite("Rsamtools")
biocLite("GenomicAlignments")
biocLite("DESeq2")
biocLite("biomaRt")
biocLite("Gviz")
biocLite("ggbio")
# For building data objects
biocLite("GEOquery")
biocLite("affy")
biocLite("simpleaffy")
biocLite("SpikeIn")
|
# Compute the shortest path between each (start, end) vertex pair in a
# weighted, directed gene network and write the paths to a tab-separated file.
library(readxl)
library(dplyr)
library(stringr)
library(igraph)
setwd("/f/mulinlab/huan/All_result_ICGC/network/random_walk_restart/tmp_shortest_path_data/")
# Read every vertex id of the network.
org <- read.table("/f/mulinlab/huan/All_result_ICGC/network/network_gene_num.txt", header = TRUE)
spe <- data.frame(org$id)
# Read the edge list and reshape it into (from, to, weight) columns.
input <- read.table("/f/mulinlab/huan/All_result_ICGC/network/the_shortest_path/normal_network_num.txt", header = TRUE)
df <- data.frame(from = c(input$start), to = c(input$end), weight = c(input$weight))
# Build the directed, weighted graph over the full vertex set.
g <- graph.data.frame(df, directed = TRUE, vertices = spe)
# Read the (start, end) pairs whose shortest paths are wanted.
pairs <- read.table("./08_uni_start_end_shortest7.txt", header = FALSE) %>% as.data.frame()
N <- nrow(pairs)
# Preallocate one slot per pair: the previous version grew `rs` with
# bind_rows() inside the loop, copying all accumulated rows every iteration
# (O(N^2) overall). seq_len(N) (rather than 1:N) also iterates zero times
# when the pair file is empty instead of visiting c(1, 0).
path_rows <- vector("list", N)
for (i in seq_len(N)) {
  start <- pairs[i, 1]
  end <- pairs[i, 2]
  # Shortest path from start to end; output = "both" returns vertices and edges.
  sv <- get.shortest.paths(g, start, end, weights = NULL, output = "both")
  # Collapse the vertex sequence into a single "a-b-c" string.
  x <- paste(sv$vpath[[1]], collapse = "-")
  path_rows[[i]] <- data.frame(start = start, end = end, shortest = x)
}
rs <- bind_rows(path_rows)
# Save the result table.
write.table(rs, "09_the_shortest_path7.txt", row.names = FALSE, col.names = TRUE, quote = FALSE, sep = "\t")
| /Huan_link_all_script/All_result_ICGC/network/random_walk_restart/tmp_shortest_path_data/09_run_the_shortest_path7.R | no_license | Lhhuan/drug_repurposing | R | false | false | 1,752 | r | library(readxl)
library(dplyr)
library(stringr)
library(igraph)
setwd("/f/mulinlab/huan/All_result_ICGC/network/random_walk_restart/tmp_shortest_path_data/")
org<-read.table("/f/mulinlab/huan/All_result_ICGC/network/network_gene_num.txt",header = T)
spe<-data.frame(org$id)# read in every vertex id of the network
input<-read.table("/f/mulinlab/huan/All_result_ICGC/network/the_shortest_path/normal_network_num.txt",header = T) # read the edge file into a data.frame
df <- data.frame(from=c(input$start), to=c(input$end), weight=c(input$weight))# reshape the file into the graph's (start, end, weight) columns
g <- graph.data.frame(df,directed=TRUE,vertices=spe) # build the directed, weighted graph
pairs <-read.table("./08_uni_start_end_shortest7.txt",header = F)%>%as.data.frame()# read the list of (start, end) pairs to query
rs <- data.frame()# accumulator: one row per queried pair
N<-nrow(pairs)# number of pairs to process
# NOTE(review): 1:N iterates over c(1, 0) when pairs is empty (seq_len(N)
# would be safer), and growing rs with bind_rows() each iteration copies the
# accumulated rows every time -- O(N^2) in total.
for(i in 1:N ){# loop over every pair
start = pairs[i,1] # first column of row i: start vertex
end =pairs[i,2]# second column of row i: end vertex
sv <- get.shortest.paths(g,start,end,weights=NULL,output="both") # shortest path # returns both vertices and edges
x <- paste(sv$vpath[[1]], collapse="-")# collapse the vertex path, "-" separated
tmp <- data.frame(start=start,end=end, shortest=x)
rs <- bind_rows(rs,tmp)
#print(start)
#print(end)
#print(sv)
}
write.table(rs,"09_the_shortest_path7.txt",row.names = F, col.names = T,quote =F,sep="\t")# save the result table
#sv <- get.shortest.paths(g,2342,10735,weights=NULL,output="both") # returns both vertices and edges
#sv <- get.shortest.paths(g,10002,1351,weights=NULL,output="vpath") # returns vertices only, no edges
#sv <- get.shortest.paths(g,10002,11585,weights=NULL,output="both") # returns both vertices and edges
|
# TODO: Add comment
#
# Author: Brad
# File: Download_Index_Files.R
# Version: 1.0
# Date: 02.5.2014
# Purpose: This file downloads the index files from Edgar. Edgar has an index file for each quarter and
# year so you need to grab each one.
#
###############################################################################
###############################################################################
# INITIAL SETUP;
# Resets the session, sets global options, and resolves the machine-specific
# directory layout via the Location switch below.
cat("SECTION: INITIAL SETUP", "\n")
###############################################################################
# Clear workspace
# NOTE(review): rm(list = ls()) wipes the whole interactive session; this is
# only acceptable because the file is run as a standalone batch script.
rm(list = ls(all = TRUE))
# Limit History to not exceed 50 lines
Sys.setenv(R_HISTSIZE = 500)
# CRAN mirror used for any package installation triggered later.
repo <- c("http://cran.us.r-project.org")
options(repos = structure(repo))
options(install.packages.check.source = FALSE)
# String as factors is False -- used for read.csv
options(StringsAsFactors = FALSE)
# Default maxprint option
options(max.print = 500)
# options(max.print=99999)
# Memory limit
#memory.limit(size = 8183)
# Set location (1=HOME,2=WORK,3=CORALSEA FROM HOME,4=CORALSEA FROM WORK) Location <- 1
Location <- 1
# Every branch assigns the same four path variables (input_directory,
# output_directory, function_directory, treetag_directory); only the
# machine-specific prefixes differ.
if (Location == 1) {
  #setwd("C:/Research_temp3/")
  input_directory <- normalizePath("C:/Users/S.Brad/Dropbox/Research/Fund_Letters/Data",winslash="\\", mustWork=TRUE)
  output_directory <- normalizePath("F:/Research_temp3",winslash="\\", mustWork=TRUE)
  function_directory <- normalizePath("C:/Users/S.Brad/Dropbox/Research_Methods/R", winslash = "\\", mustWork = TRUE)
  treetag_directory <- normalizePath("C:/TreeTagger",winslash="\\", mustWork=TRUE)
} else if (Location == 2) {
  #setwd("C:/Research_temp3/")
  input_directory <- normalizePath("C:/Users/bdaughdr/Dropbox/Research/Fund_Letters/Data",winslash="\\", mustWork=TRUE)
  output_directory <- normalizePath("C:/Research_temp3",winslash="\\", mustWork=TRUE)
  function_directory <- normalizePath("C:/Users/bdaughdr/Dropbox/Research_Methods/R",winslash="\\", mustWork=TRUE)
  treetag_directory <- normalizePath("C:/TreeTagger",winslash="\\", mustWork=TRUE)
} else if (Location == 3) {
  #setwd("//tsclient/C/Research_temp3/")
  input_directory <- normalizePath("H:/Research/Mutual_Fund_Letters/Data", winslash = "\\", mustWork = TRUE)
  #output_directory <- normalizePath("//tsclient/C/Research_temp3", winslash = "\\", mustWork = TRUE)
  output_directory <- normalizePath("C:/Users/bdaughdr/Documents/Research_temp3",winslash="\\", mustWork=TRUE)
  function_directory <- normalizePath("//tsclient/C/Users/S.Brad/Dropbox/Research_Methods/R", winslash = "\\", mustWork = TRUE)
  treetag_directory <- normalizePath("//tsclient/C/TreeTagger",winslash="\\", mustWork=TRUE)
} else if (Location == 4) {
  #setwd("//tsclient/C/Research_temp3/")
  input_directory <- normalizePath("H:/Research/Mutual_Fund_Letters/Data", winslash = "\\", mustWork = TRUE)
  #output_directory <- normalizePath("//tsclient/C/Research_temp3", winslash = "\\", mustWork = TRUE)
  output_directory <- normalizePath("C:/Users/bdaughdr/Documents/Research_temp3",winslash="\\", mustWork=TRUE)
  function_directory <- normalizePath("//tsclient/C/Users/bdaughdr/Dropbox/Research_Methods/R", winslash = "\\", mustWork = TRUE)
  treetag_directory <- normalizePath("//tsclient/C/TreeTagger",winslash="\\", mustWork=TRUE)
} else {
  cat("ERROR ASSIGNING DIRECTORIES", "\n")
}
rm(Location)
###############################################################################
# FUNCTIONS;
cat("SECTION: FUNCTIONS", "\n")
###############################################################################
#source(file=paste(function_directory,"functions_db.R",sep="\\"),echo=FALSE)
#source(file=paste(function_directory,"functions_statistics.R",sep="\\"),echo=FALSE)
#source(file=paste(function_directory,"functions_text_analysis.R",sep="\\"),echo=FALSE)
# Presumably defines create_directory(), load_external_packages() and
# list_installed_packages() used below -- confirm in functions_utilities.R.
source(file=paste(function_directory,"functions_utilities.R",sep="\\"),echo=FALSE)
###############################################################################
# LIBRARIES;
cat("SECTION: LIBRARIES", "\n")
###############################################################################
#Load External Packages
#external_packages <- c("compare","cwhmisc","data.table","fastmatch","foreign","formatR","gdata","gtools",
# "Hmisc","koRpus","mitools","pbapply","plyr","R.oo","reshape2","rJava","RWeka","RWekajars",
# "Snowball","sqldf","stringr","tcltk","tm")
#external_packages <- c("httr","rjson","RCurl","ROAuth","selectr","XML")
# Only data.table and RCurl are actually required by this script.
external_packages <- c("data.table","RCurl")
invisible(unlist(sapply(external_packages,load_external_packages, repo_str=repo, simplify=FALSE, USE.NAMES=FALSE)))
installed_packages <- list_installed_packages(external_packages)
#=====================================================================;
#PARAMETERS;
#=====================================================================;
#If using windows, set to "\\" - if mac (or unix), set to "/";
slash <- "\\"
#First year you want index files for:
startyear <- 1993
#Last year you want index files for:
endyear <- 2013
#First qtr you want index files for (usually 1):
startqtr <- 1
#Last qtr you want index files for (usually 4):
endqtr <- 4
#Output folder:
indexfolder <- "full-index"
#FTP address
ftp <- "ftp.sec.gov"
#=====================================================================;
#BEGIN SCRIPT;
cat("Begin Script \n")
#=====================================================================;
#Check to see if output directory exists. If not, create it.
create_directory(output_directory,remove=1)
#Check to see if index folder exists. If not, create it.
index_folder_path <- paste(output_directory, indexfolder, sep = slash, collapse = slash)
create_directory(index_folder_path,remove=1)
#Get files loop- The program will loop through each year specified.
#Note that the counter (yr) starts with a value equal to start year and increments by 1 each time through. The loop terminates after the counter exceeds $endyear.
#Download each quarterly EDGAR company index: for every year/quarter pair,
#fetch company.zip, unzip company.idx, and rename it to companyYYYYQ.idx so
#successive quarters do not overwrite each other. (The former dead seed
#assignments yr <- startyear / qtr <- startqtr were removed: for() binds its
#own loop variable.)
for (yr in startyear:endyear)
{
  cat(yr,"\n")
  #Last quarter to fetch for this year: every year before the final one has
  #all four quarters; the final year stops at endqtr.
  if (yr<endyear)
  {
    eqtr <- 4
  } else
  {
    eqtr <- endqtr
  }
  #BUG FIX: the inner loop previously iterated over startqtr:endqtr, which
  #ignored the eqtr computed above and always fetched four quarters of the
  #final year regardless of endqtr.
  for (qtr in startqtr:eqtr)
  {
    cat(qtr,"\n")
    filetoget <- paste("edgar/full-index/",yr,"/QTR",qtr,"/company.zip",sep="")
    fonly <- paste(output_directory,slash,indexfolder,slash,"company",yr,qtr,".zip",sep="")
    fidx <- paste(output_directory,slash,indexfolder,slash,"company",yr,qtr,".idx",sep="")
    #mode="wb" is required: the payload is a binary zip archive.
    download.file(paste("ftp://",ftp,"/",filetoget,sep=""), fonly, quiet = FALSE, mode = "wb",cacheOK = TRUE)
    # unzip the index file from the archive
    unzip( fonly , files="company.idx",exdir = paste(output_directory, indexfolder, sep = slash, collapse = slash) )
    # rename so the extracted file carries its year and quarter
    file.rename(paste(output_directory,indexfolder,"company.idx",sep=slash), fidx)
  }
}
| /Download_Index_Files.R | no_license | sbraddaughdrill/Download_Edgar_Filings | R | false | false | 6,899 | r | # TODO: Add comment
#
# Author: Brad
# File: Download_Index_Files.R
# Version: 1.0
# Date: 02.5.2014
# Purpose: This file downloads the index files from Edgar. Edgar has an index file for each quarter and
# year so you need to grab each one.
#
###############################################################################
###############################################################################
# INITIAL SETUP;
# Resets the session, sets global options, and resolves the machine-specific
# directory layout via the Location switch below.
cat("SECTION: INITIAL SETUP", "\n")
###############################################################################
# Clear workspace
# NOTE(review): rm(list = ls()) wipes the whole interactive session; this is
# only acceptable because the file is run as a standalone batch script.
rm(list = ls(all = TRUE))
# Limit History to not exceed 50 lines
Sys.setenv(R_HISTSIZE = 500)
# CRAN mirror used for any package installation triggered later.
repo <- c("http://cran.us.r-project.org")
options(repos = structure(repo))
options(install.packages.check.source = FALSE)
# String as factors is False -- used for read.csv
options(StringsAsFactors = FALSE)
# Default maxprint option
options(max.print = 500)
# options(max.print=99999)
# Memory limit
#memory.limit(size = 8183)
# Set location (1=HOME,2=WORK,3=CORALSEA FROM HOME,4=CORALSEA FROM WORK) Location <- 1
Location <- 1
# Every branch assigns the same four path variables (input_directory,
# output_directory, function_directory, treetag_directory); only the
# machine-specific prefixes differ.
if (Location == 1) {
  #setwd("C:/Research_temp3/")
  input_directory <- normalizePath("C:/Users/S.Brad/Dropbox/Research/Fund_Letters/Data",winslash="\\", mustWork=TRUE)
  output_directory <- normalizePath("F:/Research_temp3",winslash="\\", mustWork=TRUE)
  function_directory <- normalizePath("C:/Users/S.Brad/Dropbox/Research_Methods/R", winslash = "\\", mustWork = TRUE)
  treetag_directory <- normalizePath("C:/TreeTagger",winslash="\\", mustWork=TRUE)
} else if (Location == 2) {
  #setwd("C:/Research_temp3/")
  input_directory <- normalizePath("C:/Users/bdaughdr/Dropbox/Research/Fund_Letters/Data",winslash="\\", mustWork=TRUE)
  output_directory <- normalizePath("C:/Research_temp3",winslash="\\", mustWork=TRUE)
  function_directory <- normalizePath("C:/Users/bdaughdr/Dropbox/Research_Methods/R",winslash="\\", mustWork=TRUE)
  treetag_directory <- normalizePath("C:/TreeTagger",winslash="\\", mustWork=TRUE)
} else if (Location == 3) {
  #setwd("//tsclient/C/Research_temp3/")
  input_directory <- normalizePath("H:/Research/Mutual_Fund_Letters/Data", winslash = "\\", mustWork = TRUE)
  #output_directory <- normalizePath("//tsclient/C/Research_temp3", winslash = "\\", mustWork = TRUE)
  output_directory <- normalizePath("C:/Users/bdaughdr/Documents/Research_temp3",winslash="\\", mustWork=TRUE)
  function_directory <- normalizePath("//tsclient/C/Users/S.Brad/Dropbox/Research_Methods/R", winslash = "\\", mustWork = TRUE)
  treetag_directory <- normalizePath("//tsclient/C/TreeTagger",winslash="\\", mustWork=TRUE)
} else if (Location == 4) {
  #setwd("//tsclient/C/Research_temp3/")
  input_directory <- normalizePath("H:/Research/Mutual_Fund_Letters/Data", winslash = "\\", mustWork = TRUE)
  #output_directory <- normalizePath("//tsclient/C/Research_temp3", winslash = "\\", mustWork = TRUE)
  output_directory <- normalizePath("C:/Users/bdaughdr/Documents/Research_temp3",winslash="\\", mustWork=TRUE)
  function_directory <- normalizePath("//tsclient/C/Users/bdaughdr/Dropbox/Research_Methods/R", winslash = "\\", mustWork = TRUE)
  treetag_directory <- normalizePath("//tsclient/C/TreeTagger",winslash="\\", mustWork=TRUE)
} else {
  cat("ERROR ASSIGNING DIRECTORIES", "\n")
}
rm(Location)
###############################################################################
# FUNCTIONS;
cat("SECTION: FUNCTIONS", "\n")
###############################################################################
#source(file=paste(function_directory,"functions_db.R",sep="\\"),echo=FALSE)
#source(file=paste(function_directory,"functions_statistics.R",sep="\\"),echo=FALSE)
#source(file=paste(function_directory,"functions_text_analysis.R",sep="\\"),echo=FALSE)
# Presumably defines create_directory(), load_external_packages() and
# list_installed_packages() used below -- confirm in functions_utilities.R.
source(file=paste(function_directory,"functions_utilities.R",sep="\\"),echo=FALSE)
###############################################################################
# LIBRARIES;
cat("SECTION: LIBRARIES", "\n")
###############################################################################
#Load External Packages
#external_packages <- c("compare","cwhmisc","data.table","fastmatch","foreign","formatR","gdata","gtools",
# "Hmisc","koRpus","mitools","pbapply","plyr","R.oo","reshape2","rJava","RWeka","RWekajars",
# "Snowball","sqldf","stringr","tcltk","tm")
#external_packages <- c("httr","rjson","RCurl","ROAuth","selectr","XML")
# Only data.table and RCurl are actually required by this script.
external_packages <- c("data.table","RCurl")
invisible(unlist(sapply(external_packages,load_external_packages, repo_str=repo, simplify=FALSE, USE.NAMES=FALSE)))
installed_packages <- list_installed_packages(external_packages)
#=====================================================================;
#PARAMETERS;
#=====================================================================;
#If using windows, set to "\\" - if mac (or unix), set to "/";
slash <- "\\"
#First year you want index files for:
startyear <- 1993
#Last year you want index files for:
endyear <- 2013
#First qtr you want index files for (usually 1):
startqtr <- 1
#Last qtr you want index files for (usually 4):
endqtr <- 4
#Output folder:
indexfolder <- "full-index"
#FTP address
ftp <- "ftp.sec.gov"
#=====================================================================;
#BEGIN SCRIPT;
cat("Begin Script \n")
#=====================================================================;
#Check to see if output directory exists. If not, create it.
create_directory(output_directory,remove=1)
#Check to see if index folder exists. If not, create it.
index_folder_path <- paste(output_directory, indexfolder, sep = slash, collapse = slash)
create_directory(index_folder_path,remove=1)
#Get files loop- The program will loop through each year specified.
#Note that the counter (yr) starts with a value equal to start year and increments by 1 each time through. The loop terminates after the counter exceeds $endyear.
yr <- startyear
qtr <- startqtr
for (yr in startyear:endyear)
{
#yr <- startyear
cat(yr,"\n")
if (yr<endyear)
{
eqtr <- 4
} else
{
eqtr <- endqtr
}
for (qtr in startqtr:endqtr)
{
#qtr <- startqtr
cat(qtr,"\n")
filetoget <- paste("edgar/full-index/",yr,"/QTR",qtr,"/company.zip",sep="")
fonly <- paste(output_directory,slash,indexfolder,slash,"company",yr,qtr,".zip",sep="")
fidx <- paste(output_directory,slash,indexfolder,slash,"company",yr,qtr,".idx",sep="")
download.file(paste("ftp://",ftp,"/",filetoget,sep=""), fonly, quiet = FALSE, mode = "wb",cacheOK = TRUE)
# unzip the files
unzip( fonly , files="company.idx",exdir = paste(output_directory, indexfolder, sep = slash, collapse = slash) )
# rename file
file.rename(paste(output_directory,indexfolder,"company.idx",sep=slash), fidx)
}
}
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
######################################################################
# Test for PUB-769
# Ensure that the number of rows scored in the CM for binary classes is == number of rows in the dataset
######################################################################
options(echo=TRUE)
# Train a random forest on the altered prostate data and verify that the
# confusion matrix accounts for every row of the training frame.
test.pub.767 <- function() {
  Log.info('Importing the altered prostatetype data from smalldata.')
  prostate_hex <- h2o.importFile(normalizePath(locate('smalldata/logreg/prostate.csv')), 'prostate')
  Log.info('Print head of dataset')
  Log.info(head(prostate_hex))
  # The response column must be categorical (Enum) for classification.
  prostate_hex[, 2] <- as.factor(prostate_hex[, 2])
  rf_model <- h2o.randomForest(x = 3:8, y = 2, training_frame = prostate_hex,
                               ntrees = 500, max_depth = 100)
  Log.info("Number of rows in the confusion matrix for AUC:")
  rf_perf <- h2o.performance(rf_model)
  print(h2o.confusionMatrix(rf_perf))
  print("Number of rows in the prostate dataset:")
  print(nrow(prostate_hex))
  # The two class columns of CM row 3 must sum to the dataset size.
  expect_equal(sum(h2o.confusionMatrix(rf_model)[3, 1:2]), nrow(prostate_hex))
}
doTest("PUB-767: randomForest on discontinuous integer classes.", test.pub.767)
| /h2o-r/tests/testdir_jira/runit_pub_769_cm_speedrf.R | permissive | h2oai/h2o-3 | R | false | false | 1,217 | r | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
######################################################################
# Test for PUB-769
# Ensure that the number of rows scored in the CM for binary classes is == number of rows in the dataset
######################################################################
options(echo=TRUE)
# Train a random forest on the altered prostate data and verify that the
# confusion matrix accounts for every row of the training frame.
test.pub.767 <- function() {
  Log.info('Importing the altered prostatetype data from smalldata.')
  prostate_hex <- h2o.importFile(normalizePath(locate('smalldata/logreg/prostate.csv')), 'prostate')
  Log.info('Print head of dataset')
  Log.info(head(prostate_hex))
  # The response column must be categorical (Enum) for classification.
  prostate_hex[, 2] <- as.factor(prostate_hex[, 2])
  rf_model <- h2o.randomForest(x = 3:8, y = 2, training_frame = prostate_hex,
                               ntrees = 500, max_depth = 100)
  Log.info("Number of rows in the confusion matrix for AUC:")
  rf_perf <- h2o.performance(rf_model)
  print(h2o.confusionMatrix(rf_perf))
  print("Number of rows in the prostate dataset:")
  print(nrow(prostate_hex))
  # The two class columns of CM row 3 must sum to the dataset size.
  expect_equal(sum(h2o.confusionMatrix(rf_model)[3, 1:2]), nrow(prostate_hex))
}
doTest("PUB-767: randomForest on discontinuous integer classes.", test.pub.767)
|
library(tidyverse)

# CSCI scores (Calvin's); station codes coerced to character for joining
scores <- read.csv('ignore/csci_toread.csv')
scores$StationCode <- as.character(scores$StationCode)

# Rafi's station-code-to-COMID link table, deduplicated
links <- read.csv('ignore/csci_061917.csv')
links <- unique(
  links %>%
    select(StationCode, COMID) %>%
    mutate(
      StationCode = as.character(StationCode),
      COMID = as.character(COMID)
    )
)

# statewide COMID by reach expectation (binds `comid`)
load(file = 'data/comid_statewide.RData')
expects <- comid %>%
  select(COMID, core0.50) %>%
  mutate(COMID = as.character(COMID))

# join scores with expectations and compute the score/expectation difference
out <- scores %>%
  left_join(links, by = 'StationCode') %>%
  left_join(expects, by = 'COMID') %>%
  mutate(CSCIdiff = CSCI - core0.50)

# save
write.csv(out, file = 'ignore/CSCI_diff.csv', row.names = F)
| /R/csci_comid_join.R | no_license | SCCWRP/SGRRMP | R | false | false | 739 | r | library(tidyverse)
# CSCI scores (Calvin's); station codes coerced to character for joining
scores <- read.csv('ignore/csci_toread.csv')
scores$StationCode <- as.character(scores$StationCode)

# Rafi's station-code-to-COMID link table, deduplicated
links <- read.csv('ignore/csci_061917.csv')
links <- unique(
  links %>%
    select(StationCode, COMID) %>%
    mutate(
      StationCode = as.character(StationCode),
      COMID = as.character(COMID)
    )
)

# statewide COMID by reach expectation (binds `comid`)
load(file = 'data/comid_statewide.RData')
expects <- comid %>%
  select(COMID, core0.50) %>%
  mutate(COMID = as.character(COMID))

# join scores with expectations and compute the score/expectation difference
out <- scores %>%
  left_join(links, by = 'StationCode') %>%
  left_join(expects, by = 'COMID') %>%
  mutate(CSCIdiff = CSCI - core0.50)

# save
write.csv(out, file = 'ignore/CSCI_diff.csv', row.names = F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acmpca_operations.R
\name{acmpca_describe_certificate_authority}
\alias{acmpca_describe_certificate_authority}
\title{Lists information about your private certificate authority (CA)}
\usage{
acmpca_describe_certificate_authority(CertificateAuthorityArn)
}
\arguments{
\item{CertificateAuthorityArn}{[required] The Amazon Resource Name (ARN) that was returned when you called
CreateCertificateAuthority. This must be of the form:
\verb{arn:aws:acm-pca:<i>region</i>:<i>account</i>:certificate-authority/<i>12345678-1234-1234-1234-123456789012</i> }.}
}
\description{
Lists information about your private certificate authority (CA). You
specify the private CA on input by its ARN (Amazon Resource Name). The
output contains the status of your CA. This can be any of the following:
}
\details{
\itemize{
\item \code{CREATING} - ACM Private CA is creating your private certificate
authority.
\item \code{PENDING_CERTIFICATE} - The certificate is pending. You must use
your ACM Private CA-hosted or on-premises root or subordinate CA to
sign your private CA CSR and then import it into PCA.
\item \code{ACTIVE} - Your private CA is active.
\item \code{DISABLED} - Your private CA has been disabled.
\item \code{EXPIRED} - Your private CA certificate has expired.
\item \code{FAILED} - Your private CA has failed. Your CA can fail because of
problems such a network outage or backend AWS failure or other
errors. A failed CA can never return to the pending state. You must
create a new CA.
\item \code{DELETED} - Your private CA is within the restoration period, after
which it is permanently deleted. The length of time remaining in the
CA\'s restoration period is also included in this action\'s output.
}
}
\section{Request syntax}{
\preformatted{svc$describe_certificate_authority(
CertificateAuthorityArn = "string"
)
}
}
\keyword{internal}
| /cran/paws.security.identity/man/acmpca_describe_certificate_authority.Rd | permissive | johnnytommy/paws | R | false | true | 1,922 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acmpca_operations.R
\name{acmpca_describe_certificate_authority}
\alias{acmpca_describe_certificate_authority}
\title{Lists information about your private certificate authority (CA)}
\usage{
acmpca_describe_certificate_authority(CertificateAuthorityArn)
}
\arguments{
\item{CertificateAuthorityArn}{[required] The Amazon Resource Name (ARN) that was returned when you called
CreateCertificateAuthority. This must be of the form:
\verb{arn:aws:acm-pca:<i>region</i>:<i>account</i>:certificate-authority/<i>12345678-1234-1234-1234-123456789012</i> }.}
}
\description{
Lists information about your private certificate authority (CA). You
specify the private CA on input by its ARN (Amazon Resource Name). The
output contains the status of your CA. This can be any of the following:
}
\details{
\itemize{
\item \code{CREATING} - ACM Private CA is creating your private certificate
authority.
\item \code{PENDING_CERTIFICATE} - The certificate is pending. You must use
your ACM Private CA-hosted or on-premises root or subordinate CA to
sign your private CA CSR and then import it into PCA.
\item \code{ACTIVE} - Your private CA is active.
\item \code{DISABLED} - Your private CA has been disabled.
\item \code{EXPIRED} - Your private CA certificate has expired.
\item \code{FAILED} - Your private CA has failed. Your CA can fail because of
problems such a network outage or backend AWS failure or other
errors. A failed CA can never return to the pending state. You must
create a new CA.
\item \code{DELETED} - Your private CA is within the restoration period, after
which it is permanently deleted. The length of time remaining in the
CA\'s restoration period is also included in this action\'s output.
}
}
\section{Request syntax}{
\preformatted{svc$describe_certificate_authority(
CertificateAuthorityArn = "string"
)
}
}
\keyword{internal}
|
#' Create a compound mapping
#'
#' Create a compound mapping by merging the effect of individual mappings.
#'
#'
#' The following fields are required in the parameter list to initialize the mapping:
#' \tabular{ll}{
#' \code{mapname} \tab Name of the mapping \cr
#' \code{maptype} \tab Must be \code{"compound_map"} \cr
#' \code{maps} \tab a list with the parameter lists of the individual maps
#' }
#'
#' This mapping will also take care of the correct ordering of the individual
#' mappings.
#' Bundling together individual mapping specifications to a compound map is
#' the last step before doing Bayesian inference.
#'
#' @return
#' Returns a list of functions to operate with the mapping, see \code{\link{create_maptype_map}}.
#' @export
#' @family mappings
#'
#' @examples
#' params1 <- list(
#' mapname = "mymap1",
#' maptype = "linearinterpol_map",
#' src_idx = 1:3,
#' tar_idx = 7:8,
#' src_x = 1:3,
#' tar_x = 2:3
#' )
#' params2 <- list(
#' mapname = "mymap2",
#' maptype = "linearinterpol_map",
#' src_idx = 4:6,
#' tar_idx = 7:8,
#' src_x = 1:3,
#' tar_x = 2:3
#' )
#' compmap_params <- list(
#' mapname = "mycompmap",
#' maptype = "compound_map",
#' maps = list(params1, params2)
#' )
#' mycompmap <- create_compound_map()
#' mycompmap$setup(compmap_params)
#' x <- c(1,2,3,5,5,5,0,0)
#' mycompmap$propagate(x)
#' mycompmap$jacobian(x)
#'
#'
create_compound_map <- function() {
    ## Closure state shared by all functions returned below.
    mapinfo <- list()
    map_list <- list()
    pure_sources <- NULL
    pure_targets <- NULL
    setup <- function(params) {
        stopifnot(!is.null(params$maps))
        ## Instantiate each individual map, decorating creation errors
        ## with the offending map's name and type.
        maps <- lapply(params$maps, function(curparams) {
            tryCatch(create_map(curparams), error = function(e) {
                e$message <- paste0(e$message, " (during creation of map with name ",
                                    curparams$mapname, " of type ", curparams$maptype)
                stop(e)
            })
        })
        max_idx <- 0
        # ensure no duplicates in indices
        for (i in seq_along(maps)) {
            if (anyDuplicated(maps[[i]]$get_src_idx()))
                stop(paste("Duplicate source indices in map", i))
            ## BUG FIX: the message below used the undefined variable `j`,
            ## which would itself raise "object 'j' not found".
            if (anyDuplicated(maps[[i]]$get_tar_idx()))
                stop(paste("Duplicate target indices in map", i))
            tryCatch({
                max_idx <- max(max_idx, maps[[i]]$get_src_idx(), maps[[i]]$get_tar_idx())
            }, error = function(e) {
                e$message <- paste0(e$message, " (in map with name ", maps[[i]]$getName(),
                                    " of type ", maps[[i]]$getType())
                stop(e)
            })
        }
        # determine indices which are pure sources (never a target)
        is_dest <- rep(FALSE, max_idx)
        is_src <- rep(FALSE, max_idx)
        for (curmap in maps) {
            is_dest[curmap$get_tar_idx()] <- TRUE
            is_src[curmap$get_src_idx()] <- TRUE
        }
        pure_sources <<- which(is_src & !is_dest)
        pure_targets <<- which(is_dest)
        map_list <<- order_maps(maps)
        ## BUG FIX: use `<<-` so the closure state is updated; with plain
        ## `<-` a copy local to setup() was modified and
        ## getName()/getDescription() always returned NULL.
        mapinfo[["mapname"]] <<- params[["mapname"]]
        mapinfo[["description"]] <<- params[["description"]]
    }
    getType <- function() {
        return("compound_map")
    }
    getName <- function() {
        return(mapinfo[["mapname"]])
    }
    getDescription <- function() {
        return(mapinfo[["description"]])
    }
    getMaps <- function() {
        return(map_list)
    }
    get_src_idx <- function() {
        return(pure_sources)
    }
    get_tar_idx <- function() {
        return(pure_targets)
    }
    get_map_order <- function() {
        return(unlist(lapply(map_list, function(x) x$getName())))
    }
    propagate <- function(x, with.id = TRUE) {
        res <- x
        if (!with.id) {
            res[-pure_sources] <- 0
        }
        initialres <- res
        ## Apply the individual maps in dependency order, accumulating
        ## their contributions onto the target indices.
        for (curmap in map_list) {
            cur_src_idx <- curmap$get_src_idx()
            cur_tar_idx <- curmap$get_tar_idx()
            # special treatment of maps with overlapping src and tar idcs
            self_map_mask <- get_selfmap_mask(cur_src_idx, cur_tar_idx, n=length(x))
            res[self_map_mask] <- res[self_map_mask] - initialres[self_map_mask]
            tmpres <- res
            res <- curmap$propagate(res, FALSE)
            res[!self_map_mask] <- res[!self_map_mask] + tmpres[!self_map_mask]
            res[self_map_mask] <- res[self_map_mask] + initialres[self_map_mask]
        }
        if (!with.id) {
            res[pure_sources] <- 0
        }
        return(res)
    }
    jacobian <- function(x, with.id = TRUE) {
        S <- NULL
        if (!with.id) {
            x[-pure_sources] <- 0
        }
        orig_x <- x
        ## Chain rule: the compound Jacobian is the product of the
        ## individual Jacobians, evaluated at the propagated values.
        for (curmap in map_list) {
            if (is.null(S))
            {
                if (with.id) {
                    S <- curmap$jacobian(x, TRUE)
                } else {
                    S <- curmap$jacobian(x, FALSE)
                    diag_els <- rep(0, length(x))
                    diag_els[pure_sources] <- 1
                    S <- S + Diagonal(x = diag_els)
                }
            }
            else
            {
                ## Indices mapped onto themselves need the identity
                ## contribution removed before, and restored after,
                ## multiplying in this map's Jacobian.
                cur_src_idx <- curmap$get_src_idx()
                cur_tar_idx <- curmap$get_tar_idx()
                selfmap_mask <- get_selfmap_mask(cur_src_idx, cur_tar_idx, n=length(x))
                diag(S)[selfmap_mask] <- 0
                x[selfmap_mask] <- x[selfmap_mask] - orig_x[selfmap_mask]
                curS <- curmap$jacobian(x, TRUE)
                diag(curS)[selfmap_mask] <- diag(curS)[selfmap_mask] - 1
                S <- curS %*% S
                if (with.id) {
                    diag(S)[selfmap_mask] <- 1
                    x[selfmap_mask] <- x[selfmap_mask] + orig_x[selfmap_mask]
                }
            }
            x <- curmap$propagate(x, with.id=TRUE)
        }
        if (!with.id) {
            diag(S)[pure_sources] <- 0
        }
        return(S)
    }
    list(setup = setup,
         getType = getType,
         getName = getName,
         getMaps = getMaps,
         getDescription = getDescription,
         get_map_order = get_map_order,
         get_src_idx = get_src_idx,
         get_tar_idx = get_tar_idx,
         propagate = propagate,
         jacobian = jacobian)
}
| /R/map_compound.R | permissive | gschnabel/nucdataBaynet | R | false | false | 5,692 | r | #' Create a compound mapping
#'
#' Create a compound mapping by merging the effect of individual mappings.
#'
#'
#' The following fields are required in the parameter list to initialize the mapping:
#' \tabular{ll}{
#' \code{mapname} \tab Name of the mapping \cr
#' \code{maptype} \tab Must be \code{"compound_map"} \cr
#' \code{maps} \tab a list with the parameter lists of the individual maps
#' }
#'
#' This mapping will also take care of the correct ordering of the individual
#' mappings.
#' Bundling together individual mapping specifications to a compound map is
#' the last step before doing Bayesian inference.
#'
#' @return
#' Returns a list of functions to operate with the mapping, see \code{\link{create_maptype_map}}.
#' @export
#' @family mappings
#'
#' @examples
#' params1 <- list(
#' mapname = "mymap1",
#' maptype = "linearinterpol_map",
#' src_idx = 1:3,
#' tar_idx = 7:8,
#' src_x = 1:3,
#' tar_x = 2:3
#' )
#' params2 <- list(
#' mapname = "mymap2",
#' maptype = "linearinterpol_map",
#' src_idx = 4:6,
#' tar_idx = 7:8,
#' src_x = 1:3,
#' tar_x = 2:3
#' )
#' compmap_params <- list(
#' mapname = "mycompmap",
#' maptype = "compound_map",
#' maps = list(params1, params2)
#' )
#' mycompmap <- create_compound_map()
#' mycompmap$setup(compmap_params)
#' x <- c(1,2,3,5,5,5,0,0)
#' mycompmap$propagate(x)
#' mycompmap$jacobian(x)
#'
#'
create_compound_map <- function() {
    ## Closure state shared by all functions returned below.
    mapinfo <- list()
    map_list <- list()
    pure_sources <- NULL
    pure_targets <- NULL
    setup <- function(params) {
        stopifnot(!is.null(params$maps))
        ## Instantiate each individual map, decorating creation errors
        ## with the offending map's name and type.
        maps <- lapply(params$maps, function(curparams) {
            tryCatch(create_map(curparams), error = function(e) {
                e$message <- paste0(e$message, " (during creation of map with name ",
                                    curparams$mapname, " of type ", curparams$maptype)
                stop(e)
            })
        })
        max_idx <- 0
        # ensure no duplicates in indices
        for (i in seq_along(maps)) {
            if (anyDuplicated(maps[[i]]$get_src_idx()))
                stop(paste("Duplicate source indices in map", i))
            ## BUG FIX: the message below used the undefined variable `j`,
            ## which would itself raise "object 'j' not found".
            if (anyDuplicated(maps[[i]]$get_tar_idx()))
                stop(paste("Duplicate target indices in map", i))
            tryCatch({
                max_idx <- max(max_idx, maps[[i]]$get_src_idx(), maps[[i]]$get_tar_idx())
            }, error = function(e) {
                e$message <- paste0(e$message, " (in map with name ", maps[[i]]$getName(),
                                    " of type ", maps[[i]]$getType())
                stop(e)
            })
        }
        # determine indices which are pure sources (never a target)
        is_dest <- rep(FALSE, max_idx)
        is_src <- rep(FALSE, max_idx)
        for (curmap in maps) {
            is_dest[curmap$get_tar_idx()] <- TRUE
            is_src[curmap$get_src_idx()] <- TRUE
        }
        pure_sources <<- which(is_src & !is_dest)
        pure_targets <<- which(is_dest)
        map_list <<- order_maps(maps)
        ## BUG FIX: use `<<-` so the closure state is updated; with plain
        ## `<-` a copy local to setup() was modified and
        ## getName()/getDescription() always returned NULL.
        mapinfo[["mapname"]] <<- params[["mapname"]]
        mapinfo[["description"]] <<- params[["description"]]
    }
    getType <- function() {
        return("compound_map")
    }
    getName <- function() {
        return(mapinfo[["mapname"]])
    }
    getDescription <- function() {
        return(mapinfo[["description"]])
    }
    getMaps <- function() {
        return(map_list)
    }
    get_src_idx <- function() {
        return(pure_sources)
    }
    get_tar_idx <- function() {
        return(pure_targets)
    }
    get_map_order <- function() {
        return(unlist(lapply(map_list, function(x) x$getName())))
    }
    propagate <- function(x, with.id = TRUE) {
        res <- x
        if (!with.id) {
            res[-pure_sources] <- 0
        }
        initialres <- res
        ## Apply the individual maps in dependency order, accumulating
        ## their contributions onto the target indices.
        for (curmap in map_list) {
            cur_src_idx <- curmap$get_src_idx()
            cur_tar_idx <- curmap$get_tar_idx()
            # special treatment of maps with overlapping src and tar idcs
            self_map_mask <- get_selfmap_mask(cur_src_idx, cur_tar_idx, n=length(x))
            res[self_map_mask] <- res[self_map_mask] - initialres[self_map_mask]
            tmpres <- res
            res <- curmap$propagate(res, FALSE)
            res[!self_map_mask] <- res[!self_map_mask] + tmpres[!self_map_mask]
            res[self_map_mask] <- res[self_map_mask] + initialres[self_map_mask]
        }
        if (!with.id) {
            res[pure_sources] <- 0
        }
        return(res)
    }
    jacobian <- function(x, with.id = TRUE) {
        S <- NULL
        if (!with.id) {
            x[-pure_sources] <- 0
        }
        orig_x <- x
        ## Chain rule: the compound Jacobian is the product of the
        ## individual Jacobians, evaluated at the propagated values.
        for (curmap in map_list) {
            if (is.null(S))
            {
                if (with.id) {
                    S <- curmap$jacobian(x, TRUE)
                } else {
                    S <- curmap$jacobian(x, FALSE)
                    diag_els <- rep(0, length(x))
                    diag_els[pure_sources] <- 1
                    S <- S + Diagonal(x = diag_els)
                }
            }
            else
            {
                ## Indices mapped onto themselves need the identity
                ## contribution removed before, and restored after,
                ## multiplying in this map's Jacobian.
                cur_src_idx <- curmap$get_src_idx()
                cur_tar_idx <- curmap$get_tar_idx()
                selfmap_mask <- get_selfmap_mask(cur_src_idx, cur_tar_idx, n=length(x))
                diag(S)[selfmap_mask] <- 0
                x[selfmap_mask] <- x[selfmap_mask] - orig_x[selfmap_mask]
                curS <- curmap$jacobian(x, TRUE)
                diag(curS)[selfmap_mask] <- diag(curS)[selfmap_mask] - 1
                S <- curS %*% S
                if (with.id) {
                    diag(S)[selfmap_mask] <- 1
                    x[selfmap_mask] <- x[selfmap_mask] + orig_x[selfmap_mask]
                }
            }
            x <- curmap$propagate(x, with.id=TRUE)
        }
        if (!with.id) {
            diag(S)[pure_sources] <- 0
        }
        return(S)
    }
    list(setup = setup,
         getType = getType,
         getName = getName,
         getMaps = getMaps,
         getDescription = getDescription,
         get_map_order = get_map_order,
         get_src_idx = get_src_idx,
         get_tar_idx = get_tar_idx,
         propagate = propagate,
         jacobian = jacobian)
}
|
##' Returns a \code{data.frame} of amino acid properties: \code{AA},
##' \code{ResidueMass}, \code{Abbrev3}, \code{ImmoniumIonMass},
##' \code{Name}, \code{Hydrophobicity}, \code{Hydrophilicity},
##' \code{SideChainMass}, \code{pK1}, \code{pK2} and \code{pI}.
##'
##' @title Amino acids
##' @return A \code{data.frame}
##' @author Laurent Gatto
##' @examples
##' get.amino.acids()
get.amino.acids <- function() {
    ## Public accessor; the actual lookup lives in .get.amino.acids().
    .get.amino.acids()
}
## Fetch the amino acid property table from the package environment.
.get.amino.acids <- function()
    get("amino.acids", envir = .MSnbaseEnv)
##' Returns a \code{double} of used atomic mass.
##'
##' @title Atomic mass.
##' @return A named \code{double}.
##' @author Sebastian Gibb
##' @examples
##' get.atomic.mass()
get.atomic.mass <- function() {
    ## Public accessor; the actual lookup lives in .get.atomic.mass().
    .get.atomic.mass()
}
## Fetch the named vector of atomic masses from the package environment.
.get.atomic.mass <- function()
    get("atomic.mass", envir = .MSnbaseEnv)
formatRt <- function(rt) {
    ## Convert retention times between numeric seconds and "min:sec"
    ## character representations (vectorised in both directions):
    ##  - numeric input: seconds -> "m:ss"
    ##  - character input: "m:ss" -> seconds
    ## Any other input yields NA with a warning.
    ans <- NA
    if (is.numeric(rt)) {
        min <- floor(rt/60)
        sec <- round(rt-(min*60))
        ans <- sprintf("%d:%02d", min, sec)
    } else if (is.character(rt)) {
        ## vapply instead of sapply guarantees a numeric return type
        ## even for zero-length input.
        ans <- vapply(strsplit(rt, ":"), function(x) {
            x <- as.numeric(x)
            60 * x[1] + x[2]
        }, numeric(1))
    } else {
        ## BUG FIX: message read "numeric of character".
        warning("Input must be numeric or character.")
    }
    return(ans)
}
#' @param fileIds numeric, could be a vector
#' @param spectrumIds numeric, could be a vector
#' @param nFiles numeric, max number of files
#' @param nSpectra numeric, max number of spectra
#' @return character, in the format F001.S0001
#' @noRd
## Build zero-padded "F001.S0001"-style identifiers from file and
## spectrum ids; padding width is derived from nFiles/nSpectra.
formatFileSpectrumNames <- function(fileIds, spectrumIds,
                                    nFiles=length(fileIds),
                                    nSpectra=length(spectrumIds)) {
    ## widths needed to represent the largest file/spectrum id
    widths <- ceiling(log10(c(nFiles, nSpectra) + 1L))
    if (length(fileIds) != 1L && length(spectrumIds) != length(fileIds)) {
        stop("Length of 'fileIds' has to be one or equal to ",
             "the length of 'spectrumIds'.")
    }
    fmt <- paste0("F%0", widths[1L], "d.S%0", widths[2L], "d")
    sprintf(fmt, fileIds, spectrumIds)
}
## Zero all centroided intensities at or below threshold t.
utils.removePeaks_centroided <- function(int, t)
    replace(int, int <= t, 0L)
## Remove profile-mode peaks (runs of consecutive non-zero intensities)
## whose maximum does not exceed the threshold t, by zeroing the whole
## run. Relies on IRanges/S4Vectors (as, extractList, replaceROWS).
utils.removePeaks <- function(int, t) {
    ## Each run of TRUE in (int > 0) becomes one IRanges "peak".
    peakRanges <- as(int > 0L, "IRanges")
    ## Peak-wise maxima; a peak is too low when its maximum is <= t.
    toLow <- max(extractList(int, peakRanges)) <= t
    ## Zero out all intensities belonging to the low peaks.
    replaceROWS(int, peakRanges[toLow], 0L)
}
## For internal use - use utils.removePrecMz_Spectrum that will set
## the paramters based on data accessed directly in the spectrum
## object.
## Zero out the peak containing the precursor m/z value. `precMz` must
## be a single numeric; `tolerance` is the relative tolerance used to
## locate the precursor among the m/z values.
utils.removePrecMz <- function(mz, int, precMz, tolerance = 25e-6) {
    if (!is.numeric(precMz) || length(precMz) != 1L) {
        stop("precMz must be numeric of length 1.")
    }
    ## relaxedMatch: project helper, presumably a match() with relative
    ## tolerance returning NA when no m/z is close enough -- TODO confirm.
    i <- relaxedMatch(precMz, mz, tolerance = tolerance)
    if (!is.na(i)) {
        ## Runs of non-zero intensities define the peaks.
        peakRanges <- as(int > 0L, "IRanges")
        ## Find the peak (if any) containing the matched position ...
        i <- findOverlaps(IRanges(i, width = 1L), peakRanges,
                          type = "within", select = "first")
        if (!is.na(i)) {
            ## ... and zero the whole peak.
            int <- replaceROWS(int, peakRanges[i], 0L)
        }
    }
    int
}
## Remove (zero out) the precursor peak from a Spectrum object. When
## `precMz` is NULL it is taken from the spectrum's own precursor m/z.
utils.removePrecMz_Spectrum <- function(spectrum,
                                        precMz = NULL,
                                        tolerance = 25e-6) {
    if (is.null(precMz))
        precMz <- precursorMz(spectrum)
    if (!is.numeric(precMz))
        ## BUG FIX: message previously read "precMz must either 'NULL'".
        stop("precMz must be either 'NULL' or numeric.")
    spectrum@intensity <- utils.removePrecMz(mz(spectrum),
                                             intensity(spectrum),
                                             precMz = precMz,
                                             tolerance = tolerance)
    spectrum
}
## Remove (zero out) the precursor peak from a peak list with elements
## 'mz' and 'int'; delegates to utils.removePrecMz.
## NOTE: a dead `idx <- which(object$mz > precMz[1] & ...)` computation
## (leftover from an earlier range-based implementation) was removed --
## its result was never used.
utils.removePrecMz_list <- function(object, precMz, tolerance = 25e-6) {
    object$int <- utils.removePrecMz(object$mz,
                                     object$int,
                                     precMz = precMz,
                                     tolerance = tolerance)
    object
}
#' Removes zeros from input except the ones that in the direct neighbourhood of
#' non-zero values.
#'
#' @param x \code{numeric}, vector to be cleaned
#' @param all \code{logical}, should all zeros be removed?
#' @param na.rm \code{logical}, should NAs removed before looking for zeros?
#' @return logical vector, \code{TRUE} for keeping the value
#' @note The return value for \code{NA} is always \code{FALSE}.
#' @examples
#' x <- c(1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0)
#' # T, T, F, T, T, T, T, T, T, T, T, F, F
#' r <- c(TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
#' FALSE, FALSE)
#' stopifnot(utils.clean(x) == r)
#' @noRd
utils.clean <- function(x, all = FALSE, na.rm = FALSE) {
    ## Flag which elements to keep: non-zero values and, unless `all`
    ## is TRUE, the zeros directly neighbouring them. NA positions are
    ## never kept (see the function's examples above).
    notNA <- !is.na(x)
    notZero <- x != 0 & notNA
    if (all)
        return(notZero)
    if (na.rm) {
        ## Ignore NAs when looking for neighbours, then re-embed the
        ## result at the non-NA positions.
        notNA[notNA] <- utils.enableNeighbours(notZero[notNA])
        return(notNA)
    }
    utils.enableNeighbours(notZero)
}
#' Switch FALSE to TRUE in the direct neighborhod of TRUE.
#' (used in utils.clean)
#'
#' @param x logical
#' @return logical
#' @examples
#' x <- c(TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
#' FALSE, FALSE)
#' r <- c(TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
#' FALSE, FALSE)
#' stopifnot(utils.enableNeighbours(x) == r)
#' @noRd
utils.enableNeighbours <- function(x) {
    ## Keep a position when it, or either direct neighbour, is TRUE.
    ## (used in utils.clean)
    stopifnot(is.logical(x))
    shifted_left <- c(x[-1L], FALSE)
    shifted_right <- c(FALSE, x[-length(x)])
    x | shifted_left | shifted_right
}
zoom <- function(x, w = 0.05) {
    ## Build an ad hoc ReporterIons object spanning the m/z values in x
    ## (width w), named "xlim" -- handy for zooming plots onto arbitrary
    ## m/z regions.
    new("ReporterIons",
        name = "xlim",
        mz = x,
        width = w,
        reporterNames = paste0("xlim.", x),
        pcol = rep("grey", length(x)))
}
## Build an isotopic impurity (purity correction) matrix, either from a
## CSV file of reporter impurity percentages (`filename`) or from
## built-in defaults selected by the plexing `x` (4 = iTRAQ4, 6 = TMT6,
## 8 = iTRAQ8, 10 = TMT10; anything else gives an identity matrix).
## When `edit = TRUE` the matrix is opened interactively for editing.
makeImpuritiesMatrix <- function(x, filename, edit = TRUE) {
    if (!missing(filename)) {
        ## The CSV holds, per reporter (rows), the percentages leaking
        ## into the -ncharge..+ncharge neighbouring channels (columns).
        m <- read.csv(filename, row.names = 1)
        x <- ncol(m)
        if (ncol(m) != nrow(m))
            stop(paste0("Problem reading impurity matrix. Not square.\n",
                        "Please read '?purityCorrect' for details."))
        ncharge <- x/2
        a <- (x/2)
        b <- (x/2) + 1
        res <- matrix(0, x, x)
        ## Diagonal: percentage staying at the nominal channel.
        diag(res) <- 100 - rowSums(m)
        ## Off-diagonals: spill-over k channels below/above the diagonal,
        ## taken from the corresponding -k/+k columns of m.
        for (k in 1:ncharge) {
            diag(res[(1+k):x, 1:(x-k)]) <- m[(1+k):x, (a-k+1)]
            diag(res[1:(x-k), (1+k):x]) <- m[1:(x-k), (b+k-1)]
        }
        ## test <- matrix(0, 6, 6)
        ## diag(test) <- 100 - rowSums(m)
        ## diag(test[4:6, 1:3]) <- m[4:6, 1] ## col1: -3
        ## diag(test[3:6, 1:4]) <- m[3:6, 2] ## col2: -2
        ## diag(test[2:6, 1:5]) <- m[2:6, 3] ## col3: -1
        ## diag(test[1:5, 2:6]) <- m[1:5, 4] ## col4: +1
        ## diag(test[1:4, 3:6]) <- m[1:4, 5] ## col5: +2
        ## diag(test[1:3, 4:6]) <- m[1:3, 6] ## col6: +3
        ## test <- test/100
        M <- res/100
        rownames(M) <- colnames(M) <- rownames(m)
    } else {
        if (x==4) {
            ## default iTRAQ 4-plex correction factors
            M <- matrix(c(0.929,0.059,0.002,0.000,
                          0.020,0.923,0.056,0.001,
                          0.000,0.030,0.924,0.045,
                          0.000,0.001,0.040,0.923),
                        nrow=4, byrow = TRUE)
            rownames(M) <- colnames(M) <-
                reporterNames(iTRAQ4)
        } else if (x == 6) {
            ## default TMT 6-plex correction factors
            M <- matrix(c(0.939, 0.061, 0.000, 0.000, 0.000, 0.000,
                          0.005, 0.928, 0.067, 0.000, 0.000, 0.000,
                          0.000, 0.011, 0.947, 0.042, 0.000, 0.000,
                          0.000, 0.000, 0.017, 0.942, 0.041, 0.000,
                          0.000, 0.000, 0.000, 0.016, 0.963, 0.021,
                          0.000, 0.000, 0.000, 0.002, 0.032, 0.938),
                        nrow = 6, byrow = TRUE)
            rownames(M) <- colnames(M) <-
                reporterNames(TMT6)
        } else if (x == 8) {
            ## iTRAQ 8-plex: recurse through the file-based branch using
            ## the correction factors shipped with the package.
            f <- dir(system.file("extdata", package = "MSnbase"),
                     pattern = "iTRAQ8plexPurityCorrection",
                     full.names = TRUE)
            M <- makeImpuritiesMatrix(filename = f, edit = FALSE)
            rownames(M) <- colnames(M) <- c(113:119, 121)
        } else if (x == 10) {
            ## see TMT10.R
            M <- structure(c(0.95, 0, 0.003, 0, 0, 0, 0, 0, 0, 0, 0, 0.94, 0,
                             0.004, 0, 0, 0, 0, 0, 0, 0.05, 0, 0.949, 0, 0.006,
                             0, 0, 0, 0, 0, 0, 0.058, 0, 0.955, 0, 0.008, 0,
                             0.001, 0, 0, 0, 0, 0.048, 0, 0.964, 0, 0.014, 0, 0,
                             0, 0, 0, 0, 0.041, 0, 0.957, 0, 0.015, 0, 0.002, 0,
                             0, 0, 0, 0.03, 0, 0.962, 0, 0.017, 0, 0, 0, 0, 0,
                             0, 0.035, 0, 0.928, 0, 0.02, 0, 0, 0, 0, 0, 0,
                             0.024, 0, 0.965, 0, 0, 0, 0, 0, 0, 0, 0, 0.024, 0,
                             0.956),
                           .Dim = c(10L, 10L),
                           .Dimnames = list(
                               c("126", "127N", "127C", "128N", "128C", "129N",
                                 "129C", "130N", "130C", "131"),
                               c("126", "127N", "127C", "128N", "128C", "129N",
                                 "129C", "130N", "130C", "131")))
        } else {
            ## unknown plexing: fall back to the identity (no correction)
            M <- diag(x)
        }
    }
    rownames(M) <- paste("% reporter", rownames(M))
    if (edit) M <- edit(M)
    return(M)
}
utils.getMzDelta <- function(spectrum, percentage) {
    ## Computes the m/z differences between all the
    ## 'percentage' top intensity peaks in a spectrum
    ## Contributed by Guangchuang Yu for the plotMzDelta QC
    mz <- mz(spectrum)
    ints <- intensity(spectrum)
    idx <- order(ints, decreasing = TRUE)
    ## BUG FIX: seq_len() instead of 1:floor(...), which produced c(1, 0)
    ## (and thus kept the top peak) when percentage * length < 1.
    tops <- idx[seq_len(floor(length(idx) * percentage))] ## top 'percentage' of peaks
    mz.filtered <- mz[tops]
    delta <- vector("list", length = length(mz.filtered))
    k <- 1
    ## Pairwise absolute differences: repeatedly pop the first retained
    ## m/z and diff it against all remaining ones.
    while (length(mz.filtered) > 1) {
        m <- mz.filtered[1]
        mz.filtered <- mz.filtered[-1]
        delta[[k]] <- abs(mz.filtered - m)
        k <- k + 1
    }
    return(unlist(delta))
}
utils.getMzDelta_list <- function (object, percentage) {
    ## Pairwise m/z differences between the 'percentage' highest-intensity
    ## peaks of a peak list with elements 'mz' and 'int' (list flavour of
    ## utils.getMzDelta). Returns NULL when fewer than 2 peaks are kept.
    idx <- order(object$int, decreasing = TRUE)
    ## BUG FIX: seq_len() instead of 1:floor(...), which produced c(1, 0)
    ## (and thus kept the top peak) when percentage * length < 1.
    tops <- idx[seq_len(floor(length(idx) * percentage))]
    mz.filtered <- object$mz[tops]
    delta <- vector("list", length = length(mz.filtered))
    i <- 1
    while (length(mz.filtered) > 1) {
        m <- mz.filtered[1]
        mz.filtered <- mz.filtered[-1]
        delta[[i]] <- abs(mz.filtered - m)
        i <- i + 1
    }
    return(unlist(delta))
}
fillUp <- function(x) {
    ## Fill missing entries downward: any NA or empty-string element is
    ## replaced by the closest preceding value (last observation carried
    ## forward).
    ## BUG FIX: the original iterated over 2:length(x), which for inputs
    ## of length < 2 evaluates to c(2, 1)/c(2, 1, 0) and silently grew
    ## the vector; short inputs are now returned unchanged. The previous
    ## early return (no NAs and all elements == "") was a no-op
    ## optimisation and has been dropped.
    if (length(x) < 2L)
        return(x)
    for (i in 2:length(x)) {
        if (is.na(x[i]) || (x[i] == ""))
            x[i] <- x[i - 1]
    }
    return(x)
}
##' Return the name of variable \code{varname} in call \code{match_call}.
##'
##' @title Return a variable name
##' @param match_call An object of class \code{call}, as returned by \code{match.call}.
##' @param varname An \code{character} of length 1 which is looked up in \code{match_call}.
##' @return A \code{character} with the name of the variable passed as parameter
##' \code{varname} in parent close of \code{match_call}.
##' @examples
##' a <- 1
##' f <- function(x, y)
##' MSnbase:::getVariableName(match.call(), "x")
##' f(x = a)
##' f(y = a)
##' @author Laurent Gatto
getVariableName <- function(match_call, varname) {
    ## Work on the call as a plain list so arguments can be indexed by name.
    match_call <- as.list(match_call)
    ## Only the first requested name is looked up.
    varname <- varname[1]
    mcx <- match_call[[varname]]
    ## Recursively flatten nested call components (e.g. f(x = g(a))) until
    ## only length-one elements (symbols/atomics) remain.
    while (any(sapply(mcx, length) != 1))
        mcx <- unlist(lapply(mcx, as.list))
    ## The last component is the innermost variable name.
    tail(as.character(mcx), n = 1)
}
#' rowwise max, similar to rowwise mean via rowMeans
#'
#' @param x matrix
#' @param na.rm logical
#' @return double vector with maximum values per row
#' @seealso Biobase::rowMax (could not handle missing values/NA)
#' @noRd
.rowMaxs <- function(x, na.rm = FALSE) {
    ## Row-wise maximum, analogous to rowMeans/rowSums.
    stopifnot(is.matrix(x))
    if (na.rm) {
        ## Treat missing values as -Inf so they can never be the maximum.
        x[is.na(x)] <- -Inf
    }
    n <- nrow(x)
    ## max.col yields the column index of each row's maximum; convert the
    ## (row, column) pairs to linear indices into the matrix.
    maxcols <- max.col(x, ties.method = "first")
    x[(maxcols - 1L) * n + seq_len(n)]
}
#' summarise rows by an user-given function
#'
#' @param x matrix
#' @param fun function to summarise rows, if \code{fun} equals
#' \code{sum}/\code{mean} the more efficient \code{rowSums}/\code{rowMeans} are
#' used.
#' @param ... further arguments passed to \code{fun}
#' @return double, summarised rows
#' @noRd
.summariseRows <- function(x, fun, ...) {
    ## Summarise the rows of a matrix with `fun`, dispatching to the
    ## optimised rowSums/rowMeans when `fun` is sum/mean.
    stopifnot(is.matrix(x))
    stopifnot(is.function(fun))
    if (identical(fun, sum))
        return(rowSums(x, ...))
    if (identical(fun, mean))
        return(rowMeans(x, ...))
    apply(x, 1L, fun, ...)
}
#' find top n indices of each group
#'
#' @param x matrix
#' @param groupBy factor/character of length \code{nrow(x)}
#' @param n consider just the top \code{n} values
#' @param fun function to summarise rows
#' @param ... further arguments passed to \code{fun}
#' @return double, indices sorted by summarising function \code{fun}
#' @noRd
.topIdx <- function(x, groupBy, n, fun, ...) {
    ## Row indices of the top `n` rows per group, ranked (descending) by
    ## the per-row summary computed with `fun`.
    if (n < 1)
        stop(sQuote("n"), " has to be greater or equal than 1.")
    if (nrow(x) != length(groupBy))
        stop(sQuote("nrow(x)"), " and ", sQuote("length(groupBy)"),
             " have to be equal.")
    scores <- .summariseRows(x, fun, ...)
    ord <- order(as.double(scores), decreasing = TRUE, na.last = TRUE)
    ## First n positions within each group; groups with fewer than n rows
    ## are NA-padded by "[", so drop the padding afterwards.
    top <- unlist(lapply(split(ord, groupBy[ord]), "[", 1:n),
                  use.names = FALSE)
    top[!is.na(top)]
}
## Computes header from assay data by-passing cache
.header <- function(object) {
    ## Build the header data.frame directly from the assay data,
    ## by-passing any cached version (see checkHeader()).
    if (length(object) == 0)
        return(data.frame())
    if (all(msLevel(object) == 1)) {
        ## MS1-only experiment: the precursor-related columns do not
        ## apply and are filled with NA.
        ln <- length(object)
        nas <- rep(NA, ln)
        hd <- list(fileIdx = fromFile(object),
                   retention.time = rtime(object),
                   precursor.mz = nas,
                   precursor.intensity = nas,
                   charge = nas,
                   peaks.count = peaksCount(object),
                   tic = tic(object),
                   ionCount = ionCount(object),
                   ms.level = msLevel(object),
                   acquisition.number = acquisitionNum(object),
                   collision.energy = nas)
    } else {
        ## tbl <- table(fromFile(object))
        ## idx <- as.numeric(unlist(apply(tbl, 1, function(x) 1:x)))
        hd <- list(fileIdx = fromFile(object),
                   retention.time = rtime(object),
                   precursor.mz = precursorMz(object),
                   precursor.intensity = precursorIntensity(object),
                   charge = precursorCharge(object),
                   peaks.count = peaksCount(object),
                   tic = tic(object),
                   ionCount = ionCount(object),
                   ms.level = msLevel(object),
                   acquisition.number = acquisitionNum(object),
                   collision.energy = collisionEnergy(object))
    }
    ## items are either a numeric or a list of integer() - keep former only
    sel <- sapply(hd, function(i) !is.list(i))
    hd <- as.data.frame(hd[sel])
    return(hd)
}
checkHeader <- function(object) {
    ## Verify that the cached header matches one freshly computed from
    ## the assay data; trivially TRUE when caching is off (level 0).
    if (object@.cache$level == 0)
        return(TRUE)
    identical(header(object), .header(object))
}
updateSpectrum2 <- function(x) {
    ## Version 0.2.0 of Spectrum has now a tic slot (MSnbase 1.5.3):
    ## copy every pre-existing slot onto a freshly created Spectrum2 and
    ## initialise the new tic slot to 0.
    newx <- new("Spectrum2")
    for (s in c("merged", "precScanNum", "precursorMz",
                "precursorIntensity", "precursorCharge",
                "collisionEnergy", "msLevel", "peaksCount", "rt",
                "acquisitionNum", "scanIndex", "mz", "intensity",
                "fromFile", "centroided"))
        slot(newx, s) <- slot(x, s)
    newx@tic <- 0
    if (validObject(newx))
        return(newx)
}
updateMSnExp <- function(x) {
    ## Update every spectrum stored in the experiment's assayData
    ## environment in place (see updateSpectrum2).
    for (fn in featureNames(x))
        assign(fn, updateSpectrum2(x[[fn]]), envir = assayData(x))
    if (validObject(x))
        return(x)
}
cramer4 <- function(object, imp) {
    ## Purity (isotopic impurity) correction of 4-plex reporter
    ## intensities by solving the 4x4 mixing system with Cramer's rule.
    ## see Shadforth et al. 2005, BMC Genomics
    if (missing(imp)) {
        ## Default impurity percentages; columns are the -2/-1/+1/+2
        ## neighbouring channels, rows the reporters 114-117. `imp` is
        ## the column-major flattening named a..p.
        impM <- matrix(c(0.0, 1.0, 5.9, 0.2,
                         0.0, 2.0, 5.6, 0.1,
                         0.0, 3.0, 4.5, 0.1,
                         0.1, 4.0, 3.5, 0.1),
                       nrow = 4, byrow = TRUE)
        colnames(impM) <- c("-2", "-1", "+1", "+2")
        rownames(impM) <- 114:117
        imp <- as.numeric(impM)
        names(imp) <- letters[1:length(imp)]
    }
    ## Diagonal terms: percentage of each reporter remaining in its
    ## nominal channel.
    w <- (100 - (imp["a"] + imp["e"] + imp["i"] + imp["m"]))
    x <- (100 - (imp["b"] + imp["f"] + imp["j"] + imp["n"]))
    y <- (100 - (imp["c"] + imp["g"] + imp["k"] + imp["o"]))
    z <- (100 - (imp["d"] + imp["h"] + imp["l"] + imp["p"]))
    ## Coefficient matrix of the mixing system; its columns are
    ## (w,i,m,0), (f,x,j,n), (c,g,y,k) and (0,d,h,z).
    C <- matrix(c(w, imp["f"], imp["c"], 0,
                  imp["i"], x, imp["g"], imp["d"],
                  imp["m"], imp["j"], y, imp["h"],
                  0, imp["n"], imp["k"], z),
                ncol = 4, byrow = TRUE)
    if (det(C) == 0) {
        warning("Determinant of C is 0, correction impossible")
        object@processingData@processing <-
            c(object@processingData@processing,
              "No impurity correction possible, det(C) is 0")
    } else {
        e <- exprs(object)
        res <- apply(e, 1, function(.e) {
            ## Each d_k is t(C) with row k replaced by the observed
            ## intensities .e; since det(t(A)) == det(A), det(d_k)/det(C)
            ## is the Cramer solution for channel k.
            d1 <- matrix(c(.e,
                           imp["f"], x, imp["j"], imp["n"],
                           imp["c"], imp["g"], y, imp["k"],
                           0, imp["d"], imp["h"], z),
                         ncol = 4, byrow = TRUE)
            d2 <- matrix(c(w, imp["i"], imp["m"], 0,
                           .e,
                           imp["c"], imp["g"], y, imp["k"],
                           0, imp["d"], imp["h"], z),
                         ncol = 4, byrow = TRUE)
            d3 <- matrix(c(w, imp["i"], imp["m"], 0,
                           imp["f"], x, imp["j"], imp["n"],
                           .e,
                           0, imp["d"], imp["h"], z),
                         ncol = 4, byrow = TRUE)
            ## BUG FIX: the third row of d4 previously used imp["j"]
            ## instead of imp["g"]; it must equal column 3 of C,
            ## (c, g, y, k), exactly as in d1 and d2.
            d4 <- matrix(c(w, imp["i"], imp["m"], 0,
                           imp["f"], x, imp["j"], imp["n"],
                           imp["c"], imp["g"], y, imp["k"],
                           .e),
                         ncol = 4, byrow = TRUE)
            res <- c(det(d1)/det(C),
                     det(d2)/det(C),
                     det(d3)/det(C),
                     det(d4)/det(C))
            return(res)
        })
        res <- t(res)
        rownames(res) <- featureNames(object)
        colnames(res) <- sampleNames(object)
        object@processingData@processing <-
            c(object@processingData@processing,
              "Impurity correction using Cramer's rule.")
        exprs(object) <- res
    }
    if (validObject(object))
        return(object)
}
## cramer6 <- function(x, imp) {
## if (missing(imp)) {
## imp <- c(0, 0, 0, 6.1, 0, 0,
## 0, 0, 0.5, 6.7, 0, 0,
## 0, 0, 1.1, 4.2, 0, 0,
## 0, 0, 1.7, 4.1, 0, 0,
## 0, 0, 1.6, 2.1, 0, 0,
## 0, 0.2, 3.2, 2.8, 0, 0)
## names(imp) <- letters[1:length(imp)]
## impM <- matrix(imp, nrow = 6, byrow = TRUE)
## colnames(impM) <- c("-3", "-2", "-1", "+1", "+2", "+3")
## rownames(impM) <- 126:131
## imp <- as.numeric(imp)
## }
## return(FALSE)
## }
getColsFromPattern <- function(x, pattern) {
    ## Translate a "0"/"1" pattern string into a logical vector that
    ## selects the columns of 'x' flagged with "1".
    if (missing(pattern)) {
        stop(sQuote("pattern"), " must not be missing.")
    }
    if (!is.matrix(x)) {
        stop(sQuote("x"), " must be a matrix.")
    }
    if (nchar(pattern) != ncol(x)) {
        ## Clearer message: it is the *length* of the pattern that has
        ## to match the number of columns, not the pattern itself.
        stop("The number of characters of ", sQuote("pattern"),
             " must be equal to the number of columns.")
    }
    pattern <- strsplit(pattern, "")[[1L]]
    if (!all(unique(pattern) %in% c("0", "1"))) {
        stop(sQuote("pattern"), " must be composed of '0' or '1' defining columns",
             " with or without 'NA's.")
    }
    pattern == "1"
}
getRowsFromPattern <- function(x, pattern) {
    ## Logical vector flagging the rows of 'x' that contain no NA in
    ## any of the columns selected by 'pattern'.
    cols <- getColsFromPattern(x, pattern)
    x <- x[, cols, drop = FALSE]
    rowSums(is.na(x)) == 0
}
.filterNA <- function(x, pNA = 0L, pattern) {
    ## Row filter for a matrix: either keep rows whose proportion of
    ## missing values is <= pNA (pNA is clamped into [0, 1]), or, when
    ## 'pattern' is supplied, delegate to getRowsFromPattern.
    if (!is.matrix(x))
        stop(sQuote("x"), " must be a matrix.")
    if (!is.numeric(pNA))
        stop(sQuote("pNA"), " must be numeric.")
    if (length(pNA) > 1)
        stop(sQuote("pNA"), " must be of length one.")
    if (!missing(pattern))
        return(getRowsFromPattern(x, pattern))
    pNA <- min(max(pNA, 0), 1)
    rowSums(is.na(x)) / ncol(x) <= pNA
}
nologging <- function(object, n = 1) {
    ## Removes the last n entries from
    ## object@processingData@processing.
    proc <- object@processingData@processing
    l <- length(proc)
    stopifnot(n >= 0, n <= l)
    if (n > 0) {
        ## BUG FIX: use the full argument name 'length.out' (the old
        ## 'length =' relied on partial matching) and guard against
        ## n == 0, for which proc[-integer(0)] would have *emptied*
        ## the log instead of leaving it untouched.
        drop <- seq(l, length.out = n, by = -1)
        object@processingData@processing <- proc[-drop]
    }
    stopifnot(length(object@processingData@processing) == (l - n))
    if (validObject(object))
        return(object)
}
logging <- function(object, msg, date. = TRUE) {
    ## Append 'msg' to the object's processing log, optionally
    ## suffixed with the current date/time.
    if (date.)
        msg <- paste0(msg, " [", date(), "]")
    proc <- object@processingData@processing
    object@processingData@processing <- c(proc, msg)
    if (validObject(object))
        return(object)
}
##' Given a text spread sheet \code{f} and a \code{pattern} to
##' be matched to its header (first line in the file), the function
##' returns the matching columns names or indices of the
##' corresponding \code{data.frame}.
##'
##' The function starts by reading the first line of the file (or connection)
##' \code{f} with \code{\link{readLines}}, then splits it
##' according to the optional \code{...} arguments (it is important to
##' correctly specify \code{\link{strsplit}}'s \code{split} character vector here)
##' and then matches \code{pattern} to the individual column names using
##' \code{\link{grep}}.
##'
##' Similarly, \code{getEcols} can be used to explore the column names and
##' decide for the appropriate \code{pattern} value.
##'
##' These functions are useful to check the parameters to be provided to
##' \code{\link{readMSnSet2}}.
##'
##' @title Returns the matching column names or indices.
##' @param f A connection object or a \code{character} string to be
##' read in with \code{readLines(f, n = 1)}.
##' @param pattern A \code{character} string containing a regular
##' expression to be matched to the file's header.
##' @param ... Additional parameters passed to \code{\link{strsplit}}
##' to split the file header into individual column names.
##' @param n An \code{integer} specifying which line in file \code{f}
##' to grep (get). Default is 1. Note that this argument must be
##' named.
##' @return For \code{grepEcols}, the indices of the columns matching
##' \code{pattern}. In case of \code{getEcols}, a \code{character} of
##' column names.
##' @seealso \code{\link{readMSnSet2}}
##' @author Laurent Gatto
grepEcols <- function(f, pattern, ..., n = 1)
    grep(pattern, strsplit(readLines(f, n), ...)[n][[1]])
##' @rdname grepEcols
getEcols <- function(f, ..., n = 1) {
    ## Read up to line n and split that line into the column names.
    hdr <- readLines(f, n)[n]
    strsplit(hdr, ...)[[1]]
}
## Total size of an MSnExp: the size of the object itself plus the
## sizes of all spectra stored in its assayData environment (the
## environment's contents are not counted by object.size alone).
MSnExp.size <- function(x)
    object.size(x) + sum(unlist(unname(eapply(assayData(x),
                                              object.size))))
## convert vector of length n to a semicolon separated character
## vector of length 1
utils.vec2ssv <- function(vec, sep = ";") {
    paste(vec, collapse = sep)
}
## converts an n by m data.frame into an 1 by m data.frame where the
## vector columns of length n are converted to a semicolon separated
## character vector of length 1
##
## 'exclude' (numeric indices, column names or a logical vector) marks
## columns that are NOT collapsed; for those only the first value is
## kept. Note that collapsed columns become character.
utils.vec2ssv.data.frame <- function(x, sep = ";", exclude) {
    ## A single row needs no collapsing.
    if (nrow(x) == 1L)
        return(x)
    nms <- names(x)
    if (missing(exclude)) {
        ans <- lapply(x, utils.vec2ssv)
    } else {
        ## Split x into the excluded columns (x0) and the columns to
        ## be collapsed (x), normalising 'exclude' to a logical first
        ## where needed.
        if (is.numeric(exclude)) {
            x0 <- x[, exclude, drop = FALSE]
            x <- x[, -exclude, drop = FALSE]
        } else if (is.character(exclude)) {
            ## convert to logical, making sure that the column names
            ## to be excluded are present
            stopifnot(all(exclude %in% names(x)))
            exclude <- names(x) %in% exclude
            x0 <- x[, exclude, drop = FALSE]
            x <- x[, !exclude, drop = FALSE]
        } else if (is.logical(exclude)) {
            x0 <- x[, exclude, drop = FALSE]
            x <- x[, !exclude, drop = FALSE]
        } else {
            stop("Can only exclude numeric, characters or logicals.")
        }
        ans <- lapply(x, utils.vec2ssv)
        ## Excluded columns are represented by their first value only.
        x0 <- lapply(x0, head, n = 1L)
        ans <- c(x0, ans)
        ans <- ans[nms] ## preserve original order
    }
    data.frame(ans, stringsAsFactors = FALSE)
}
## convert a semicolon separated character vector of length 1 to a
## vector of length n
utils.ssv2vec <- function(ssv, sep = ";", unlist = TRUE) {
    parts <- strsplit(ssv, sep)
    if (unlist)
        return(unlist(parts))
    parts
}
## collapse each element of a list into a single ssv string
utils.list2ssv <- function(l, sep = ";") {
    collapsed <- lapply(l, utils.vec2ssv, sep = sep)
    unlist(collapsed)
}
## split each ssv string into a vector, keeping the list structure
utils.ssv2list <- function(ssv, sep = ";") {
    utils.ssv2vec(ssv, sep = sep, unlist = FALSE)
}
## similar to merge(..., all.x=TRUE) but if called multiple times
## exisiting columns would not duplicated (with/without suffixes)
## but filled/overwritten using the values from y
## params: x, y, by, by.x=by, by.y=by see ?merge
## exclude: character, columns which should excluded
## order: logical, preserve order?
utils.leftJoin <- function(x, y, by, by.x=by, by.y=by,
                           exclude=character(), order=TRUE) {
    ## create character ids to allow ids covering several columns
    rxc <- do.call(paste, c(x[, by.x, drop=FALSE], sep=";"))
    ryc <- do.call(paste, c(y[, by.y, drop=FALSE], sep=";"))
    ## determine matching rows
    ## ryid: rows of y that match x; rjid: rows of x (the join target)
    ## that have a match in y -- zeros (no match) are dropped below
    ryid <- match(rxc, ryc, 0L)
    rjid <- match(ryc, rxc, 0L)
    ryid <- ryid[ryid > 0]
    rjid <- rjid[rjid > 0]
    ## preserve order?
    if (order) {
        rjid <- sort(rjid)
    }
    cnx <- colnames(x)
    cny <- colnames(y)
    ## exclude columns
    ## (the y-side join columns are always dropped from the import)
    keepx <- !cnx %in% exclude
    keepy <- !cny %in% c(exclude, by.y)
    cnx <- cnx[keepx]
    cny <- cny[keepy]
    x <- x[, keepx, drop=FALSE]
    y <- y[, keepy, drop=FALSE]
    ## start joining
    joined <- x[, cnx]
    ## only import equal columns from y
    ## (columns present in both x and y: overwrite matched rows)
    cjid <- match(cny, cnx, 0L)
    cyid <- match(cnx, cny, 0L)
    cjid <- cjid[cjid > 0]
    cyid <- cyid[cyid > 0]
    joined[rjid, cjid] <- y[ryid, cyid]
    ## add missing columns from y
    ## (columns only in y: initialise with NA, fill matched rows)
    cym <- setdiff(cny, cnx)
    if (length(cym)) {
        joined[, cym] <- NA
        joined[rjid, cym] <- y[ryid, cym]
    }
    return(joined)
}
## @param featureData fData(msexp)/fData(msset)
## @param id output of mzID::flatten(mzID(filename))
## @param fcol column name of fData data.frame used for merging
## @param icol column name of idData data.frame used for merging
## @param acc column name holding the protein accession(s)
## @param desc column name holding the protein description(s)
## @param pepseq column name holding the peptide sequence
## @param rank column name holding the identification rank
## @noRd
utils.mergeSpectraAndIdentificationData <- function(featureData, id,
                                                    fcol, icol, acc,
                                                    desc, pepseq,
                                                    rank = "rank") {
    ## mzR::acquisitionNum (stored in fData()[, "acquisition.number"] and
    ## mzID::acquisitionnum should be identical
    if (!all(fcol %in% colnames(featureData))) {
        stop("The column(s) ", sQuote(fcol),
             " are not all in the feature data.frame!")
    }
    if (!all(icol %in% colnames(id))) {
        stop("The column(s) ", sQuote(icol),
             " are not all in the identification data.frame!")
    }
    if (sum(fcol %in% colnames(featureData)) != sum(icol %in% colnames(id))) {
        stop("The number of selected column(s) in the feature and identification ",
             "data don't match!")
    }
    ## sort id data to ensure the best matching peptide is on top in case of
    ## multiple matching peptides
    o <- do.call("order", lapply(c(icol, rank), function(j) id[, j]))
    id <- id[o, ]
    ## use flat version of accession/description if multiple ones are available
    ## (collapsed into semicolon-separated strings per icol group)
    id[, acc] <- ave(as.character(id[, acc]), id[, icol], FUN = utils.vec2ssv)
    id[, desc] <- ave(as.character(id[, desc]), id[, icol], FUN = utils.vec2ssv)
    ## remove duplicated entries
    id <- id[!duplicated(id[, icol]), ]
    featureData <- utils.leftJoin(
        x = featureData, y = id, by.x = fcol, by.y = icol,
        exclude = c("spectrumid", # vendor specific nativeIDs
                    "spectrumID",
                    "spectrumFile") # is stored in fileId + MSnExp@files
    )
    ## number of members in the protein group
    featureData$nprot <- sapply(utils.ssv2list(featureData[, acc]),
                                function(x) {
                                    n <- length(x)
                                    if (n == 1 && is.na(x)) return(NA)
                                    n
                                })
    ## number of peptides observed for each protein
    ## NOTE(review): this groups by the peptide sequence column, which
    ## makes it identical to npsm.pep below rather than a per-protein
    ## peptide count -- confirm whether grouping by 'acc' with unique
    ## peptides was intended.
    featureData$npep.prot <- as.integer(ave(featureData[, acc],
                                            featureData[, pepseq],
                                            FUN = length))
    ## number of PSMs observed for each protein
    featureData$npsm.prot <- as.integer(ave(featureData[, acc],
                                            featureData[, acc],
                                            FUN = length))
    ## number of PSMs observed for each peptide
    featureData$npsm.pep <- as.integer(ave(featureData[, pepseq],
                                           featureData[, pepseq],
                                           FUN = length))
    return(featureData)
}
utils.removeNoId <- function(object, fcol, keep) {
    ## Remove spectra without identification data. If 'keep' is NULL,
    ## spectra with an NA in feature variable 'fcol' are dropped;
    ## otherwise 'keep' is a logical vector selecting the spectra to
    ## retain (its length must match the number of spectra).
    if (!fcol %in% fvarLabels(object))
        stop(fcol, " not in fvarLabels(",
             getVariableName(match.call(), 'object'), ").")
    if (is.null(keep)) {
        noid <- is.na(fData(object)[, fcol])
    } else {
        ## BUG FIX: the quotes in the error message were misplaced
        ## ("'keep must be a logical.'").
        if (!is.logical(keep))
            stop("'keep' must be a logical.")
        if (length(keep) != nrow(fData(object)))
            stop("The length of 'keep' does not match the number of spectra.")
        noid <- !keep
    }
    object <- object[!noid, ]
    ## Replace the subsetting log entry by a more informative one.
    object <- nologging(object, 1)
    object <- logging(object, paste0("Filtered ", sum(noid),
                                     " unidentified peptides out"))
    if (validObject(object))
        return(object)
}
utils.removeMultipleAssignment <- function(object, nprot = "nprot") {
    ## Keep only features assigned to exactly one protein; features
    ## with an NA protein count are removed as well (matching the
    ## previous which()-based subsetting).
    keep <- fData(object)[, nprot] == 1
    keep[is.na(keep)] <- FALSE
    object <- object[keep, ]
    ## Replace the subsetting log entry by a more informative one.
    object <- nologging(object, 1)
    ## BUG FIX: the previous code computed sum(!keep) on the *integer
    ## positions* returned by which(), so the number of removed
    ## features was always reported as 0.
    object <- logging(object,
                      paste0("Removed ", sum(!keep),
                             " features assigned to multiple proteins"))
    if (validObject(object))
        return(object)
}
utils.idSummary <- function(fd) {
    ## Per-spectrum-file identification summary: for every raw file,
    ## report the matching identification file and the fraction of
    ## spectra that were identified (non-NA idFile), rounded to 3
    ## digits.
    if (any(!c("spectrumFile", "idFile") %in% colnames(fd))) {
        stop("No quantification/identification data found! Did you run ",
             sQuote("addIdentificationData"), "?")
    }
    smry <- fd[!duplicated(fd$spectrumFile), c("spectrumFile", "idFile")]
    smry$coverage <- sapply(smry$spectrumFile, function(f) {
        round(mean(!is.na(fd$idFile[fd$spectrumFile == f])), 3)
    })
    rownames(smry) <- NULL
    colnames(smry) <- c("spectrumFile", "idFile", "coverage")
    smry
}
utils.removeNoIdAndMultipleAssignments <-
    function(object, pepseq = "sequence", nprot = "nprot") {
        ## Drop unidentified spectra first (NA peptide sequence), then
        ## features matched to more than one protein.
        if (anyNA(fData(object)[, pepseq]))
            object <- removeNoId(object, pepseq)
        if (any(fData(object)[, nprot] > 1))
            object <- removeMultipleAssignment(object, nprot)
        object
    }
##' Checks that all members of a list inherit from a given class.
##'
##' @title Tests equality of list elements class
##' @param x A \code{list}.
##' @param class A \code{character} defining the expected class.
##' @param valid A \code{logical} defining if all elements should be
##' tested for validity. Default is \code{TRUE}.
##' @return \code{TRUE} is all elements of \code{x} inherit from
##' \code{class}.
##' @author Laurent Gatto
##' @examples
##' listOf(list(), "foo")
##' listOf(list("a", "b"), "character")
##' listOf(list("a", 1), "character")
listOf <- function(x, class, valid = TRUE) {
    isClass <- all(sapply(x, inherits, class))
    ## Validity is always evaluated (not short-circuited), matching
    ## the historical behaviour.
    isValid <- if (valid) all(sapply(x, validObject)) else TRUE
    isClass & isValid
}
##' Calculates a non-parametric version of the coefficient of
##' variation where the standard deviation is replaced by the median
##' absolute deviation (see \code{\link{mad}} for details) and
##' divided by the absolute value of the mean.
##'
##' Note that the \code{mad} of a single value is 0 (as opposed to
##' \code{NA} for the standard deviation, see example below).
##'
##' @title Non-parametric coefficient of variation
##' @param x A \code{numeric}.
##' @param na.rm A \code{logical} (default is \code{TRUE}) indicating
##' whether \code{NA} values should be stripped before the computation
##' of the median absolute deviation and mean.
##' @return A \code{numeric}.
##' @author Laurent Gatto
##' @examples
##' set.seed(1)
##' npcv(rnorm(10))
##' replicate(10, npcv(rnorm(10)))
##' npcv(1)
##' mad(1)
##' sd(1)
npcv <- function(x, na.rm = TRUE) {
    mad(x, na.rm = na.rm) / abs(mean(x, na.rm = na.rm))
}
##' Compares two \code{\linkS4class{MSnSet}} instances. The
##' \code{qual} and \code{processingData} slots are generally omitted.
##'
##' @title Compare two MSnSets
##' @param x First MSnSet
##' @param y Second MSnSet
##' @param qual Should the \code{qual} slots be compared? Default is
##' \code{FALSE}.
##' @param proc Should the \code{processingData} slots be compared?
##' Default is \code{FALSE}.
##' @return A \code{logical}
##' @author Laurent Gatto
compareMSnSets <- function(x, y, qual = FALSE, proc = FALSE) {
    if (!proc) {
        ## Neutralise @processingData in both objects before comparing.
        blank <- new("MSnProcess")
        x@processingData <- blank
        y@processingData <- blank
    }
    if (!qual) ## do not compare @qual
        x@qual <- y@qual
    all.equal(x, y)
}
##' Similar to rowsum but calculates the mean. It is slower than colMeans but
##' supports grouping variables. See ?rowsum for details.
##' @param x matrix
##' @param group a vector/factor of grouping
##' @param reorder if TRUE the rows are ordered by `sort(unique(group))`
##' @param na.rm logical. Should missing values (including `NaN`) be omitted
##' @return matrix
##' @author Sebastian Gibb <mail@@sebastiangibb.de>
##' @noRd
rowmean <- function(x, group, reorder = FALSE, na.rm = FALSE) {
    ## Per-group observation counts: non-missing entries if na.rm,
    ## otherwise every entry counts as one.
    if (na.rm) {
        counts <- !is.na(x)
        mode(counts) <- "numeric"
    } else {
        counts <- x
        counts[] <- 1
    }
    n <- rowsum(counts, group = group, reorder = reorder, na.rm = na.rm)
    totals <- rowsum(x, group = group, reorder = reorder, na.rm = na.rm)
    totals / n
}
##' Similar to rowsum but calculates the sd.
##' See ?rowsum for details.
##' @param x matrix
##' @param group a vector/factor of grouping
##' @param reorder if TRUE the rows are ordered by `sort(unique(group))`
##' @param na.rm logical. Should missing values (including `NaN`) be omitted
##' @return matrix
##' @author Sebastian Gibb <mail@@sebastiangibb.de>
##' @noRd
rowsd <- function(x, group, reorder = FALSE, na.rm = FALSE) {
    if (na.rm) {
        counts <- !is.na(x)
        mode(counts) <- "numeric"
    } else {
        counts <- x
        counts[] <- 1
    }
    counts <- rowsum(counts, group = group, reorder = reorder, na.rm = na.rm)
    ## A single observation has no dispersion: mimic sd() and yield NA.
    counts[counts == 1] <- NA_real_
    ## Population variance via E[x^2] - E[x]^2, then Bessel-corrected.
    popvar <- rowmean(x * x, group = group, reorder = reorder, na.rm = na.rm) -
        rowmean(x, group = group, reorder = reorder, na.rm = na.rm)^2L
    sqrt(popvar * counts / (counts - 1L))
}
setMethod("trimws", "data.frame",
          function(x, which, ...) {
              ## Trim leading/trailing whitespace in every character
              ## column of a data.frame; non-character columns are
              ## left untouched. Note that '...' is accepted but not
              ## forwarded to base::trimws (historical behaviour).
              ## seq_len() (instead of 1:ncol(x)) is safe for
              ## zero-column data.frames.
              for (i in seq_len(ncol(x))) {
                  if (inherits(x[, i], "character"))
                      x[, i] <- base::trimws(x[, i], which)
              }
              x
          })
## Trim whitespace in all character feature variables of an MSnSet and
## log the operation in the object's processing data.
setMethod("trimws", "MSnSet",
          function(x, which, ...) {
              fData(x) <- trimws(fData(x), which, ...)
              x <- logging(x, "Trimmed featureData white spaces")
              x
          })
## An environment is considered empty when it binds no names.
setMethod("isEmpty", "environment",
          function(x) length(ls(x)) == 0)
## Simple helper to help differentiate between on disk and in
## memory objects.
isOnDisk <- function(object) {
    ## Only on-disk objects carry a spectraProcessingQueue slot.
    length(grep("spectraProcessingQueue", slotNames(object))) > 0L
}
## Simple function to determine whether parallel or serial processing should be
## performed.
## Check testthat/test_OnDiskMSnExp_benchmarks.R for performance comparisons.
## Parameter 'object' is expected to provide a length() method; small or
## empty objects are processed serially.
getBpParam <- function(object, BPPARAM = bpparam()) {
    thresh <- options()$MSnbase$PARALLEL_THRESH
    if (is.null(thresh))
        thresh <- 1000
    ## If it's empty or below the threshold, return SerialParam.
    if (length(object) == 0 || length(object) < thresh)
        return(SerialParam())
    BPPARAM
}
countAndPrint <- function(x) {
    ## Render the tabulated values of 'x' as "value (count)" pairs
    ## joined by commas; empty input yields an empty string.
    if (!length(x))
        return("")
    counts <- table(x)
    items <- sprintf("%s (%d)", names(counts), counts)
    paste(items, collapse = ", ")
}
## see issue #131
.isCentroided <- function(pk, k = 0.025, qtl = 0.9) {
    ## Keep the m/z values of the most intense peaks (above the 'qtl'
    ## intensity quantile) and inspect the spacing between them:
    ## centroided data shows gaps larger than 'k'.
    cutoff <- quantile(pk[, 2], qtl)
    mzs <- pk[pk[, 2] > cutoff, 1]
    quantile(diff(mzs), 0.25) > k
}
##' @title Reads profile/centroided mode from an mzML file
##' @param f The name of an mzML file (parsed with the XML package).
##' @return A \code{logical} per indexed spectrum: \code{TRUE} for
##'     centroided (MS:1000127), \code{FALSE} for profile
##'     (MS:1000128), \code{NA} when neither cvParam is present.
##' @noRd
.isCentroidedFromFile <- function(f) {
    if (!requireNamespace("XML"))
        stop("Please install the XML package to use this functionality.")
    xml <- XML::xmlParse(f)
    ## The XPath returns alternating spectrum indices and cvParam
    ## names, hence the stride-2 extraction below.
    x <- XML::xpathSApply(xml,
                          "//x:spectrum/x:cvParam[@accession='MS:1000127' or @accession='MS:1000128']/../@index |
        //x:cvParam[@accession='MS:1000127' or @accession='MS:1000128']/@name",
                          namespaces = c(x = "http://psi.hupo.org/ms/mzml"))
    index <- as.double(x[seq(1, length(x), by = 2)])
    res <- rep(NA, length(index))
    res[grepl("centroid", x[seq(2, length(x), by = 2)])] <- TRUE
    res[grepl("profile", x[seq(2, length(x), by = 2)])] <- FALSE
    res
}
## Returns the extension of the file. If that extension is one of the
## usual archive extensions, as defined in gexts, then the last part
## after the dot is removed and the extension is extracted again
## (e.g. "foo.mzML.gz" yields "mzML").
.fileExt <- function(f,
                     gexts = c("gz", "gzip", "bzip", "bzip2", "xz",
                               "zip")) {
    ext <- tools::file_ext(f)
    while (ext %in% gexts) {
        f <- sub("\\.[a-z]+$", "", basename(f))
        ext <- tools::file_ext(f)
    }
    ext
}
## see this comment
## https://github.com/lgatto/MSnbase/issues/183#issuecomment-273512931
## for some background about this function
## Returns the MS level of the first spectrum. For on-disk experiments
## msLevel(object) is read from the header; otherwise the first
## spectrum is extracted and queried.
.firstMsLevel <- function(object) {
    if (inherits(object, "OnDiskMSnExp")) msLevel(object)[1]
    else msLevel(object[[1]])
}
#' @title Open an MS file using the mzR package
#'
#' @description Opens an MS file using the mzR package, determining
#'     the correct backend from the file name.
#'
#' @param x \code{character(1)}: the file name.
#'
#' @return A file handle to the opened MS file.
#'
#' @author Johannes Rainer
#'
#' @noRd
.openMSfile <- function(x) {
    if (missing(x))
        stop("parameter 'x' has to be of length 1")
    if (length(x) != 1)
        stop("parameter 'x' has to be of length 1")
    mzR::openMSfile(x, backend = NULL)
}
##' This function produces the opposite as the \code{stringsAsFactors}
##' argument in the \code{data.frame} or \code{read.table} functions;
##' it converts \code{factors} columns to \code{characters}.
##'
##' @title Converts factors to strings
##' @param x A \code{data.frame}
##' @return A \code{data.frame} where \code{factors} are converted to
##' \code{characters}.
##' @author Laurent Gatto
##' @examples
##' data(iris)
##' str(iris)
##' str(factorsAsStrings(iris))
factorsAsStrings <- function(x) {
    cols <- lapply(x, function(col)
        if (is.factor(col)) as.character(col) else col)
    data.frame(cols, stringsAsFactors = FALSE)
}
##' Convert a \code{vector} of characters to camel case by replacing
##' dots by capital letters.
##'
##' @title Convert to camel case by replacing dots by capital letters
##' @param x A \code{vector} to be transformed to camel case.
##' @param prefix An optional \code{character} of length one. Any
##' additional elements are ignored.
##' @return A \code{character} of same length as \code{x}.
##' @author Laurent Gatto
##' @examples
##' nms <- c("aa.foo", "ab.bar")
##' makeCamelCase(nms)
##' makeCamelCase(nms, prefix = "x")
makeCamelCase <- function(x, prefix) {
    withPrefix <- if (missing(prefix)) x
                  else paste(prefix[1], x, sep = ".")
    ## \U upper-cases the captured letter following each dot.
    gsub('\\.(\\w?)', '\\U\\1', withPrefix, perl = TRUE)
}
##' Reduce a data.frame so that the (primary) key column contains only
##' unique entries and other columns pertaining to that entry are
##' combined into semicolon-separated values into a single
##' row/observation.
##'
##' An important side-effect of reducing a `data.frame` is that all
##' columns other than the key are converted to characters when they
##' are collapsed to a semi-column separated value (even if only one
##' value is present) as soon as one observation is transformed.
##'
##' @title Reduce a data.frame
##' @param x A \code{data.frame}.
##' @param key The column name (currenly only one is supported) to be
##'     used as primary key.
##' @param sep The separator. Default is \code{;}.
##' @return A reduced \code{data.frame}.
##' @author Laurent Gatto
##' @examples
##' dfr <- data.frame(A = c(1, 1, 2),
##'                   B = c("x", "x", "z"),
##'                   C = LETTERS[1:3])
##' dfr
##' dfr2 <- reduce(dfr, key = "A")
##' dfr2
##' ## column A used as key is still num
##' str(dfr2)
##' dfr3 <- reduce(dfr, key = "B")
##' dfr3
##' ## A is converted to chr; B remains factor
##' str(dfr3)
##' dfr4 <- data.frame(A = 1:3,
##'                    B = LETTERS[1:3],
##'                    C = c(TRUE, FALSE, NA))
##' ## No effect of reducing, column classes are maintained
##' str(reduce(dfr4, key = "B"))
setMethod("reduce", "data.frame",
          function(x, key, sep = ";") {
              ## Nothing to reduce for 0- or 1-row data.frames.
              if (nrow(x) %in% c(0, 1))
                  return(x)
              if (missing(key))
                  stop("Need a key column to reduce the data.frame")
              if (length(key) != 1L)
                  stop("Key must be of length 1.")
              if (!key %in% names(x))
                  stop("key not found in column names.")
              ## Collapse each key group to a single row, excluding
              ## the key column itself from the ssv conversion.
              ans <- by(x, x[, key], utils.vec2ssv.data.frame, exclude = key)
              ans <- do.call(rbind, ans)
              rownames(ans) <- NULL
              ans
          })
.reduce_list <- function(x) {
    ## Drop zero-length elements, then elements consisting solely of
    ## empty strings.
    nonEmpty <- x[lengths(x) > 0]
    keep <- vapply(nonEmpty, function(el) any(el != ""), logical(1))
    nonEmpty[keep]
}
#' @param fd data.frame, feature data (columns required: acquisitionNum,
#' precursorScanNum)
#' @param an integer, acquisitionNum of spectrum of interest (parent and
#' children will be selected)
#' @noRd
.filterSpectraHierarchy <- function(fd, an) {
    if (!is.data.frame(fd)) {
        stop("'fd' is not a data.frame")
    }
    if (!all(c("acquisitionNum", "precursorScanNum") %in% colnames(fd))) {
        stop("column(s) acquisitionNum/precursorScanNum is/are missing")
    }
    ## We could use recursion (slow in R) or a nested tree model, but
    ## most MS data are limited to at most 3 levels and the filtering
    ## isn't done very often, so a simple fixed-point loop suffices:
    ## repeatedly mark every row whose 'to' value occurs among the
    ## 'from' values of the rows already selected.
    expand <- function(sel, from, to) {
        repeat {
            before <- sum(sel)
            sel[to %in% from[sel]] <- TRUE
            if (sum(sel) == before)
                return(sel)
        }
    }
    ## find current scan(s)
    seed <- fd$acquisitionNum %in% an
    ## walk up: a parent's acquisition number is the precursor scan
    ## number of an already selected scan
    parents <- expand(seed, from = fd$precursorScanNum,
                      to = fd$acquisitionNum)
    ## walk down: a child's precursor scan number is the acquisition
    ## number of an already selected scan
    children <- expand(seed, from = fd$acquisitionNum,
                       to = fd$precursorScanNum)
    parents | children
}
windowIndices <- function(i, hws, n) {
    ## Indices of a window of half-width 'hws' centred at position
    ## 'i', clipped to the valid range 1..n.
    stopifnot(i <= n)
    lower <- max(1L, i - hws)
    upper <- min(n, i + hws)
    lower:upper
}
#' The function aggregates `x` for `toBin` falling into bins defined
#' by `breaks` using the `fun` function.
#'
#' @details
#'
#' This is a combination of the code from the former bin_Spectrum.
#'
#' @param x `numeric` with the values that should be binned.
#'
#' @param toBin `numeric`, same length than `x`, with values to be used for the
#'     binning.
#'
#' @param binSize `numeric(1)` with the size of the bins.
#'
#' @param breaks `numeric` defining the breaks/bins.
#'
#' @param fun `function` to be used to aggregate values of `x` falling into the
#'     bins defined by `breaks`.
#'
#' @return `list` with elements `x` and `mids` being the aggregated values
#'     of `x` for values in `toBin` falling within each bin and the bin mid
#'     points.
#'
#' @author Johannes Rainer, Sebastian Gibb
#'
#' @noRd
.bin_values <- function(x, toBin, binSize = 1, breaks = seq(floor(min(toBin)),
                                                            ceiling(max(toBin)),
                                                            by = binSize),
                        fun = max) {
    if (length(x) != length(toBin))
        stop("lengths of 'x' and 'toBin' have to match.")
    fun <- match.fun(fun)
    ## Extend the breaks so that the full range of toBin is covered
    ## (see .fix_breaks).
    breaks <- .fix_breaks(breaks, range(toBin))
    nbrks <- length(breaks)
    idx <- findInterval(toBin, breaks)
    ## Ensure that indices are within breaks.
    idx[which(idx < 1L)] <- 1L
    idx[which(idx >= nbrks)] <- nbrks - 1L
    ## Aggregate per occupied bin; unoccupied bins stay 0.
    ints <- double(nbrks - 1L)
    ints[unique(idx)] <- unlist(lapply(base::split(x, idx), fun),
                                use.names = FALSE)
    list(x = ints, mids = (breaks[-nbrks] + breaks[-1L]) / 2L)
}
#' Simple function to ensure that breaks (for binning) span at least
#' the expected range.
#'
#' @param brks `numeric` with *breaks* such as calculated by `seq`.
#'
#' @param rng `numeric(2)` with the range of original numeric values on which
#'     the breaks were calculated.
#'
#' @noRd
.fix_breaks <- function(brks, rng) {
    ## Assuming breaks being sorted.
    last <- brks[length(brks)]
    if (last <= rng[2]) {
        ## Append one more break just beyond the upper range bound.
        step <- mean(diff(brks))
        brks <- c(brks, max((rng[2] + 1e-6), last + step))
    }
    brks
}
##' Helper functions to check whether raw files contain spectra or
##' chromatograms.
##'
##' @title Checks if raw data files have any spectra or chromatograms
##' @param files A `character()` with raw data filenames.
##' @return A `logical(n)` where `n == length(x)` with `TRUE` if that
##'     files contains at least one spectrum, `FALSE` otherwise.
##' @author Laurent Gatto
##' @rdname hasSpectraOrChromatograms
##' @md
##' @examples
##' f <- msdata::proteomics(full.names = TRUE)[1:2]
##' hasSpectra(f)
##' hasChromatograms(f)
hasSpectra <- function(files) {
    ## Delegates the per-file check to mzR's internal helper.
    sapply(files, mzR:::.hasSpectra)
}
##' @rdname hasSpectraOrChromatograms
hasChromatograms <- function(files) {
    sapply(files, mzR:::.hasChromatograms)
}
#' @title Get the index of the particular element for each level
#'
#' `levelIndex` returns the index of the first, middle or last element for
#' each level of a factor within the factor.
#'
#' @param x `factor` or `vector` that can be converted into a `factor`
#'
#' @param which `character` defining for which element the index should be
#'     returned, can be either `"first"`, `"middle"` or `"last"`.
#'
#' @return `integer` same length than `levels(x)` with the index for each
#'     level in `x`.
#'
#' @author Johannes Rainer
#'
#' @md
#'
#' @noRd
#'
#' @examples
#'
#' f <- factor(c("a", "a", "b", "a", "b", "c", "c", "b", "d", "d", "d"))
#' f
#'
#' levelIndex(f, which = "first")
#' levelIndex(f, which = "middle")
#' levelIndex(f, which = "last")
#'
#' f <- factor(c("a", "a", "b", "a", "b", "c", "c", "b", "d", "d", "d"),
#'             levels = c("d", "a", "c", "b"))
#' levelIndex(f, which = "first")
#' levelIndex(f, which = "middle")
#' levelIndex(f, which = "last")
levelIndex <- function(x, which = c("first", "middle", "last")) {
    x <- as.factor(x)
    lvls <- levels(x)
    which <- match.arg(which)
    if (which == "first") {
        res <- match(lvls, x)
    } else if (which == "last") {
        ## position in the reversed vector, translated back
        res <- length(x) - match(lvls, rev(x)) + 1L
    } else {
        res <- vapply(lvls, function(lvl) {
            pos <- base::which(x == lvl)
            pos[ceiling(length(pos) / 2L)]
        }, integer(1), USE.NAMES = FALSE)
    }
    names(res) <- lvls
    res
}
| /R/utils.R | no_license | stanstrup/MSnbase | R | false | false | 48,337 | r | ##' Returns a \code{data.frame} of amino acid properties: \code{AA},
##' \code{ResidueMass}, \code{Abbrev3}, \code{ImmoniumIonMass},
##' \code{Name}, \code{Hydrophobicity}, \code{Hydrophilicity},
##' \code{SideChainMass}, \code{pK1}, \code{pK2} and \code{pI}.
##'
##' @title Amino acids
##' @return A \code{data.frame}
##' @author Laurent Gatto
##' @examples
##' get.amino.acids()
get.amino.acids <- function()
.get.amino.acids()
.get.amino.acids <- function() {
get("amino.acids",envir=.MSnbaseEnv)
}
##' Returns a \code{double} of used atomic mass.
##'
##' @title Atomic mass.
##' @return A named \code{double}.
##' @author Sebastian Gibb
##' @examples
##' get.atomic.mass()
get.atomic.mass <- function()
    .get.atomic.mass()
## Internal accessor: the masses are stored in the package-level
## environment .MSnbaseEnv (defined elsewhere in the package).
.get.atomic.mass <- function() {
    get("atomic.mass",envir=.MSnbaseEnv)
}
formatRt <- function(rt) {
    ## Convert a retention time in seconds into an "mm:ss" character
    ## (numeric input), or an "mm:ss" character back into seconds
    ## (character input). Both directions are vectorised. Any other
    ## input type triggers a warning and returns NA.
    if (is.numeric(rt)) {
        mins <- floor(rt / 60)
        secs <- round(rt - mins * 60)
        sprintf("%d:%02d", mins, secs)
    } else if (is.character(rt)) {
        ## vapply (instead of sapply) guarantees a numeric(0) result
        ## for empty input rather than an empty list.
        vapply(strsplit(rt, ":"), function(p) {
            p <- as.numeric(p)
            60 * p[1] + p[2]
        }, numeric(1))
    } else {
        ## BUG FIX: message read "numeric of character".
        warning("Input must be numeric or character.")
        NA
    }
}
#' @param fileIds numeric, could be a vector
#' @param spectrumIds numeric, could be a vector
#' @param nFiles numeric, max number of files
#' @param nSpectra numeric, max number of spectra
#' @return character, in the format F001.S0001 (zero-padded to the
#'     widths implied by nFiles/nSpectra)
#' @noRd
formatFileSpectrumNames <- function(fileIds, spectrumIds,
                                    nFiles = length(fileIds),
                                    nSpectra = length(spectrumIds)) {
    ## Number of digits needed to represent the largest id.
    widths <- ceiling(log10(c(nFiles, nSpectra) + 1L))
    if (length(fileIds) != 1L && length(spectrumIds) != length(fileIds)) {
        stop("Length of 'fileIds' has to be one or equal to ",
             "the length of 'spectrumIds'.")
    }
    fmt <- sprintf("F%%0%dd.S%%0%dd", widths[1L], widths[2L])
    sprintf(fmt, fileIds, spectrumIds)
}
utils.removePeaks_centroided <- function(int, t) {
    ## Zero out all intensities at or below the threshold 't'
    ## (centroided data: each point is an independent peak).
    replace(int, int <= t, 0L)
}
utils.removePeaks <- function(int, t) {
    ## Profile mode data: zero out entire peaks, i.e. contiguous runs
    ## of non-zero intensities, whose maximum is <= t.
    peakRanges <- as(int > 0L, "IRanges")
    toLow <- max(extractList(int, peakRanges)) <= t
    replaceROWS(int, peakRanges[toLow], 0L)
}
## For internal use - use utils.removePrecMz_Spectrum that will set
## the parameters based on data accessed directly in the spectrum
## object.
utils.removePrecMz <- function(mz, int, precMz, tolerance = 25e-6) {
    if (!is.numeric(precMz) || length(precMz) != 1L) {
        stop("precMz must be numeric of length 1.")
    }
    ## Locate the m/z value closest to the precursor (within
    ## tolerance); relaxedMatch is a package-internal helper.
    i <- relaxedMatch(precMz, mz, tolerance = tolerance)
    if (!is.na(i)) {
        ## Zero the entire peak (contiguous run of non-zero
        ## intensities) containing the matched position.
        peakRanges <- as(int > 0L, "IRanges")
        i <- findOverlaps(IRanges(i, width = 1L), peakRanges,
                          type = "within", select = "first")
        if (!is.na(i)) {
            int <- replaceROWS(int, peakRanges[i], 0L)
        }
    }
    int
}
## Remove the precursor peak from a Spectrum object; defaults to the
## spectrum's own precursor m/z when none is supplied.
utils.removePrecMz_Spectrum <- function(spectrum, precMz = NULL,
                                        tolerance = 25e-6) {
    if (is.null(precMz))
        precMz <- precursorMz(spectrum)
    if (!is.numeric(precMz))
        stop("precMz must either 'NULL' or numeric.")
    spectrum@intensity <- utils.removePrecMz(mz(spectrum),
                                             intensity(spectrum),
                                             precMz = precMz,
                                             tolerance = tolerance)
    spectrum
}
## Remove the precursor m/z peak from a peak list (a list with $mz and
## $int elements) by delegating to utils.removePrecMz.
##
## Removed an unused 'idx' computation (which(object$mz > precMz[1] &
## object$mz < precMz[2])) whose result was never used.
## NOTE(review): that dead code treated 'precMz' as a length-2 range,
## whereas utils.removePrecMz() requires a length-1 value -- confirm
## callers pass a single precursor m/z.
utils.removePrecMz_list <- function(object, precMz, tolerance = 25e-6) {
    object$int <- utils.removePrecMz(object$mz,
                                     object$int,
                                     precMz = precMz,
                                     tolerance = tolerance)
    object
}
#' Removes zeros from input except the ones that in the direct neighbourhood of
#' non-zero values.
#'
#' @param x \code{numeric}, vector to be cleaned
#' @param all \code{logical}, should all zeros be removed?
#' @param na.rm \code{logical}, should NAs removed before looking for zeros?
#' @return logical vector, \code{TRUE} for keeping the value
#' @note The return value for \code{NA} is always \code{FALSE}.
#' @examples
#' x <- c(1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0)
#' # T, T, F, T, T, T, T, T, T, T, T, F, F
#' r <- c(TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
#' FALSE, FALSE)
#' stopifnot(utils.clean(x) == r)
#' @noRd
utils.clean <- function(x, all = FALSE, na.rm = FALSE) {
    notNA <- !is.na(x)
    ## NA compares as FALSE here, so NAs are never kept on their own.
    nonZero <- notNA & x != 0
    if (all)
        return(nonZero)
    if (na.rm) {
        ## Expand neighbourhoods on the NA-stripped vector, then map the
        ## result back onto the original positions.
        notNA[notNA] <- utils.enableNeighbours(nonZero[notNA])
        notNA
    } else {
        utils.enableNeighbours(nonZero)
    }
}
#' Switch FALSE to TRUE in the direct neighborhod of TRUE.
#' (used in utils.clean)
#'
#' @param x logical
#' @return logical
#' @examples
#' x <- c(TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
#' FALSE, FALSE)
#' r <- c(TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
#' FALSE, FALSE)
#' stopifnot(utils.enableNeighbours(x) == r)
#' @noRd
utils.enableNeighbours <- function(x) {
    stopifnot(is.logical(x))
    n <- length(x)
    ## TRUE if the element itself, its right neighbour, or its left
    ## neighbour is TRUE.
    x | c(x[-1L], FALSE) | c(FALSE, x[-n])
}
## Build an ad hoc ReporterIons object centred on the m/z value(s) x,
## used to zoom plots onto specific m/z windows of width w.
zoom <- function(x, w = 0.05) {
    labels <- paste("xlim", x, sep = ".")
    new("ReporterIons",
        mz = x,
        width = w,
        name = "xlim",
        reporterNames = labels,
        pcol = rep("grey", length(x)))
}
## Build an isotopic impurity correction matrix for reporter-ion
## quantitation (see ?purityCorrect).
##
## @param x Number of reporter channels (4, 6, 8 or 10 select built-in
##     default matrices; any other value yields an identity matrix).
##     Ignored when 'filename' is provided.
## @param filename Optional csv file with the impurity percentages
##     (rows = reporters, first column = row names); must be square.
## @param edit If TRUE, open the matrix in an interactive editor
##     before returning it.
## @return The impurity matrix M (proportions, rows summing per
##     reporter), with row names prefixed by "% reporter".
makeImpuritiesMatrix <- function(x, filename, edit = TRUE) {
    if (!missing(filename)) {
        m <- read.csv(filename, row.names = 1)
        x <- ncol(m)
        if (ncol(m) != nrow(m))
            stop(paste0("Problem reading impurity matrix. Not square.\n",
                        "Please read '?purityCorrect' for details."))
        ## Columns of 'm' are ordered -ncharge, ..., -1, +1, ..., +ncharge;
        ## 'a' is the last negative-offset column, 'b' the first positive.
        ncharge <- x/2
        a <- (x/2)
        b <- (x/2) + 1
        res <- matrix(0, x, x)
        ## Diagonal: percentage of signal remaining in the own channel.
        diag(res) <- 100 - rowSums(m)
        ## Fill the k-th sub-/super-diagonals from the -k/+k offset columns.
        for (k in 1:ncharge) {
            diag(res[(1+k):x, 1:(x-k)]) <- m[(1+k):x, (a-k+1)]
            diag(res[1:(x-k), (1+k):x]) <- m[1:(x-k), (b+k-1)]
        }
        ## test <- matrix(0, 6, 6)
        ## diag(test) <- 100 - rowSums(m)
        ## diag(test[4:6, 1:3]) <- m[4:6, 1] ## col1: -3
        ## diag(test[3:6, 1:4]) <- m[3:6, 2] ## col2: -2
        ## diag(test[2:6, 1:5]) <- m[2:6, 3] ## col3: -1
        ## diag(test[1:5, 2:6]) <- m[1:5, 4] ## col4: +1
        ## diag(test[1:4, 3:6]) <- m[1:4, 5] ## col5: +2
        ## diag(test[1:3, 4:6]) <- m[1:3, 6] ## col6: +3
        ## test <- test/100
        ## Convert percentages to proportions.
        M <- res/100
        rownames(M) <- colnames(M) <- rownames(m)
    } else {
        if (x==4) {
            ## Default iTRAQ 4-plex impurity matrix.
            M <- matrix(c(0.929,0.059,0.002,0.000,
                          0.020,0.923,0.056,0.001,
                          0.000,0.030,0.924,0.045,
                          0.000,0.001,0.040,0.923),
                        nrow=4, byrow = TRUE)
            rownames(M) <- colnames(M) <-
                reporterNames(iTRAQ4)
        } else if (x == 6) {
            ## Default TMT 6-plex impurity matrix.
            M <- matrix(c(0.939, 0.061, 0.000, 0.000, 0.000, 0.000,
                          0.005, 0.928, 0.067, 0.000, 0.000, 0.000,
                          0.000, 0.011, 0.947, 0.042, 0.000, 0.000,
                          0.000, 0.000, 0.017, 0.942, 0.041, 0.000,
                          0.000, 0.000, 0.000, 0.016, 0.963, 0.021,
                          0.000, 0.000, 0.000, 0.002, 0.032, 0.938),
                        nrow = 6, byrow = TRUE)
            rownames(M) <- colnames(M) <-
                reporterNames(TMT6)
        } else if (x == 8) {
            ## iTRAQ 8-plex values are shipped as a csv file with the package.
            f <- dir(system.file("extdata", package = "MSnbase"),
                     pattern = "iTRAQ8plexPurityCorrection",
                     full.names = TRUE)
            M <- makeImpuritiesMatrix(filename = f, edit = FALSE)
            rownames(M) <- colnames(M) <- c(113:119, 121)
        } else if (x == 10) {
            ## see TMT10.R
            M <- structure(c(0.95, 0, 0.003, 0, 0, 0, 0, 0, 0, 0, 0, 0.94, 0,
                             0.004, 0, 0, 0, 0, 0, 0, 0.05, 0, 0.949, 0, 0.006,
                             0, 0, 0, 0, 0, 0, 0.058, 0, 0.955, 0, 0.008, 0,
                             0.001, 0, 0, 0, 0, 0.048, 0, 0.964, 0, 0.014, 0, 0,
                             0, 0, 0, 0, 0.041, 0, 0.957, 0, 0.015, 0, 0.002, 0,
                             0, 0, 0, 0.03, 0, 0.962, 0, 0.017, 0, 0, 0, 0, 0,
                             0, 0.035, 0, 0.928, 0, 0.02, 0, 0, 0, 0, 0, 0,
                             0.024, 0, 0.965, 0, 0, 0, 0, 0, 0, 0, 0, 0.024, 0,
                             0.956),
                           .Dim = c(10L, 10L),
                           .Dimnames = list(
                               c("126", "127N", "127C", "128N", "128C", "129N",
                                 "129C", "130N", "130C", "131"),
                               c("126", "127N", "127C", "128N", "128C", "129N",
                                 "129C", "130N", "130C", "131")))
        } else {
            ## Unknown plexing: fall back to no correction (identity).
            M <- diag(x)
        }
    }
    rownames(M) <- paste("% reporter", rownames(M))
    if (edit) M <- edit(M)
    return(M)
}
utils.getMzDelta <- function(spectrum, percentage) {
    ## Computes the m/z differences between all the
    ## 'percentage' top intensity peaks in a spectrum
    ## Contributed by Guangchuang Yu for the plotMzDelta QC
    mzs <- mz(spectrum)
    ints <- intensity(spectrum)
    ord <- order(ints, decreasing = TRUE)
    ## Keep only the top 'percentage' most intense peaks.
    sel <- mzs[ord[seq_len(floor(length(ord) * percentage))]]
    deltas <- vector("list", length = length(sel))
    k <- 1L
    ## Pairwise absolute differences, peeling off one peak at a time.
    while (length(sel) > 1) {
        current <- sel[1]
        sel <- sel[-1]
        deltas[[k]] <- abs(sel - current)
        k <- k + 1L
    }
    unlist(deltas)
}
## Same as utils.getMzDelta but for a plain peak list (list with $mz
## and $int elements).
utils.getMzDelta_list <- function(object, percentage) {
    ord <- order(object$int, decreasing = TRUE)
    sel <- object$mz[ord[seq_len(floor(length(ord) * percentage))]]
    deltas <- vector("list", length = length(sel))
    k <- 1L
    ## Pairwise absolute differences, peeling off one peak at a time.
    while (length(sel) > 1) {
        current <- sel[1]
        sel <- sel[-1]
        deltas[[k]] <- abs(sel - current)
        k <- k + 1L
    }
    unlist(deltas)
}
##' Fill empty elements ("" or NA) of a vector with the last preceding
##' non-empty value (last observation carried forward).
##'
##' Fixes two defects of the previous implementation: the early-return
##' test used `!any(x != "")` ("all elements empty") instead of
##' `!any(x == "")` ("no element empty"), and the loop over
##' `2:length(x)` iterated `c(2, 1)` for inputs of length < 2, growing
##' a length-1 vector to length 2.
##'
##' @param x A vector; NA and "" entries are replaced.
##' @return The vector with empty entries filled from the left.
fillUp <- function(x) {
    ## Fast path: nothing to fill. && short-circuits so the second test
    ## is never evaluated when x contains NAs.
    if (!any(is.na(x)) && !any(x == ""))
        return(x)
    ## seq_along(x)[-1] is empty for length <= 1 inputs.
    for (i in seq_along(x)[-1]) {
        if (is.na(x[i]) || (x[i] == ""))
            x[i] <- x[i - 1]
    }
    x
}
##' Return the name of variable \code{varname} in call \code{match_call}.
##'
##' @title Return a variable name
##' @param match_call An object of class \code{call}, as returned by \code{match.call}.
##' @param varname An \code{character} of length 1 which is looked up in \code{match_call}.
##' @return A \code{character} with the name of the variable passed as parameter
##' \code{varname} in parent close of \code{match_call}.
##' @examples
##' a <- 1
##' f <- function(x, y)
##' MSnbase:::getVariableName(match.call(), "x")
##' f(x = a)
##' f(y = a)
##' @author Laurent Gatto
getVariableName <- function(match_call, varname) {
    ## Work on the call as a plain list so arguments can be looked up by
    ## name.
    match_call <- as.list(match_call)
    ## Only the first requested name is considered.
    varname <- varname[1]
    mcx <- match_call[[varname]]
    ## Recursively flatten nested call objects (e.g. x = f(g(a))) until
    ## only length-1 components (symbols/atomics) remain.
    while (any(sapply(mcx, length) != 1))
        mcx <- unlist(lapply(mcx, as.list))
    ## The last component is the innermost symbol, i.e. the variable name.
    tail(as.character(mcx), n = 1)
}
#' rowwise max, similar to rowwise mean via rowMeans
#'
#' @param x matrix
#' @param na.rm logical
#' @return double vector with maximum values per row
#' @seealso Biobase::rowMax (could not handle missing values/NA)
#' @noRd
## Row-wise maximum of a matrix; unlike Biobase::rowMax this can
## handle missing values (na.rm = TRUE treats NA as -Inf).
.rowMaxs <- function(x, na.rm = FALSE) {
    stopifnot(is.matrix(x))
    if (na.rm)
        x[is.na(x)] <- -Inf
    nr <- nrow(x)
    ## max.col yields the column index of each row maximum (first tie);
    ## convert (row, col) pairs into linear indices.
    idx <- (max.col(x, ties.method = "first") - 1L) * nr + seq_len(nr)
    x[idx]
}
#' summarise rows by an user-given function
#'
#' @param x matrix
#' @param fun function to summarise rows, if \code{fun} equals
#' \code{sum}/\code{mean} the more efficient \code{rowSums}/\code{rowMeans} are
#' used.
#' @param ... further arguments passed to \code{fun}
#' @return double, summarised rows
#' @noRd
## Summarise matrix rows with an arbitrary function; sum and mean are
## dispatched to the optimised rowSums/rowMeans.
.summariseRows <- function(x, fun, ...) {
    stopifnot(is.matrix(x), is.function(fun))
    if (identical(fun, sum))
        return(rowSums(x, ...))
    if (identical(fun, mean))
        return(rowMeans(x, ...))
    apply(x, 1L, fun, ...)
}
#' find top n indices of each group
#'
#' @param x matrix
#' @param groupBy factor/character of length \code{nrow(x)}
#' @param n consider just the top \code{n} values
#' @param fun function to summarise rows
#' @param ... further arguments passed to \code{fun}
#' @return double, indices sorted by summarising function \code{fun}
#' @noRd
.topIdx <- function(x, groupBy, n, fun, ...) {
    if (n < 1)
        stop(sQuote("n"), " has to be greater or equal than 1.")
    if (nrow(x) != length(groupBy))
        stop(sQuote("nrow(x)"), " and ", sQuote("length(groupBy)"),
             " have to be equal.")
    scores <- .summariseRows(x, fun, ...)
    ord <- order(as.double(scores), decreasing = TRUE, na.last = TRUE)
    ## Take at most the first n (highest-scoring) row indices per group;
    ## groups with fewer than n rows yield NA padding, dropped below.
    top <- unlist(lapply(split(ord, groupBy[ord]), "[", 1:n),
                  use.names = FALSE)
    top[!is.na(top)]
}
## Computes header from assay data by-passing cache
## Computes header from assay data by-passing cache
.header <- function(object) {
    ## Empty experiment: return an empty header.
    if (length(object) == 0)
        return(data.frame())
    if (all(msLevel(object) == 1)) {
        ## MS1-only data: precursor-related columns and collision energy
        ## are undefined and filled with NA.
        ln <- length(object)
        nas <- rep(NA, ln)
        hd <- list(fileIdx = fromFile(object),
                   retention.time = rtime(object),
                   precursor.mz = nas,
                   precursor.intensity = nas,
                   charge = nas,
                   peaks.count = peaksCount(object),
                   tic = tic(object),
                   ionCount = ionCount(object),
                   ms.level = msLevel(object),
                   acquisition.number = acquisitionNum(object),
                   collision.energy = nas)
    } else {
        ## tbl <- table(fromFile(object))
        ## idx <- as.numeric(unlist(apply(tbl, 1, function(x) 1:x)))
        ## MSn data: populate precursor columns from the accessors.
        hd <- list(fileIdx = fromFile(object),
                   retention.time = rtime(object),
                   precursor.mz = precursorMz(object),
                   precursor.intensity = precursorIntensity(object),
                   charge = precursorCharge(object),
                   peaks.count = peaksCount(object),
                   tic = tic(object),
                   ionCount = ionCount(object),
                   ms.level = msLevel(object),
                   acquisition.number = acquisitionNum(object),
                   collision.energy = collisionEnergy(object))
    }
    ## items are either a numeric or a list of integer() - keep former only
    sel <- sapply(hd, function(i) !is.list(i))
    hd <- as.data.frame(hd[sel])
    return(hd)
}
## Verify that the cached header matches the one recomputed from the
## assay data; trivially TRUE when caching is disabled (level 0).
checkHeader <- function(object) {
    if (object@.cache$level == 0)
        return(TRUE)
    identical(header(object), .header(object))
}
updateSpectrum2 <- function(x) {
    ## Version 0.2.0 of Spectrum has now a tic slot (MSnbase 1.5.3)
    ## Copy every pre-existing slot over to a freshly created object,
    ## then initialise the new 'tic' slot.
    copied <- c("merged", "precScanNum", "precursorMz",
                "precursorIntensity", "precursorCharge",
                "collisionEnergy", "msLevel", "peaksCount", "rt",
                "acquisitionNum", "scanIndex", "mz", "intensity",
                "fromFile", "centroided")
    newx <- new("Spectrum2")
    for (s in copied)
        slot(newx, s) <- slot(x, s)
    newx@tic <- 0
    if (validObject(newx))
        return(newx)
}
## Update every spectrum of an MSnExp in place in its assay data
## environment (see updateSpectrum2).
updateMSnExp <- function(x) {
    env <- assayData(x)
    for (fn in featureNames(x))
        assign(fn, updateSpectrum2(x[[fn]]), envir = env)
    if (validObject(x))
        return(x)
}
## Impurity correction for 4-plex reporter intensities using Cramer's
## rule (see Shadforth et al. 2005, BMC Genomics).
##
## Fix: in the d4 matrix, row 3 used imp["j"] where the corresponding
## row of C (and of d1-d3) uses imp["g"] -- an apparent transcription
## typo that corrupted the 4th corrected intensity.
##
## @param object An MSnSet with 4 reporter channels (114-117).
## @param imp A named numeric of 16 impurity percentages (names a-p,
##     row-major over the 4x4 impurity table); defaults to the iTRAQ
##     4-plex values below.
## @return The corrected MSnSet; if the correction matrix is singular
##     the object is returned unchanged with a log entry.
cramer4 <- function(object, imp) {
    ## see Shadford et al. 2005, BMC Genomics
    if (missing(imp)) {
        impM <- matrix(c(0.0, 1.0, 5.9, 0.2,
                         0.0, 2.0, 5.6, 0.1,
                         0.0, 3.0, 4.5, 0.1,
                         0.1, 4.0, 3.5, 0.1),
                       nrow = 4, byrow = TRUE)
        colnames(impM) <- c("-2", "-1", "+1", "+2")
        rownames(impM) <- 114:117
        imp <- as.numeric(impM)
        names(imp) <- letters[1:length(imp)]
    }
    ## Diagonal terms: percentage of each reporter remaining in its own
    ## channel.
    w <- (100 - (imp["a"] + imp["e"] + imp["i"] + imp["m"]))
    x <- (100 - (imp["b"] + imp["f"] + imp["j"] + imp["n"]))
    y <- (100 - (imp["c"] + imp["g"] + imp["k"] + imp["o"]))
    z <- (100 - (imp["d"] + imp["h"] + imp["l"] + imp["p"]))
    C <- matrix(c(w, imp["f"], imp["c"], 0,
                  imp["i"], x, imp["g"], imp["d"],
                  imp["m"], imp["j"], y, imp["h"],
                  0, imp["n"], imp["k"], z),
                ncol = 4, byrow = TRUE)
    if (det(C) == 0) {
        warning("Determinant of C is 0, correction impossible")
        object@processingData@processing <-
            c(object@processingData@processing,
              "No impurity correction possible, det(C) is 0")
    } else {
        e <- exprs(object)
        ## Cramer's rule on t(C): each d_r is t(C) with its r-th row
        ## replaced by the observed intensities .e.
        res <- apply(e, 1, function(.e) {
            d1 <- matrix(c(.e,
                           imp["f"], x, imp["j"], imp["n"],
                           imp["c"], imp["g"], y, imp["k"],
                           0, imp["d"], imp["h"], z),
                         ncol = 4, byrow = TRUE)
            d2 <- matrix(c(w, imp["i"], imp["m"], 0,
                           .e,
                           imp["c"], imp["g"], y, imp["k"],
                           0, imp["d"], imp["h"], z),
                         ncol = 4, byrow = TRUE)
            d3 <- matrix(c(w, imp["i"], imp["m"], 0,
                           imp["f"], x, imp["j"], imp["n"],
                           .e,
                           0, imp["d"], imp["h"], z),
                         ncol = 4, byrow = TRUE)
            d4 <- matrix(c(w, imp["i"], imp["m"], 0,
                           imp["f"], x, imp["j"], imp["n"],
                           imp["c"], imp["g"], y, imp["k"],  ## was imp["j"]
                           .e),
                         ncol = 4, byrow = TRUE)
            res <- c(det(d1)/det(C),
                     det(d2)/det(C),
                     det(d3)/det(C),
                     det(d4)/det(C))
            return(res)
        })
        res <- t(res)
        rownames(res) <- featureNames(object)
        colnames(res) <- sampleNames(object)
        object@processingData@processing <-
            c(object@processingData@processing,
              "Impurity correction using Cramer's rule.")
        exprs(object) <- res
    }
    if (validObject(object))
        return(object)
}
## cramer6 <- function(x, imp) {
## if (missing(imp)) {
## imp <- c(0, 0, 0, 6.1, 0, 0,
## 0, 0, 0.5, 6.7, 0, 0,
## 0, 0, 1.1, 4.2, 0, 0,
## 0, 0, 1.7, 4.1, 0, 0,
## 0, 0, 1.6, 2.1, 0, 0,
## 0, 0.2, 3.2, 2.8, 0, 0)
## names(imp) <- letters[1:length(imp)]
## impM <- matrix(imp, nrow = 6, byrow = TRUE)
## colnames(impM) <- c("-3", "-2", "-1", "+1", "+2", "+3")
## rownames(impM) <- 126:131
## imp <- as.numeric(imp)
## }
## return(FALSE)
## }
## Translate a "0"/"1" pattern string into a logical column selector
## for the matrix x ("1" marks columns to keep).
getColsFromPattern <- function(x, pattern) {
    if (missing(pattern))
        stop(sQuote("pattern"), " must not be missing.")
    if (!is.matrix(x))
        stop(sQuote("x"), " must be a matrix.")
    if (nchar(pattern) != ncol(x))
        stop("The ", sQuote("pattern"), " must be equal to the number of columns.")
    chars <- strsplit(pattern, "")[[1L]]
    if (!all(unique(chars) %in% c("0", "1")))
        stop(sQuote("pattern"), " must be composed of '0' or '1' defining columns",
             " with or without 'NA's.")
    chars == "1"
}
## Logical row selector: TRUE for rows with no NA in the columns
## selected by the "0"/"1" pattern.
getRowsFromPattern <- function(x, pattern) {
    wanted <- getColsFromPattern(x, pattern)
    sub <- x[, wanted, drop = FALSE]
    rowSums(is.na(sub)) == 0
}
## Logical row filter on missingness: either keep rows whose NA
## proportion is at most pNA, or (when 'pattern' is given) rows
## without NA in the pattern-selected columns.
.filterNA <- function(x, pNA = 0L, pattern) {
    if (!is.matrix(x))
        stop(sQuote("x"), " must be a matrix.")
    if (!is.numeric(pNA))
        stop(sQuote("pNA"), " must be numeric.")
    if (length(pNA) > 1)
        stop(sQuote("pNA"), " must be of length one.")
    if (!missing(pattern))
        return(getRowsFromPattern(x, pattern))
    ## Clamp pNA into [0, 1] before thresholding the NA proportion.
    pNA <- min(max(pNA, 0), 1)
    rowSums(is.na(x)) / ncol(x) <= pNA
}
nologging <- function(object, n = 1) {
    ## removes the last n entries from
    ## object@processingData@processing
    log <- object@processingData@processing
    len <- length(log)
    drop <- seq(len, length.out = n, by = -1)
    object@processingData@processing <- log[-drop]
    stopifnot(length(object@processingData@processing) == (len - n))
    if (validObject(object))
        return(object)
}
## Append a (optionally timestamped) message to the object's
## processing log.
logging <- function(object, msg, date. = TRUE) {
    entry <- if (date.) paste0(msg, " [", date(), "]") else msg
    object@processingData@processing <-
        c(object@processingData@processing, entry)
    if (validObject(object))
        return(object)
}
##' Given a text spread sheet \code{f} and a \code{pattern} to
##' be matched to its header (first line in the file), the function
##' returns the matching columns names or indices of the
##' corresponding \code{data.frame}.
##'
##' The function starts by reading the first line of the file (or connection)
##' \code{f} with \code{\link{readLines}}, then splits it
##' according to the optional \code{...} arguments (it is important to
##' correctly specify \code{\link{strsplit}}'s \code{split} character vector here)
##' and then matches \code{pattern} to the individual column names using
##' \code{\link{grep}}.
##'
##' Similarly, \code{getEcols} can be used to explore the column names and
##' decide for the appropriate \code{pattern} value.
##'
##' These functions are useful to check the parameters to be provided to
##' \code{\link{readMSnSet2}}.
##'
##' @title Returns the matching column names of indices.
##' @param f A connection object or a \code{character} string to be
##' read in with \code{readLines(f, n = 1)}.
##' @param pattern A \code{character} string containing a regular
##' expression to be matched to the file's header.
##' @param ... Additional parameters passed to \code{\link{strsplit}}
##' to split the file header into individual column names.
##' @param n An \code{integer} specifying which line in file \code{f}
##' to grep (get). Default is 1. Note that this argument must be
##' named.
##' @return Depending on \code{value}, the matching column names of
##' indices. In case of \code{getEcols}, a \code{character} of
##' column names.
##' @seealso \code{\link{readMSnSet2}}
##' @author Laurent Gatto
grepEcols <- function(f, pattern, ..., n = 1) {
    ## Split line n of the file into column names, then grep the pattern.
    cols <- strsplit(readLines(f, n), ...)[n][[1]]
    grep(pattern, cols)
}
##' @rdname grepEcols
getEcols <- function(f, ..., n = 1) {
    ## Return the column names found on line n of the file.
    header <- readLines(f, n)[n]
    strsplit(header, ...)[[1]]
}
## Total size of an MSnExp: the object itself plus every spectrum
## stored in its assay data environment.
MSnExp.size <- function(x) {
    spectra <- eapply(assayData(x), object.size)
    object.size(x) + sum(unlist(unname(spectra)))
}
## convert vector of length n to a semicolon separated character
## vector of length 1
utils.vec2ssv <- function(vec, sep = ";")
    paste(vec, collapse = sep)
## converts an n by m data.frame into an 1 by m data.frame where the
## vector columns of length n are converted to a semicolon separated
## character vector of length 1
## Collapse an n-by-m data.frame into a 1-by-m data.frame where each
## column becomes a single sep-separated character value; columns in
## 'exclude' (numeric, character or logical index) instead keep only
## their first element.
##
## Fix: the 'sep' argument was previously ignored -- utils.vec2ssv was
## called without forwarding it, so non-default separators had no
## effect.
utils.vec2ssv.data.frame <- function(x, sep = ";", exclude) {
    if (nrow(x) == 1L)
        return(x)
    nms <- names(x)
    if (missing(exclude)) {
        ans <- lapply(x, utils.vec2ssv, sep = sep)
    } else {
        if (is.numeric(exclude)) {
            x0 <- x[, exclude, drop = FALSE]
            x <- x[, -exclude, drop = FALSE]
        } else if (is.character(exclude)) {
            ## convert to logical, making sure that the column names
            ## to be excluded are present
            stopifnot(all(exclude %in% names(x)))
            exclude <- names(x) %in% exclude
            x0 <- x[, exclude, drop = FALSE]
            x <- x[, !exclude, drop = FALSE]
        } else if (is.logical(exclude)) {
            x0 <- x[, exclude, drop = FALSE]
            x <- x[, !exclude, drop = FALSE]
        } else {
            stop("Can only exclude numeric, characters or logicals.")
        }
        ans <- lapply(x, utils.vec2ssv, sep = sep)
        ## Excluded columns are truncated to their first value.
        x0 <- lapply(x0, head, n = 1L)
        ans <- c(x0, ans)
        ans <- ans[nms] ## preserve original order
    }
    data.frame(ans, stringsAsFactors = FALSE)
}
## convert a semicolon separated character vector of length 1 to a
## vector of length n
## Split a sep-separated character value back into its elements;
## returns a list of vectors unless 'unlist' is TRUE.
utils.ssv2vec <- function(ssv, sep = ";", unlist = TRUE) {
    parts <- strsplit(ssv, sep)
    if (unlist)
        base::unlist(parts)
    else
        parts
}
## Collapse each list element into a sep-separated string.
utils.list2ssv <- function(l, sep = ";") {
    collapsed <- lapply(l, utils.vec2ssv, sep = sep)
    unlist(collapsed)
}
## Split sep-separated values, keeping the result as a list.
utils.ssv2list <- function(ssv, sep = ";")
    utils.ssv2vec(ssv, sep = sep, unlist = FALSE)
## similar to merge(..., all.x=TRUE) but if called multiple times
## existing columns are not duplicated (with/without suffixes)
## but filled/overwritten using the values from y
## params: x, y, by, by.x, by.y see ?merge
## exclude: character, columns which should be excluded
## order: logical, preserve order?
utils.leftJoin <- function(x, y, by, by.x=by, by.y=by,
                           exclude=character(), order=TRUE) {
    ## create character ids to allow ids covering several columns
    rxc <- do.call(paste, c(x[, by.x, drop=FALSE], sep=";"))
    ryc <- do.call(paste, c(y[, by.y, drop=FALSE], sep=";"))
    ## determine matching rows
    ## ryid: for each row of x, the matching row of y (0 = no match);
    ## rjid: for each row of y, the matching row of x (0 = no match).
    ryid <- match(rxc, ryc, 0L)
    rjid <- match(ryc, rxc, 0L)
    ## Drop the non-matching (0) entries; both vectors now pair up the
    ## common rows.
    ryid <- ryid[ryid > 0]
    rjid <- rjid[rjid > 0]
    ## preserve order?
    if (order) {
        rjid <- sort(rjid)
    }
    cnx <- colnames(x)
    cny <- colnames(y)
    ## exclude columns
    ## The join keys of y (by.y) are dropped as well: their values are
    ## already present in x.
    keepx <- !cnx %in% exclude
    keepy <- !cny %in% c(exclude, by.y)
    cnx <- cnx[keepx]
    cny <- cny[keepy]
    x <- x[, keepx, drop=FALSE]
    y <- y[, keepy, drop=FALSE]
    ## start joining
    joined <- x[, cnx]
    ## only import equal columns from y
    ## cjid/cyid pair up columns present in both x and y so the shared
    ## columns get overwritten with y's values for matched rows.
    cjid <- match(cny, cnx, 0L)
    cyid <- match(cnx, cny, 0L)
    cjid <- cjid[cjid > 0]
    cyid <- cyid[cyid > 0]
    joined[rjid, cjid] <- y[ryid, cyid]
    ## add missing columns from y
    ## Columns only found in y are created as NA and filled for matched
    ## rows.
    cym <- setdiff(cny, cnx)
    if (length(cym)) {
        joined[, cym] <- NA
        joined[rjid, cym] <- y[ryid, cym]
    }
    return(joined)
}
## @param featureData fData(msexp)/fData(msset)
## @param id output of mzID::flatten(mzID(filename))
## @param fcol column name of fData data.frame used for merging
## @param icol column name of idData data.frame used for merging
## @noRd
utils.mergeSpectraAndIdentificationData <- function(featureData, id,
                                                    fcol, icol, acc,
                                                    desc, pepseq,
                                                    rank = "rank") {
    ## mzR::acquisitionNum (stored in fData()[, "acquisition.number"] and
    ## mzID::acquisitionnum should be identical
    if (!all(fcol %in% colnames(featureData))) {
        stop("The column(s) ", sQuote(fcol),
             " are not all in the feature data.frame!")
    }
    if (!all(icol %in% colnames(id))) {
        stop("The column(s) ", sQuote(icol),
             " are not all in the identification data.frame!")
    }
    if (sum(fcol %in% colnames(featureData)) != sum(icol %in% colnames(id))) {
        stop("The number of selected column(s) in the feature and identification ",
             "data don't match!")
    }
    ## sort id data to ensure the best matching peptide is on top in case of
    ## multiple matching peptides
    o <- do.call("order", lapply(c(icol, rank), function(j) id[, j]))
    id <- id[o, ]
    ## use flat version of accession/description if multiple ones are available
    ## (ave() collapses the per-group values into a ";"-separated string).
    id[, acc] <- ave(as.character(id[, acc]), id[, icol], FUN = utils.vec2ssv)
    id[, desc] <- ave(as.character(id[, desc]), id[, icol], FUN = utils.vec2ssv)
    ## remove duplicated entries
    ## After sorting by rank, the first (best) match per spectrum is kept.
    id <- id[!duplicated(id[, icol]), ]
    featureData <- utils.leftJoin(
        x = featureData, y = id, by.x = fcol, by.y = icol,
        exclude = c("spectrumid",  # vendor specific nativeIDs
                    "spectrumID",
                    "spectrumFile") # is stored in fileId + MSnExp@files
    )
    ## number of members in the protein group
    featureData$nprot <- sapply(utils.ssv2list(featureData[, acc]),
                                function(x) {
                                    n <- length(x)
                                    ## unidentified features keep NA rather
                                    ## than a spurious count of 1
                                    if (n == 1 && is.na(x)) return(NA)
                                    n
                                })
    ## number of peptides observed for each protein
    featureData$npep.prot <- as.integer(ave(featureData[, acc],
                                            featureData[, pepseq],
                                            FUN = length))
    ## number of PSMs observed for each protein
    featureData$npsm.prot <- as.integer(ave(featureData[, acc],
                                            featureData[, acc],
                                            FUN = length))
    ## number of PSMs observed for each peptide
    featureData$npsm.pep <- as.integer(ave(featureData[, pepseq],
                                           featureData[, pepseq],
                                           FUN = length))
    return(featureData)
}
## Remove unidentified spectra from an object, logging the operation.
##
## Fix: the "keep must be a logical" error message had the quotes
## around the whole sentence ("'keep must be a logical.'") instead of
## around the argument name.
##
## @param object An object with featureData and a processing log.
## @param fcol The feature variable holding the identification data;
##     features with NA in this column are considered unidentified.
## @param keep Either NULL (drop NA-in-fcol features) or a logical of
##     length nrow(fData(object)) flagging the features to retain.
utils.removeNoId <- function(object, fcol, keep) {
    if (!fcol %in% fvarLabels(object))
        stop(fcol, " not in fvarLabels(",
             getVariableName(match.call(), 'object'), ").")
    if (is.null(keep)) noid <- is.na(fData(object)[, fcol])
    else {
        if (!is.logical(keep))
            stop("'keep' must be a logical.")
        if (length(keep) != nrow(fData(object)))
            stop("The length of 'keep' does not match the number of spectra.")
        noid <- !keep
    }
    object <- object[!noid, ]
    ## Replace the subsetting log entry with a more informative one.
    object <- nologging(object, 1)
    object <- logging(object, paste0("Filtered ", sum(noid),
                                     " unidentified peptides out"))
    if (validObject(object))
        return(object)
}
## Keep only features assigned to exactly one protein, logging how
## many were removed.
##
## Fix: the removal count was computed as sum(!keep) where 'keep' held
## the (strictly positive) indices returned by which(), so the logged
## count was always 0. It is now the number of rows not retained
## (features with nprot != 1 or NA).
utils.removeMultipleAssignment <- function(object, nprot = "nprot") {
    single <- which(fData(object)[, nprot] == 1)
    nremoved <- nrow(fData(object)) - length(single)
    object <- object[single, ]
    ## Replace the subsetting log entry with a more informative one.
    object <- nologging(object, 1)
    object <- logging(object,
                      paste0("Removed ", nremoved,
                             " features assigned to multiple proteins"))
    if (validObject(object))
        return(object)
}
## Per-spectrum-file identification summary: one row per spectrum file
## with its id file and the fraction of identified (non-NA idFile)
## spectra.
utils.idSummary <- function(fd) {
    if (any(!c("spectrumFile", "idFile") %in% colnames(fd)))
        stop("No quantification/identification data found! Did you run ",
             sQuote("addIdentificationData"), "?")
    smry <- fd[!duplicated(fd$spectrumFile), c("spectrumFile", "idFile")]
    smry$coverage <- sapply(smry$spectrumFile, function(f)
        round(mean(!is.na(fd$idFile[fd$spectrumFile == f])), 3))
    rownames(smry) <- NULL
    colnames(smry) <- c("spectrumFile", "idFile", "coverage")
    smry
}
## Convenience wrapper: drop unidentified spectra first, then features
## matched to more than one protein. fData() is re-read after the
## first filter because the object may have been subset.
utils.removeNoIdAndMultipleAssignments <-
    function(object, pepseq = "sequence", nprot = "nprot") {
        if (anyNA(fData(object)[, pepseq]))
            object <- removeNoId(object, pepseq)
        if (any(fData(object)[, nprot] > 1))
            object <- removeMultipleAssignment(object, nprot)
        object
    }
##' Compares equality of all members of a list.
##'
##' @title Tests equality of list elements class
##' @param x A \code{list}.
##' @param class A \code{character} defining the expected class.
##' @param valid A \code{logical} defining if all elements should be
##' tested for validity. Default is \code{TRUE}.
##' @return \code{TRUE} if all elements of \code{x} inherit from
##' \code{class}.
##' @author Laurent Gatto
##' @examples
##' listOf(list(), "foo")
##' listOf(list("a", "b"), "character")
##' listOf(list("a", 1), "character")
listOf <- function(x, class, valid = TRUE) {
    rightClass <- all(sapply(x, inherits, class))
    ## Validity is only checked when requested.
    allValid <- if (valid) all(sapply(x, validObject)) else TRUE
    rightClass & allValid
}
##' Calculates a non-parametric version of the coefficient of
##' variation where the standard deviation is replaced by the median
##' absolute deviations (see \code{\link{mad}} for details) and
##' divided by the absolute value of the mean.
##'
##' Note that the \code{mad} of a single value is 0 (as opposed to
##' \code{NA} for the standard deviation, see example below).
##'
##'
##' @title Non-parametric coefficient of variation
##' @param x A \code{numeric}.
##' @param na.rm A \code{logical} (default is \code{TRUE}) indicating
##' whether \code{NA} values should be stripped before the computation
##' of the median absolute deviation and mean.
##' @return A \code{numeric}.
##' @author Laurent Gatto
##' @examples
##' set.seed(1)
##' npcv(rnorm(10))
##' replicate(10, npcv(rnorm(10)))
##' npcv(1)
##' mad(1)
##' sd(1)
npcv <- function(x, na.rm = TRUE) {
    ## Robust CV: median absolute deviation in place of the standard
    ## deviation, scaled by the absolute mean.
    mad(x, na.rm = na.rm) / abs(mean(x, na.rm = na.rm))
}
##' Compares two \code{\linkS4class{MSnSet}} instances. The
##' \code{qual} and \code{processingData} slots are generally omitted.
##'
##' @title Compare two MSnSets
##' @param x First MSnSet
##' @param y Second MSnSet
##' @param qual Should the \code{qual} slots be compared? Default is
##' \code{FALSE}.
##' @param proc Should the \code{processingData} slots be compared?
##' Default is \code{FALSE}.
##' @return A \code{logical}
##' @author Laurent Gatto
compareMSnSets <- function(x, y, qual = FALSE, proc = FALSE) {
    if (!proc) {
        ## Neutralise @processingData on both sides before comparing.
        blank <- new("MSnProcess")
        x@processingData <- blank
        y@processingData <- blank
    }
    ## Copying y's @qual into x removes it from the comparison.
    if (!qual)
        x@qual <- y@qual
    all.equal(x, y)
}
##' Similar to rowsum but calculates the mean. It is slower than colMeans but
##' supports grouping variables. See ?rowsum for details.
##' @param x matrix
##' @param group a vector/factor of grouping
##' @param reorder if TRUE the rows are ordered by `sort(unique(group))`
##' @param na.rm logical. Should missing values (including `NaN`) be omitted
##' @return matrix
##' @author Sebastian Gibb <mail@@sebastiangibb.de>
##' @noRd
## Group-wise row means (like rowsum but averaging). With na.rm = TRUE
## each cell is divided by its group's count of non-NA observations.
rowmean <- function(x, group, reorder = FALSE, na.rm = FALSE) {
    counts <- x
    if (na.rm) {
        counts <- !is.na(x)
        mode(counts) <- "numeric"
    } else {
        counts[] <- 1
    }
    groupCounts <- rowsum(counts, group = group, reorder = reorder,
                          na.rm = na.rm)
    groupSums <- rowsum(x, group = group, reorder = reorder, na.rm = na.rm)
    groupSums / groupCounts
}
##' Similar to rowsum but calculates the sd.
##' See ?rowsum for details.
##' @param x matrix
##' @param group a vector/factor of grouping
##' @param reorder if TRUE the rows are ordered by `sort(unique(group))`
##' @param na.rm logical. Should missing values (including `NaN`) be omitted
##' @return matrix
##' @author Sebastian Gibb <mail@@sebastiangibb.de>
##' @noRd
## Group-wise row standard deviations (like rowsum but computing sd).
rowsd <- function(x, group, reorder = FALSE, na.rm = FALSE) {
    counts <- x
    if (na.rm) {
        counts <- !is.na(x)
        mode(counts) <- "numeric"
    } else {
        counts[] <- 1
    }
    n <- rowsum(counts, group = group, reorder = reorder, na.rm = na.rm)
    n[n == 1] <- NA_real_  # return NA if n == 1 (similar to sd)
    ## Var = E[X^2] - E[X]^2; Bessel's correction applied via n/(n - 1).
    v <- rowmean(x * x, group = group, reorder = reorder, na.rm = na.rm) -
        rowmean(x, group = group, reorder = reorder, na.rm = na.rm)^2L
    sqrt(v * n / (n - 1L))
}
## trimws method for data.frames: trim whitespace in every character
## column; columns of other types are left untouched.
## Fix: iterate with seq_len(ncol(x)) -- 1:ncol(x) yields c(1, 0) for
## a zero-column data.frame and would fail.
setMethod("trimws", "data.frame",
          function(x, which, ...) {
              for (i in seq_len(ncol(x))) {
                  if (inherits(x[, i], "character"))
                      x[, i] <- base::trimws(x[, i], which)
              }
              x
          })
## trimws method for MSnSet: trim the featureData and record the
## operation in the processing log.
setMethod("trimws", "MSnSet",
          function(x, which, ...) {
              fData(x) <- trimws(fData(x), which, ...)
              logging(x, "Trimmed featureData white spaces")
          })
## An environment is empty when ls() reports no bindings.
## NOTE(review): ls() skips names starting with '.', so an environment
## holding only dot-prefixed bindings is reported as empty; use
## ls(x, all.names = TRUE) if hidden names should count -- confirm the
## intended semantics before changing.
setMethod("isEmpty", "environment",
          function(x) length(ls(x)) == 0)
## Simple helper to help differentiate between on disk and in
## memory objects.
## TRUE when the object carries a 'spectraProcessingQueue' slot, which
## distinguishes on-disk from in-memory experiment objects.
isOnDisk <- function(object)
    any(grepl("spectraProcessingQueue", slotNames(object)))
## Simple function to determine whether parallel or serial processing should be
## performed.
## Check testthat/test_OnDiskMSnExp_benchmarks.R for performance comparisons.
## Parameter object is expected to be an object with a length (number of
## spectra).
getBpParam <- function(object, BPPARAM = bpparam()) {
    ## Below this many spectra the parallel setup cost outweighs the
    ## benefit; configurable via options(MSnbase = list(PARALLEL_THRESH = ...)).
    threshold <- options()$MSnbase$PARALLEL_THRESH
    if (is.null(threshold))
        threshold <- 1000
    if (length(object) == 0 || length(object) < threshold)
        return(SerialParam())
    BPPARAM
}
## Render value counts as "value (n)" pairs joined by ", "; empty
## input yields "".
countAndPrint <- function(x) {
    if (length(x) == 0)
        return("")
    counts <- table(x)
    paste(sprintf("%s (%d)", names(counts), as.integer(counts)),
          collapse = ", ")
}
## see issue #131
.isCentroided <- function(pk, k = 0.025, qtl = 0.9) {
    ## Heuristic (issue #131): among the most intense peaks, centroided
    ## data shows large m/z gaps between neighbours, profile data small
    ## ones. Column 1 is m/z, column 2 intensity.
    cutoff <- quantile(pk[, 2], qtl)
    topMz <- pk[pk[, 2] > cutoff, 1]
    quantile(diff(topMz), 0.25) > k
}
##' @title Reads profile/centroided mode from an mzML file
##' @param x An instance of \code{MSnExp} or \code{OnDiskMSnExp}
##' @return A \code{logical}
##' @noRd
.isCentroidedFromFile <- function(f) {
    if (!requireNamespace("XML"))
        stop("Please install the XML package to use this functionality.")
    xml <- XML::xmlParse(f)
    ## The XPath selects, for each spectrum carrying a spectrum-type
    ## cvParam (MS:1000127 = centroid, MS:1000128 = profile), both the
    ## spectrum's index attribute and the cvParam's name, yielding an
    ## interleaved index/name vector.
    x <- XML::xpathSApply(xml,
                          "//x:spectrum/x:cvParam[@accession='MS:1000127' or @accession='MS:1000128']/../@index |
    //x:cvParam[@accession='MS:1000127' or @accession='MS:1000128']/@name",
                          namespaces = c(x = "http://psi.hupo.org/ms/mzml"))
    ## Odd positions are spectrum indices, even positions the mode names.
    index <- as.double(x[seq(1, length(x), by = 2)])
    ## One logical per spectrum: TRUE = centroided, FALSE = profile,
    ## NA = no matching cvParam name.
    res <- rep(NA, length(index))
    res[grepl("centroid", x[seq(2, length(x), by = 2)])] <- TRUE
    res[grepl("profile", x[seq(2, length(x), by = 2)])] <- FALSE
    res
}
## Returns the extension of the file. If that extension is on of the
## usual archive extensions, as defined in gexts, then the last part
## after the dot is removed and the extension is extracted again.
## Returns the extension of the file. If that extension is one of the usual
## archive extensions, as defined in gexts, the archive suffix is removed
## and the extension is extracted again.
.fileExt <- function(f,
                     gexts = c("gz", "gzip", "bzip", "bzip2", "xz",
                               "zip")) {
    ext <- tools::file_ext(f)
    if (ext %in% gexts) {
        f <- basename(f)
        ## Strip the archive suffix before recursing. file_path_sans_ext
        ## (rather than the previous sub("\\.[a-z]+$", ...)) also handles
        ## extensions containing digits such as "bzip2", which the letter-
        ## only regexp never removed, causing infinite recursion.
        f <- tools::file_path_sans_ext(f)
        ext <- .fileExt(f)
    }
    ext
}
## see this comment
## https://github.com/lgatto/MSnbase/issues/183#issuecomment-273512931
## for some background about this function
.firstMsLevel <- function(object) {
    ## For on-disk experiments msLevel() works off the cached header and is
    ## cheap; for in-memory experiments extract the first spectrum and query
    ## its MS level instead (see issue #183 for background).
    if (inherits(object, "OnDiskMSnExp")) {
        msLevel(object)[1]
    } else {
        msLevel(object[[1]])
    }
}
#' @title Open an MS file using the mzR package
#'
#' @description Opens an MS file using the mzR package, determining the correct
#' backend based on the file ending of the specified file.
#'
#' @param x \code{character(1)}: the file name.
#'
#' @return A file handle to the opened MS file.
#'
#' @author Johannes Rainer
#'
#' @noRd
#' Open an MS file with mzR, failing early unless exactly one file name is
#' supplied; the backend is chosen by mzR from the file ending.
.openMSfile <- function(x) {
    one_file <- !missing(x) && length(x) == 1
    if (!one_file)
        stop("parameter 'x' has to be of length 1")
    mzR::openMSfile(x, backend = NULL)
}
##' This function produces the opposite as the \code{stringsAsFactors}
##' argument in the \code{data.frame} or \code{read.table} functions;
##' it converts \code{factors} columns to \code{characters}.
##'
##' @title Converts factors to strings
##' @param x A \code{data.frame}
##' @return A \code{data.frame} where \code{factors} are converted to
##' \code{characters}.
##' @author Laurent Gatto
##' @examples
##' data(iris)
##' str(iris)
##' str(factorsAsStrings(iris))
factorsAsStrings <- function(x) {
    ## Convert every factor column to character, leave all other columns
    ## untouched, then reassemble without re-introducing factors.
    cols <- lapply(x, function(col) {
        if (is.factor(col))
            as.character(col)
        else
            col
    })
    data.frame(cols, stringsAsFactors = FALSE)
}
##' Convert a \code{vector} of characters to camel case by replacing
##' dots by capital letters.
##'
##' @title Convert to camel case by replacing dots by capital letters
##' @param x A \code{vector} to be transformed to camel case.
##' @param prefix An optional \code{character} of length one. Any
##' additional elements are ignored.
##' @return A \code{character} of same length as \code{x}.
##' @author Laurent Gatto
##' @examples
##' nms <- c("aa.foo", "ab.bar")
##' makeCamelCase(nms)
##' makeCamelCase(nms, prefix = "x")
makeCamelCase <- function(x, prefix) {
    ## Prepend the first element of 'prefix' (dot-separated) so that it
    ## takes part in the camel-casing below; extra elements are dropped.
    if (!missing(prefix)) {
        x <- paste(prefix[1], x, sep = ".")
    }
    ## Perl \U upper-cases the captured word character following each dot.
    gsub("\\.(\\w?)", "\\U\\1", x, perl = TRUE)
}
##' Reduce a data.frame so that the (primary) key column contains only
##' unique entries and other columns pertaining to that entry are
##' combined into semicolon-separated values into a single
##' row/observation.
##'
##' An important side-effect of reducing a `data.frame` is that all
##' columns other than the key are converted to characters when they
##' are collapsed to a semi-column separated value (even if only one
##' value is present) as soon as one observation is transformed.
##'
##' @title Reduce a data.frame
##' @param x A \code{data.frame}.
##' @param key The column name (currently only one is supported) to be
##' used as primary key.
##' @param sep The separator. Default is \code{;}.
##' @return A reduced \code{data.frame}.
##' @author Laurent Gatto
##' @examples
##' dfr <- data.frame(A = c(1, 1, 2),
##' B = c("x", "x", "z"),
##' C = LETTERS[1:3])
##' dfr
##' dfr2 <- reduce(dfr, key = "A")
##' dfr2
##' ## column A used as key is still num
##' str(dfr2)
##' dfr3 <- reduce(dfr, key = "B")
##' dfr3
##' ## A is converted to chr; B remains factor
##' str(dfr3)
##' dfr4 <- data.frame(A = 1:3,
##' B = LETTERS[1:3],
##' C = c(TRUE, FALSE, NA))
##' ## No effect of reducing, column classes are maintained
##' str(reduce(dfr4, key = "B"))
setMethod("reduce", "data.frame",
function(x, key, sep = ";") {
if (nrow(x) %in% c(0, 1))
return(x)
if (missing(key))
stop("Need a key column to reduce the data.frame")
if (length(key) != 1L)
stop("Key must be of length 1.")
if (!key %in% names(x))
stop("key not found in column names.")
ans <- by(x, x[, key], utils.vec2ssv.data.frame, exclude = key)
ans <- do.call(rbind, ans)
rownames(ans) <- NULL
ans
})
## Drop empty elements from a list: zero-length elements as well as elements
## consisting only of empty strings (or NAs).
.reduce_list <- function(x) {
    ## Remove zero-length elements first.
    x <- x[lengths(x) > 0]
    ## Keep elements containing at least one non-empty value. vapply (rather
    ## than sapply) keeps this correct for empty input, where sapply returned
    ## list() and made the subsetting below fail; isTRUE() guards against NA
    ## from all-NA elements, which previously produced spurious NULL entries.
    sel <- vapply(x, function(xx) isTRUE(any(xx != "")), logical(1))
    x[sel]
}
#' @param fd data.frame, feature data (columns required: acquisitionNum,
#' precursorScanNum)
#' @param an integer, acquisitionNum of spectrum of interest (parent and
#' children will be selected)
#' @noRd
#' @param fd data.frame, feature data (columns required: acquisitionNum,
#' precursorScanNum)
#' @param an integer, acquisitionNum of the spectrum of interest (its
#' parents and children are selected as well)
#' @noRd
.filterSpectraHierarchy <- function(fd, an) {
    ## Validate input: a data.frame with both linkage columns is required.
    if (!is.data.frame(fd)) {
        stop("'fd' is not a data.frame")
    }
    if (!all(c("acquisitionNum", "precursorScanNum") %in% colnames(fd))) {
        stop("column(s) acquisitionNum/precursorScanNum is/are missing")
    }
    ## Iterate to the fixed point in both directions of the parent/child
    ## relation. MS data rarely exceed three levels, so these loops
    ## terminate after a few passes.
    parentMask <- fd$acquisitionNum %in% an
    childMask <- parentMask
    ## Walk upwards: mark the precursor scans of everything marked so far
    ## until the marked set stops growing.
    repeat {
        sizeBefore <- sum(parentMask)
        parentMask[fd$acquisitionNum %in% fd$precursorScanNum[parentMask]] <- TRUE
        if (sum(parentMask) == sizeBefore)
            break
    }
    ## Walk downwards: mark scans whose precursor is already marked.
    repeat {
        sizeBefore <- sum(childMask)
        childMask[fd$precursorScanNum %in% fd$acquisitionNum[childMask]] <- TRUE
        if (sum(childMask) == sizeBefore)
            break
    }
    parentMask | childMask
}
## Indices of a window of half-size hws centred at i, clipped to [1, n].
windowIndices <- function(i, hws, n) {
    ## The centre index must lie within the sequence.
    stopifnot(i <= n)
    lo <- max(1L, i - hws)
    hi <- min(n, i + hws)
    lo:hi
}
#' The function aggregates `x` for `toBin` falling into bins defined
#' by `breaks` using the `fun` function.
#'
#' @details
#'
#' This is a combination of the code from the former bin_Spectrum.
#'
#' @param x `numeric` with the values that should be binned.
#'
#' @param toBin `numeric`, same length than `x`, with values to be used for the
#' binning.
#'
#' @param binSize `numeric(1)` with the size of the bins.
#'
#' @param breaks `numeric` defining the breaks/bins.
#'
#' @param fun `function` to be used to aggregate values of `x` falling into the
#' bins defined by `breaks`.
#'
#' @return `list` with elements `x` and `mids` being the aggregated values
#' of `x` for values in `toBin` falling within each bin and the bin mid
#' points.
#'
#' @author Johannes Rainer, Sebastian Gibb
#'
#' @noRd
.bin_values <- function(x, toBin, binSize = 1, breaks = seq(floor(min(toBin)),
                                                            ceiling(max(toBin)),
                                                            by = binSize),
                        fun = max) {
    ## Aggregate 'x' within bins defined on 'toBin' (see roxygen above).
    if (length(x) != length(toBin))
        stop("lengths of 'x' and 'toBin' have to match.")
    fun <- match.fun(fun)
    ## Make sure the breaks fully span range(toBin) (see .fix_breaks).
    breaks <- .fix_breaks(breaks, range(toBin))
    nbrks <- length(breaks)
    ## Bin index for every value of 'toBin'.
    idx <- findInterval(toBin, breaks)
    ## Ensure that indices are within breaks.
    idx[which(idx < 1L)] <- 1L
    idx[which(idx >= nbrks)] <- nbrks - 1L
    ## Bins receiving no value keep 0.
    ints <- double(nbrks - 1L)
    ## NOTE(review): split() orders its groups by sorted bin index whereas
    ## unique(idx) preserves order of appearance; the assignment below is
    ## therefore only correct when 'toBin' is sorted increasingly (as m/z
    ## values are) -- confirm callers guarantee this.
    ints[unique(idx)] <- unlist(lapply(base::split(x, idx), fun),
                                use.names = FALSE)
    list(x = ints, mids = (breaks[-nbrks] + breaks[-1L]) / 2L)
}
#' Simple function to ensure that the breaks (for binning) span at least the
#' expected range.
#'
#' @param brks `numeric` with *breaks* such as calculated by `seq`.
#'
#' @param rng `numeric(2)` with the range of original numeric values on which
#' the breaks were calculated.
#'
#' @noRd
.fix_breaks <- function(brks, rng) {
    ## Assuming breaks being sorted: append one more break when the last one
    ## does not strictly exceed the upper end of the data range, so that
    ## findInterval() can place the maximum value into a bin.
    n <- length(brks)
    if (brks[n] <= rng[2]) {
        step <- mean(diff(brks))
        brks <- c(brks, max(rng[2] + 1e-6, brks[n] + step))
    }
    brks
}
##' Helper functions to check whether raw files contain spectra or
##' chromatograms.
##'
##' @title Checks if raw data files have any spectra or chromatograms
##' @param files A `character()` with raw data filenames.
##' @return A `logical(n)` where `n == length(x)` with `TRUE` if that
##' files contains at least one spectrum, `FALSE` otherwise.
##' @author Laurent Gatto
##' @rdname hasSpectraOrChromatograms
##' @md
##' @examples
##' f <- msdata::proteomics(full.names = TRUE)[1:2]
##' hasSpectra(f)
##' hasChromatograms(f)
hasSpectra <- function(files) {
    ## vapply (rather than sapply) guarantees the documented logical(n)
    ## return type even when 'files' is empty, where sapply would return an
    ## empty list instead.
    vapply(files, mzR:::.hasSpectra, logical(1))
}
##' @rdname hasSpectraOrChromatograms
hasChromatograms <- function(files) {
    ## vapply (rather than sapply) guarantees the documented logical(n)
    ## return type even when 'files' is empty.
    vapply(files, mzR:::.hasChromatograms, logical(1))
}
#' @title Get the index of the particular element for each level
#'
#' `levelIndex` returns the index of the first, middle or last element for
#' each level of a factor within the factor.
#'
#' @param x `factor` or `vector` that can be converted into a `factor`
#'
#' @param which `character` defining for which element the index should be
#' returned, can be either `"first"`, `"middle"` or `"last"`.
#'
#' @return `integer` same length than `levels(x)` with the index for each
#' level in `x`.
#'
#' @author Johannes Rainer
#'
#' @md
#'
#' @noRd
#'
#' @examples
#'
#' f <- factor(c("a", "a", "b", "a", "b", "c", "c", "b", "d", "d", "d"))
#' f
#'
#' levelIndex(f, which = "first")
#' levelIndex(f, which = "middle")
#' levelIndex(f, which = "last")
#'
#' f <- factor(c("a", "a", "b", "a", "b", "c", "c", "b", "d", "d", "d"),
#' levels = c("d", "a", "c", "b"))
#' levelIndex(f, which = "first")
#' levelIndex(f, which = "middle")
#' levelIndex(f, which = "last")
levelIndex <- function(x, which = c("first", "middle", "last")) {
    ## For every level of x, return the position of its first, middle or
    ## last occurrence; the result is named by level.
    x <- as.factor(x)
    which <- match.arg(which)
    lvls <- levels(x)
    if (which == "first") {
        res <- match(lvls, x)
    } else if (which == "last") {
        ## First hit in the reversed vector, mapped back to the original
        ## orientation.
        res <- length(x) - match(lvls, rev(x)) + 1L
    } else {
        res <- vapply(lvls, function(lvl) {
            pos <- which(x == lvl)
            pos[ceiling(length(pos) / 2L)]
        }, integer(1), USE.NAMES = FALSE)
    }
    names(res) <- lvls
    res
}
|
# clusteredRNApheno4.R
# R version 3.3.1 (2016-06-21)
# June 8, 2017. Mallory B. Lai.
# Reviewed by: TODO (Mallory B. Lai) : Find reviewer to proofread
# Creating combined pheno & RNA seq BN for clustered Brassica gene data
# using bnlearn package. Data taken from Brassica control
# and droughted conditions.
#-----------------------------------------------------------------------
library(bnlearn)
library(stringr)
library(dplyr)
library(tidyr)
library(data.table)
#-----------------------------------------------------------------------
#### Preprocessing:
# Set working directory.
# setwd("E:/Mallory Lai/PhenoRNAnetworkBrassica")
#setwd("/Users/mblai/Documents/GitHub/PhenoRNAnetworkBrassica")
######## RNA
# Read in RNAseq data.
RNA <- read.csv(file = "largeDE.csv", row.names = 1)
## Transpose data.
RNA <- t(RNA)
# Convert to data.frame.
RNA <- as.data.frame(RNA)
# Discretize the data.
discRNA <- discretize(RNA, method = "quantile", breaks = 3)
# Split rownames into Treatment/Timepoint and Replicate.
rnaNames <- str_split_fixed(rownames(RNA), '_', 2)
# Create a Timepoint column.
discRNA$Timepoint <- as.numeric(str_split_fixed(rnaNames[, 1], '', 2)[, 2])
# Create a treatment column named int.
discRNA$int <- as.factor(str_split_fixed(rnaNames[, 1], '', 2)[, 1])
# Remove rnaNames vector and RNA dataframe.
rm(rnaNames)
rm(RNA)
# Order RNA dataframe by Timepoint and int.
discRNA <- discRNA[with(discRNA, order(Timepoint, int)), ]
# Convert data intervals to -1, 0, and 1 representing low, medium, and high.
for (i in seq_len(ncol(discRNA) - 2)) {
    # Relabel the three discretize() intervals as -1/0/1. The columns are
    # already factors after discretize(), so the former as.factor()
    # re-conversion was a no-op and has been dropped; seq_len() is also
    # safe should there be no gene columns.
    levels(discRNA[, i]) <- c(-1, 0, 1)
}
# Transform RNA data frame.
discRNA <- t(discRNA)
# Rename column names to match timepoint and treatment.
colnames(discRNA) <- paste(discRNA[dim(discRNA)[1] - 1,],
discRNA[dim(discRNA)[1],], sep = "")
# Read in cluster classification.
cl <- read.csv(file = "mod80.csv", row.names = 1)
# Separate clusters to form gene modules.
c1 <- discRNA[c(cl[1:(length(cl[,1]) - sum(is.na(cl[,1]))), 1]), ]
# Add timepoint and treatment row.
c1 <- rbind(c1, TP = discRNA[dim(discRNA)[1] - 1,], Trmt = discRNA[dim(discRNA)[1],])
# Transpose.
c1 <- as.data.frame(t(c1))
# Gather all genes into one gene module, M1.
m1 <- gather(c1, Gene, M1, -TP, -Trmt)
# Count number of values per treatment and time point.
mCounts <- as.data.frame(table(m1[, c(1, 2, 4)]))
# Order mCounts dataframe by Timepoint, treatment, and module value.
mCounts <- mCounts[with(mCounts, order(TP, Trmt, M1)), ]
# Add column for proportion per timepoint, treatment, and module value.
mCounts$Prop <- mCounts$Freq/((length(cl[,1]) - sum(is.na(cl[,1])))*2)
# Create a column with the number of counts proportional to 12.
mCounts$twelve <- mCounts$Prop * 12
# Round the counts proportional to 12.
mCounts$round <- round(mCounts$twelve)
# Convert to data.table.
mCounts <- as.data.table(mCounts)
# Add a column that counts the total in round.
mCounts <- mCounts[, total := sum(round), by = list(TP, Trmt)]
# Convert to dataframe for easier updates.
mCounts <- as.data.frame(mCounts)
# If total count is 13, round to nearest 1/2 number and then round that.
mCounts[mCounts$total == 13, 'round'] <- round(round(mCounts[mCounts$total == 13, 'twelve']/0.5) *.5)
# Convert back to data table for easy updating.
mCounts <- as.data.table(mCounts)
# Update the total column with new rounding.
mCounts <- mCounts[, total := sum(round), by = list(TP, Trmt)]
# Add a column that identifies the max proportion per treatment and time point.
mCounts <- mCounts[, max := max(Prop), by = list(TP, Trmt)]
# Convert back to dataframe for easy subsetting.
mCounts <- as.data.frame(mCounts)
# Add 1 to the count with the max proportion.
mCounts[mCounts$total == 11 & mCounts$Prop == mCounts$max, 'round'] <-
mCounts[mCounts$total == 11
& mCounts$Prop == mCounts$max, 'round'] + 1
# Convert back to data table for easy updating.
mCounts <- as.data.table(mCounts)
# Update the total column with new rounding.
mCounts <- mCounts[, total := sum(round), by = list(TP, Trmt)]
# Extract the round column as a dataframe.
mod <- data.frame(TP = mCounts$TP, Trmt = mCounts$Trmt,
Value = mCounts$M1, M1 = mCounts$round)
# Remove unnecessary dataframes.
rm(m1)
rm(c1)
rm(mCounts)
############ TO DO: Update loop to perform a similar function as above.
# Loop through remaining clusters.
for (i in 2:(dim(cl)[2]))
{
# Separate clusters to form gene modules.
c1 <- discRNA[c(cl[1:(length(cl[,i]) - sum(is.na(cl[,i]))), i]), ]
# Add timepoint and treatment row.
c1 <- rbind(c1, TP = discRNA[dim(discRNA)[1] - 1,], Trmt = discRNA[dim(discRNA)[1],])
# Transpose.
c1 <- as.data.frame(t(c1))
# Gather all genes into one gene module
module <- gather(c1, Gene, M, -TP, -Trmt)
# Order module dataframe by Timepoint and int.
module <- module[with(module, order(TP, Trmt)), ]
# Count number of values per treatment and time point.
counts <- as.data.frame(table(module[, c(1, 2, 4)]))
# Order mCounts dataframe by Timepoint, treatment, and module value.
counts <- counts[with(counts, order(TP, Trmt, M)), ]
# Add column for proportion per timepoint, treatment, and module value.
counts$Prop <- counts$Freq/((length(cl[,i]) - sum(is.na(cl[,i])))*2)
# Create a column with the number of counts proportional to 12.
counts$twelve <- counts$Prop * 12
# Round the counts proportional to 12.
counts$round <- round(counts$twelve)
# Convert to data.table.
counts <- as.data.table(counts)
# Add a column that counts the total in round.
counts <- counts[, total := sum(round), by = list(TP, Trmt)]
# Convert to dataframe for easier updates.
counts <- as.data.frame(counts)
# If total count is 13, round to nearest 1/2 number and then round that.
counts[counts$total == 13, 'round'] <- round(round(counts[counts$total == 13, 'twelve']/0.5) *.5)
# Convert back to data table for easy updating.
counts <- as.data.table(counts)
# Update the total column with new rounding.
counts <- counts[, total := sum(round), by = list(TP, Trmt)]
# Add a column that identifies the max proportion per treatment and time point.
counts <- counts[, max := max(Prop), by = list(TP, Trmt)]
# Add a column that identifies the second highest proportion per treatment
# and time point.
counts <- counts[, max2 := as.numeric(Prop)][, max2 := sort(Prop, T)[2], by = list(TP, Trmt)]
# Add a column that identifies the min proportion per treatment and time point.
counts <- counts[, min := min(Prop), by = list(TP, Trmt)]
# Convert back to dataframe for easy subsetting.
counts <- as.data.frame(counts)
# Add 1 to the count with the max proportion.
counts[counts$total <= 11 & counts$Prop == counts$max, 'round'] <-
counts[counts$total <= 11
& counts$Prop == counts$max, 'round'] + 1
# Convert back to data table for easy updating.
counts <- as.data.table(counts)
# Update the total column with new rounding.
counts <- counts[, total := sum(round), by = list(TP, Trmt)]
# Convert back to dataframe for easy subsetting.
counts <- as.data.frame(counts)
# Add 1 to the count with the 2nd highest proportion if
# still less than 12.
counts[counts$total <= 11 & counts$Prop == counts$max2, 'round'] <-
counts[counts$total <= 11
& counts$Prop == counts$max2, 'round'] + 1
# Convert back to data table for easy updating.
counts <- as.data.table(counts)
# Update the total column with new rounding.
counts <- counts[, total := sum(round), by = list(TP, Trmt)]
# If there are any column totals of 13, subtract one from the
# value with the lowest proportion.
counts[counts$total == 13 & counts$Prop == counts$min, 'round'] <-
counts[counts$total == 13
& counts$Prop == counts$min, 'round'] - 1
# Convert back to data table for easy updating.
counts <- as.data.table(counts)
# Update the total column with new rounding.
counts <- counts[, total := sum(round), by = list(TP, Trmt)]
# Add 1 to the count with the highest proportion if
# still less than 12.
counts[counts$total <= 11 & counts$Prop == counts$max, 'round'] <-
counts[counts$total <= 11
& counts$Prop == counts$max, 'round'] + 1
# Bind round column to mod dataframe.
mod <- cbind(mod, counts$round)
}
# Remove unnecessary dataframes.
rm(c1)
rm(i)
rm(cl)
rm(discRNA)
rm(module)
# Rename modules in mod dataframe.
colnames(mod)[5:dim(mod)[2]] <- paste("M", 2:(dim(mod)[2]-3), sep = "")
# Expand modules to match the number of -1, 0, and 1's that should be
# in the RNA-Seq dataframe.
RNAmod <- as.data.frame(sapply(mod[, 4:dim(mod)[2]], function(x) rep(mod$Value, times = x)))
#### Pheno.
# Read in phenotype file.
Pheno <- read.csv(file = "PhenoBrassicaImp.csv", row.names = 1)
# Rename SM... to get rid of periods.
colnames(Pheno)[8] <- "SM"
# Add a column for Time of Day, named TOD.
Pheno$TOD <- rep(c(7, 11, 15, 19, 23, 3), each = 24, 2)
#### Discretize data.
# Discretize the phenotype data, excluding fluorescence.
phenoDisc <- discretize(Pheno[, c(3, 4, 6, 7, 8)],
method = "interval",
breaks = c(5, 5, 5, 5, 5))
# Use arules package to discretize fluorescence and detach due
# to the overlap in package functions with bnlearn.
library(arules)
fluor <- discretize(Pheno[, 5], method = "cluster")
detach("package:arules", unload=TRUE)
# Attach fluorescence data to phenoDisc dataframe.
phenoDisc$fluor <- fluor
# Add INT column to discretized data.
phenoDisc$INT <- as.factor(Pheno$Treatment)
# Add Timepoint column to discretized data.
phenoDisc$TP <- as.factor(Pheno$Timepoint)
# Order Pheno dataframe by Timepoint and int.
phenoDisc <- phenoDisc[with(phenoDisc, order(TP, INT)), ]
# Combine pheno and RNA data frames.
rnaPheno <- cbind(phenoDisc, RNAmod)
# Remove unnecessary dataframes.
rm(phenoDisc)
rm(Pheno)
# Remove extra columns.
# Drop the grouping helper columns (duplicate TP removal collapsed into one).
rnaPheno$Trmt <- NULL
rnaPheno$INT <- NULL
rnaPheno$TP <- NULL
# Create a whitelist using expert knowledge of
# physiological interactions.
wh <- data.frame(from = c("SM", "Photo", "Starch", "gs", "gs"),
to = c("gs", "fluor", "NSC", "Starch", "Photo"))
# Create a blacklist to soil moisture.
bl <- tiers2blacklist(list(colnames(rnaPheno)[5],
colnames(rnaPheno)[-5]))
# Learn network structure.
bn <- tabu(rnaPheno, score = "bde",
iss = 5, tabu = 50)
plot(bn) # Has a higher score than rsmax2
bn <- rsmax2(rnaPheno, restrict = "aracne",
blacklist = bl, whitelist = wh,
maximize = "tabu", score = "bde",
maximize.args = list(iss = 5))
plot(bn)
#write.csv(bn$arcs, "M58bn.csv")
bnParam <- bn.fit(bn, rnaPheno, method = "bayes")
boot <- boot.strength(rnaPheno, R = 500, algorithm = "rsmax2",
algorithm.args = list(restrict = "aracne",
blacklist = bl, whitelist = wh,
maximize = "tabu",
score = "bde",
maximize.args = list(iss = 5)))
boot[(boot$strength > 0.85) & (boot$direction >= 0.5), ]
avg.boot <- averaged.network(boot, threshold = 0.85)
plot(avg.boot)
nodes <- names(training)
start <- random.graph(nodes = nodes, method = "ic-dag", num = 100,
every = 3)
netlist <- suppressWarnings(lapply(start, function(net){
tabu(training, score = "bde", tabu = 50, iss = 10)
}))
rnd <- custom.strength(netlist, nodes = nodes)
modelAvg <- rnd[(rnd$strength > .85) & (rnd$direction >= .5), ]
avg.start <- averaged.network(rnd, threshold = .85)
plot(avg.start)
plot(bn)
# Write csv of network arcs.
#write.csv(bn$arcs, file = "clustBNarcs.csv")
plot(bn)
bn.mle <- bn.fit(bn, training, method = "bayes")
bn.fit.barchart(bn.mle$Photo, xlab = "P()")
bn.mle$Photo
bn.fit.barchart(bn.mle$M8, xlab = "P()")
bn.mle$M8
bn.fit.barchart(bn.mle$M3, xlab = "P()")
bn.mle$M3
bn.fit.barchart(bn.mle$Starch, xlab = "P()")
bn.mle$Starch
bn.fit.barchart(bn.mle$Starch, xlab = "P()")
bn.mle$M4
testOrder <- test[with(test, order(M9, M4)), ]
testOrder <- testOrder[, c(12, 22)]
s <- testOrder[testOrder$M4 == "-1" & testOrder$M9 == "-1", ]
l <- testOrder[testOrder$M4 == "-1" & testOrder$M9 == "0", ]
c <- testOrder[testOrder$M4 == "-1" & testOrder$M9 == "1", ]
| /R scripts/clusteredRNApheno4.R | no_license | EwersLabUWyo/brassicaDroughtBN | R | false | false | 12,701 | r | # clusteredRNApheno4.R
# R version 3.3.1 (2016-06-21)
# June 8, 2017. Mallory B. Lai.
# Reviewed by: TODO (Mallory B. Lai) : Find reviewer to proofread
# Creating combined pheno & RNA seq BN for clustered Brassica gene data
# using bnlearn package. Data taken from Brassica control
# and droughted conditions.
#-----------------------------------------------------------------------
library(bnlearn)
library(stringr)
library(dplyr)
library(tidyr)
library(data.table)
#-----------------------------------------------------------------------
#### Preprocessing:
# Set working directory.
# setwd("E:/Mallory Lai/PhenoRNAnetworkBrassica")
#setwd("/Users/mblai/Documents/GitHub/PhenoRNAnetworkBrassica")
######## RNA
# Read in RNAseq data.
RNA <- read.csv(file = "largeDE.csv", row.names = 1)
## Transpose data.
RNA <- t(RNA)
# Convert to data.frame.
RNA <- as.data.frame(RNA)
# Discretize the data.
discRNA <- discretize(RNA, method = "quantile", breaks = 3)
# Split rownames into Treatment/Timepoint and Replicate.
rnaNames <- str_split_fixed(rownames(RNA), '_', 2)
# Create a Timepoint column.
discRNA$Timepoint <- as.numeric(str_split_fixed(rnaNames[, 1], '', 2)[, 2])
# Create a treatment column named int.
discRNA$int <- as.factor(str_split_fixed(rnaNames[, 1], '', 2)[, 1])
# Remove rnaNames vector and RNA dataframe.
rm(rnaNames)
rm(RNA)
# Order RNA dataframe by Timepoint and int.
discRNA <- discRNA[with(discRNA, order(Timepoint, int)), ]
# Convert data intervals to -1, 0, and 1 representing low, medium, and high.
for (i in seq_len(ncol(discRNA) - 2)) {
    # Relabel the three discretize() intervals as -1/0/1. The columns are
    # already factors after discretize(), so the former as.factor()
    # re-conversion was a no-op and has been dropped; seq_len() is also
    # safe should there be no gene columns.
    levels(discRNA[, i]) <- c(-1, 0, 1)
}
# Transform RNA data frame.
discRNA <- t(discRNA)
# Rename column names to match timepoint and treatment.
colnames(discRNA) <- paste(discRNA[dim(discRNA)[1] - 1,],
discRNA[dim(discRNA)[1],], sep = "")
# Read in cluster classification.
cl <- read.csv(file = "mod80.csv", row.names = 1)
# Separate clusters to form gene modules.
c1 <- discRNA[c(cl[1:(length(cl[,1]) - sum(is.na(cl[,1]))), 1]), ]
# Add timepoint and treatment row.
c1 <- rbind(c1, TP = discRNA[dim(discRNA)[1] - 1,], Trmt = discRNA[dim(discRNA)[1],])
# Transpose.
c1 <- as.data.frame(t(c1))
# Gather all genes into one gene module, M1.
m1 <- gather(c1, Gene, M1, -TP, -Trmt)
# Count number of values per treatment and time point.
mCounts <- as.data.frame(table(m1[, c(1, 2, 4)]))
# Order mCounts dataframe by Timepoint, treatment, and module value.
mCounts <- mCounts[with(mCounts, order(TP, Trmt, M1)), ]
# Add column for proportion per timepoint, treatment, and module value.
mCounts$Prop <- mCounts$Freq/((length(cl[,1]) - sum(is.na(cl[,1])))*2)
# Create a column with the number of counts proportional to 12.
mCounts$twelve <- mCounts$Prop * 12
# Round the counts proportional to 12.
mCounts$round <- round(mCounts$twelve)
# Convert to data.table.
mCounts <- as.data.table(mCounts)
# Add a column that counts the total in round.
mCounts <- mCounts[, total := sum(round), by = list(TP, Trmt)]
# Convert to dataframe for easier updates.
mCounts <- as.data.frame(mCounts)
# If total count is 13, round to nearest 1/2 number and then round that.
mCounts[mCounts$total == 13, 'round'] <- round(round(mCounts[mCounts$total == 13, 'twelve']/0.5) *.5)
# Convert back to data table for easy updating.
mCounts <- as.data.table(mCounts)
# Update the total column with new rounding.
mCounts <- mCounts[, total := sum(round), by = list(TP, Trmt)]
# Add a column that identifies the max proportion per treatment and time point.
mCounts <- mCounts[, max := max(Prop), by = list(TP, Trmt)]
# Convert back to dataframe for easy subsetting.
mCounts <- as.data.frame(mCounts)
# Add 1 to the count with the max proportion.
mCounts[mCounts$total == 11 & mCounts$Prop == mCounts$max, 'round'] <-
mCounts[mCounts$total == 11
& mCounts$Prop == mCounts$max, 'round'] + 1
# Convert back to data table for easy updating.
mCounts <- as.data.table(mCounts)
# Update the total column with new rounding.
mCounts <- mCounts[, total := sum(round), by = list(TP, Trmt)]
# Extract the round column as a dataframe.
mod <- data.frame(TP = mCounts$TP, Trmt = mCounts$Trmt,
Value = mCounts$M1, M1 = mCounts$round)
# Remove unnecessary dataframes.
rm(m1)
rm(c1)
rm(mCounts)
############ TO DO: Update loop to perform a similar function as above.
# Loop through remaining clusters.
for (i in 2:(dim(cl)[2]))
{
# Separate clusters to form gene modules.
c1 <- discRNA[c(cl[1:(length(cl[,i]) - sum(is.na(cl[,i]))), i]), ]
# Add timepoint and treatment row.
c1 <- rbind(c1, TP = discRNA[dim(discRNA)[1] - 1,], Trmt = discRNA[dim(discRNA)[1],])
# Transpose.
c1 <- as.data.frame(t(c1))
# Gather all genes into one gene module
module <- gather(c1, Gene, M, -TP, -Trmt)
# Order module dataframe by Timepoint and int.
module <- module[with(module, order(TP, Trmt)), ]
# Count number of values per treatment and time point.
counts <- as.data.frame(table(module[, c(1, 2, 4)]))
# Order mCounts dataframe by Timepoint, treatment, and module value.
counts <- counts[with(counts, order(TP, Trmt, M)), ]
# Add column for proportion per timepoint, treatment, and module value.
counts$Prop <- counts$Freq/((length(cl[,i]) - sum(is.na(cl[,i])))*2)
# Create a column with the number of counts proportional to 12.
counts$twelve <- counts$Prop * 12
# Round the counts proportional to 12.
counts$round <- round(counts$twelve)
# Convert to data.table.
counts <- as.data.table(counts)
# Add a column that counts the total in round.
counts <- counts[, total := sum(round), by = list(TP, Trmt)]
# Convert to dataframe for easier updates.
counts <- as.data.frame(counts)
# If total count is 13, round to nearest 1/2 number and then round that.
counts[counts$total == 13, 'round'] <- round(round(counts[counts$total == 13, 'twelve']/0.5) *.5)
# Convert back to data table for easy updating.
counts <- as.data.table(counts)
# Update the total column with new rounding.
counts <- counts[, total := sum(round), by = list(TP, Trmt)]
# Add a column that identifies the max proportion per treatment and time point.
counts <- counts[, max := max(Prop), by = list(TP, Trmt)]
# Add a column that identifies the second highest proportion per treatment
# and time point.
counts <- counts[, max2 := as.numeric(Prop)][, max2 := sort(Prop, T)[2], by = list(TP, Trmt)]
# Add a column that identifies the min proportion per treatment and time point.
counts <- counts[, min := min(Prop), by = list(TP, Trmt)]
# Convert back to dataframe for easy subsetting.
counts <- as.data.frame(counts)
# Add 1 to the count with the max proportion.
counts[counts$total <= 11 & counts$Prop == counts$max, 'round'] <-
counts[counts$total <= 11
& counts$Prop == counts$max, 'round'] + 1
# Convert back to data table for easy updating.
counts <- as.data.table(counts)
# Update the total column with new rounding.
counts <- counts[, total := sum(round), by = list(TP, Trmt)]
# Convert back to dataframe for easy subsetting.
counts <- as.data.frame(counts)
# Add 1 to the count with the 2nd highest proportion if
# still less than 12.
counts[counts$total <= 11 & counts$Prop == counts$max2, 'round'] <-
counts[counts$total <= 11
& counts$Prop == counts$max2, 'round'] + 1
# Convert back to data table for easy updating.
counts <- as.data.table(counts)
# Update the total column with new rounding.
counts <- counts[, total := sum(round), by = list(TP, Trmt)]
# If there are any column totals of 13, subtract one from the
# value with the lowest proportion.
counts[counts$total == 13 & counts$Prop == counts$min, 'round'] <-
counts[counts$total == 13
& counts$Prop == counts$min, 'round'] - 1
# Convert back to data table for easy updating.
counts <- as.data.table(counts)
# Update the total column with new rounding.
counts <- counts[, total := sum(round), by = list(TP, Trmt)]
# Add 1 to the count with the highest proportion if
# still less than 12.
counts[counts$total <= 11 & counts$Prop == counts$max, 'round'] <-
counts[counts$total <= 11
& counts$Prop == counts$max, 'round'] + 1
# Bind round column to mod dataframe.
mod <- cbind(mod, counts$round)
}
# Remove unnecessary dataframes.
rm(c1)
rm(i)
rm(cl)
rm(discRNA)
rm(module)
# Rename modules in mod dataframe.
colnames(mod)[5:dim(mod)[2]] <- paste("M", 2:(dim(mod)[2]-3), sep = "")
# Expand modules to match the number of -1, 0, and 1's that should be
# in the RNA-Seq dataframe.
RNAmod <- as.data.frame(sapply(mod[, 4:dim(mod)[2]], function(x) rep(mod$Value, times = x)))
#### Pheno.
# Read in phenotype file.
Pheno <- read.csv(file = "PhenoBrassicaImp.csv", row.names = 1)
# Rename SM... to get rid of periods.
colnames(Pheno)[8] <- "SM"
# Add a column for Time of Day, named TOD.
Pheno$TOD <- rep(c(7, 11, 15, 19, 23, 3), each = 24, 2)
#### Discretize data.
# Discretize the phenotype data, excluding fluorescence.
phenoDisc <- discretize(Pheno[, c(3, 4, 6, 7, 8)],
method = "interval",
breaks = c(5, 5, 5, 5, 5))
# Use arules package to discretize fluorescence and detach due
# to the overlap in package functions with bnlearn.
library(arules)
fluor <- discretize(Pheno[, 5], method = "cluster")
detach("package:arules", unload=TRUE)
# Attach fluorescence data to phenoDisc dataframe.
phenoDisc$fluor <- fluor
# Add INT column to discretized data.
phenoDisc$INT <- as.factor(Pheno$Treatment)
# Add Timepoint column to discretized data.
phenoDisc$TP <- as.factor(Pheno$Timepoint)
# Order Pheno dataframe by Timepoint and int.
phenoDisc <- phenoDisc[with(phenoDisc, order(TP, INT)), ]
# Combine pheno and RNA data frames.
rnaPheno <- cbind(phenoDisc, RNAmod)
# Remove unnecessary dataframes.
rm(phenoDisc)
rm(Pheno)
# Remove extra columns.
# Drop the grouping helper columns (duplicate TP removal collapsed into one).
rnaPheno$Trmt <- NULL
rnaPheno$INT <- NULL
rnaPheno$TP <- NULL
# Create a whitelist using expert knowledge of
# physiological interactions (these arcs are forced into the network).
wh <- data.frame(from = c("SM", "Photo", "Starch", "gs", "gs"),
to = c("gs", "fluor", "NSC", "Starch", "Photo"))
# Create a blacklist to soil moisture: tiers2blacklist forbids arcs from the
# second tier (all other variables) into the first tier (column 5, SM),
# treating soil moisture as exogenous.
bl <- tiers2blacklist(list(colnames(rnaPheno)[5],
colnames(rnaPheno)[-5]))
# Learn network structure via score-based tabu search
# (BDe score, imaginary sample size 5, tabu list of 50 moves).
bn <- tabu(rnaPheno, score = "bde",
iss = 5, tabu = 50)
plot(bn) # Has a higher score than rsmax2
# Hybrid learning: ARACNE restricts the candidate skeleton, then tabu
# search maximizes the BDe score subject to the white/blacklist.
bn <- rsmax2(rnaPheno, restrict = "aracne",
blacklist = bl, whitelist = wh,
maximize = "tabu", score = "bde",
maximize.args = list(iss = 5))
plot(bn)
#write.csv(bn$arcs, "M58bn.csv")
# Fit conditional probability tables for the learned structure (Bayesian posterior).
bnParam <- bn.fit(bn, rnaPheno, method = "bayes")
# Bootstrap arc strength and direction over 500 resamples of the hybrid learner.
boot <- boot.strength(rnaPheno, R = 500, algorithm = "rsmax2",
algorithm.args = list(restrict = "aracne",
blacklist = bl, whitelist = wh,
maximize = "tabu",
score = "bde",
maximize.args = list(iss = 5)))
# Keep arcs present in >85% of bootstrap networks with a stable direction.
boot[(boot$strength > 0.85) & (boot$direction >= 0.5), ]
avg.boot <- averaged.network(boot, threshold = 0.85)
plot(avg.boot)
# NOTE(review): `training` and `test` are not defined anywhere in this chunk;
# presumably a train/test split created elsewhere in the file -- verify upstream.
nodes <- names(training)
# Model averaging from 100 random starting DAGs.
start <- random.graph(nodes = nodes, method = "ic-dag", num = 100,
every = 3)
# NOTE(review): the anonymous function ignores its `net` argument, so every
# tabu run starts from the same (default) graph; the random starts in `start`
# are never used -- likely intended `tabu(training, start = net, ...)`.
netlist <- suppressWarnings(lapply(start, function(net){
tabu(training, score = "bde", tabu = 50, iss = 10)
}))
rnd <- custom.strength(netlist, nodes = nodes)
modelAvg <- rnd[(rnd$strength > .85) & (rnd$direction >= .5), ]
avg.start <- averaged.network(rnd, threshold = .85)
plot(avg.start)
plot(bn)
# Write csv of network arcs.
#write.csv(bn$arcs, file = "clustBNarcs.csv")
plot(bn)
# Parameter learning on the training split, then inspect selected CPTs.
bn.mle <- bn.fit(bn, training, method = "bayes")
bn.fit.barchart(bn.mle$Photo, xlab = "P()")
bn.mle$Photo
bn.fit.barchart(bn.mle$M8, xlab = "P()")
bn.mle$M8
bn.fit.barchart(bn.mle$M3, xlab = "P()")
bn.mle$M3
bn.fit.barchart(bn.mle$Starch, xlab = "P()")
bn.mle$Starch
# NOTE(review): Starch is plotted twice while M4 is printed below -- the
# second barchart probably intended bn.mle$M4; confirm with the author.
bn.fit.barchart(bn.mle$Starch, xlab = "P()")
bn.mle$M4
# Order the test set by modules M9 and M4 and keep those two columns
# (selected by position 12 and 22 -- assumes a fixed column layout; verify).
testOrder <- test[with(test, order(M9, M4)), ]
testOrder <- testOrder[, c(12, 22)]
# Split rows by M4/M9 state combinations.
# NOTE(review): `c` shadows base::c in the global environment -- consider renaming.
s <- testOrder[testOrder$M4 == "-1" & testOrder$M9 == "-1", ]
l <- testOrder[testOrder$M4 == "-1" & testOrder$M9 == "0", ]
c <- testOrder[testOrder$M4 == "-1" & testOrder$M9 == "1", ]
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc_and_utility.R
\name{umx_read_lower}
\alias{umx_read_lower}
\title{Read lower-triangle of data matrix from console or file}
\usage{
umx_read_lower(file = "", diag = TRUE, names = NULL, ensurePD = FALSE)
}
\arguments{
\item{file}{Path to a file to read (Default "" will read from user input)}
\item{diag}{Whether the data include the diagonal. Defaults to TRUE}
\item{names}{The default names for the variables.
Defaults to as.character(paste("X", 1:n, sep=""))}
\item{ensurePD}{Whether to coerce the resultant matrix to positive definite (Defaults to FALSE)}
}
\value{
\itemize{
\item \code{\link[=matrix]{matrix()}}
}
}
\description{
umx_read_lower will read a lower triangle of data, either from the
console, or from file, and return a full matrix, optionally coerced to
positive definite. This is useful, especially when copying data from a paper
that includes just the lower triangle of a correlation matrix.
}
\examples{
\dontrun{
require(umx) # for umxRAM
IQtests = c("brainstorm", "matrix", "moral", "shopping", "typing")
allCols = c("C", IQtests, "avgIQ", "maxIQ", "video")
df = umx_read_lower(file = "", diag = FALSE)
0.38
0.86 0.30
0.42 0.12 0.27
0.66 0.21 0.38 0.18
0.80 0.13 0.50 0.25 0.43
0.19 0.11 0.19 0.12 -0.06 0.22
0.27 0.09 0.33 0.05 -0.04 0.28 0.73
0.52 0.17 0.38 0.37 0.39 0.44 0.18 0.13
dimnames(df) = list(allCols, allCols) # manually add
df = umx_read_lower(file = "", diag = FALSE, names = allCols, ensurePD= TRUE)
0.38
0.86 0.30
0.42 0.12 0.27
0.66 0.21 0.38 0.18
0.80 0.13 0.50 0.25 0.43
0.19 0.11 0.19 0.12 -0.06 0.22
0.27 0.09 0.33 0.05 -0.04 0.28 0.73
0.52 0.17 0.38 0.37 0.39 0.44 0.18 0.13
round(df, 2)
m1 = umxRAM("wooley", data = mxData(df, type="cov", numObs = 90),
umxPath("g", to = IQtests),
umxPath(var = "g", fixedAt= 1),
umxPath(var = IQtests)
)
summary(m1)
}
}
\references{
\itemize{
\item \url{https://github.com/tbates/umx}, \url{https://tbates.github.io}
}
}
\seealso{
Other Data Functions: \code{\link{umxHetCor}()},
\code{\link{umx_as_numeric}()},
\code{\link{umx_lower2full}()},
\code{\link{umx_make_MR_data}()},
\code{\link{umx_make_TwinData}()},
\code{\link{umx_make_fake_data}()},
\code{\link{umx_make_raw_from_cov}()},
\code{\link{umx_polychoric}()},
\code{\link{umx_polypairwise}()},
\code{\link{umx_polytriowise}()},
\code{\link{umx_rename}()}, \code{\link{umx_reorder}()},
\code{\link{umx_select_valid}()},
\code{\link{umx_stack}()}, \code{\link{umx}},
\code{\link{xmu_data_swap_a_block}()}
}
\concept{Data Functions}
| /man/umx_read_lower.Rd | no_license | JunqiangZheng/umx | R | false | true | 2,591 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc_and_utility.R
\name{umx_read_lower}
\alias{umx_read_lower}
\title{Read lower-triangle of data matrix from console or file}
\usage{
umx_read_lower(file = "", diag = TRUE, names = NULL, ensurePD = FALSE)
}
\arguments{
\item{file}{Path to a file to read (Default "" will read from user input)}
\item{diag}{Whether the data include the diagonal. Defaults to TRUE}
\item{names}{The default names for the variables.
Defaults to as.character(paste("X", 1:n, sep=""))}
\item{ensurePD}{Whether to coerce the resultant matrix to positive definite (Defaults to FALSE)}
}
\value{
\itemize{
\item \code{\link[=matrix]{matrix()}}
}
}
\description{
umx_read_lower will read a lower triangle of data, either from the
console, or from file, and return a full matrix, optionally coerced to
positive definite. This is useful, especially when copying data from a paper
that includes just the lower triangle of a correlation matrix.
}
\examples{
\dontrun{
require(umx) # for umxRAM
IQtests = c("brainstorm", "matrix", "moral", "shopping", "typing")
allCols = c("C", IQtests, "avgIQ", "maxIQ", "video")
df = umx_read_lower(file = "", diag = FALSE)
0.38
0.86 0.30
0.42 0.12 0.27
0.66 0.21 0.38 0.18
0.80 0.13 0.50 0.25 0.43
0.19 0.11 0.19 0.12 -0.06 0.22
0.27 0.09 0.33 0.05 -0.04 0.28 .73
0.52 0.17 0.38 0.37 0.39 0.44 0.18 0.13
dimnames(df) = list(allCols, allCols) # manually add
df = umx_read_lower(file = "", diag = FALSE, names = allCols, ensurePD= TRUE)
0.38
0.86 0.30
0.42 0.12 0.27
0.66 0.21 0.38 0.18
0.80 0.13 0.50 0.25 0.43
0.19 0.11 0.19 0.12 -0.06 0.22
0.27 0.09 0.33 0.05 -0.04 0.28 .73
0.52 0.17 0.38 0.37 0.39 0.44 0.18 0.13
round(df, 2)
m1 = umxRAM("wooley", data = mxData(df, type="cov", numObs = 90),
umxPath("g", to = IQtests),
umxPath(var = "g", fixedAt= 1),
umxPath(var = IQtests)
)
summary(m1)
}
}
\references{
\itemize{
\item \url{https://github.com/tbates/umx}, \url{https://tbates.github.io}
}
}
\seealso{
Other Data Functions: \code{\link{umxHetCor}()},
\code{\link{umx_as_numeric}()},
\code{\link{umx_lower2full}()},
\code{\link{umx_make_MR_data}()},
\code{\link{umx_make_TwinData}()},
\code{\link{umx_make_fake_data}()},
\code{\link{umx_make_raw_from_cov}()},
\code{\link{umx_polychoric}()},
\code{\link{umx_polypairwise}()},
\code{\link{umx_polytriowise}()},
\code{\link{umx_rename}()}, \code{\link{umx_reorder}()},
\code{\link{umx_select_valid}()},
\code{\link{umx_stack}()}, \code{\link{umx}},
\code{\link{xmu_data_swap_a_block}()}
}
\concept{Data Functions}
|
#----------------------------#
#-----Introductory to R:-----#
#---------(Part II)----------#
#----------------------------#
# In R, we can create and save our own function to use in the global environment
# For example we can create a TestFunction() that take two numbers and return sum and product of the two numbers.
# Demonstration function: given two numbers, return their sum and their
# product as an unnamed length-2 numeric vector (sum first, product second).
TestFunction <- function(number1, number2) {
  c(number1 + number2, number1 * number2)
}
# Create a variable x to save the result with TestFunction() that takes inputs 2 and 4.
x = TestFunction(2,4)
# You can check the result by printing the variable x
x
# You can also extract the result of each element stored in variable x by its index
x[1]
x[2]
# Note that once the function is defined in the global environment, it can be reused as many times as you want until it is cleared from the global environment.
a = TestFunction(10, 20)
# Let's practice creating a function that take two vectors a and b.
# The DistanceTest() function will be used to calculate the square-root of the sum of squared difference
# Euclidean distance between two equal-length numeric vectors:
# the square root of the sum of squared element-wise differences.
DistanceTest <- function(a, b) {
  sqrt(sum((a - b)^2))
}
# Create vector x1 and x2
x1 <- c(1,2,3,4)
x2 <- c(0,0,1,2)
# Using the DistanceTest function to calculate the result
DistanceTest(x1,x2)
# We can manually check the answer by writing the whole equation (hard-coding)
D <- sqrt((1-0)^2+(2-0)^2+(3-1)^2+(4-2)^2)
D
# Write a function that has a vector as its input and returns the mean and Standard Deviation
?sd
?mean
# Return the mean and the sample standard deviation of a numeric vector,
# packed into a length-2 vector: c(mean, sd).
FunctionMeanSD <- function(a) {
  c(mean(a), sd(a))
}
# The function will take a vector and calculate and return its mean and standard deviation.
FunctionMeanSD(x1)
# Basic For Loop
# In R, we can create a for loop to reduce significant amount of replicated work.
# For example, if we want to find how many numbers are greater than 5 in a vector.
# Hard coding: [x1 - 5, if positive => greater], [x2 - 5, if negative => not greater], ... [xn - 5, if negative => not greater]
# For Loop Solution:
x <- c(10, 12, 2, 4, 5, 11, 3, 8, 6, 5)
count <- 0
# NOTE(review): the condition uses >= 5, so values equal to 5 are counted
# even though the prose above says "greater than 5" -- confirm intent.
for (i in x){
if (i >= 5)
count = count+1
}
print(count)
# Another use of For Loop is to run calculation without repeating the work over and over
# Example: Multiply x by 5 and store the result into a new vector y
order <- 1:10
y <- c()
# Growing y with append() inside a loop copies the vector each iteration;
# fine for 10 elements, but preallocate (as below) for large inputs.
for (i in order){
result <- x[i]*5
y <- append(y, result)
}
print(y)
# Another way for the same result is to create a vector 'w' with 10 zero value, then add the result to the zeros
w <- rep(0,10)
order <- 1:10
for (i in order){
result <- x[i]*5
w[i] <- result
}
print(w)
| /DS612_R-Code_Lecture_2.R | permissive | NormanLo4319/DS612_Data_Mining_with_Business_Applications_R | R | false | false | 2,623 | r | #----------------------------#
#-----Introductory to R:-----#
#---------(Part II)----------#
#----------------------------#
# In R, we can create and save our own function to use in the global environment
# For example we can create a TestFunction() that take two numbers and return sum and product of the two numbers.
TestFunction = function(number1,number2){
SumResult = number1+number2
ProductResult = number1 * number2
c(SumResult,ProductResult)
}
# Create a variable x to save the result with TestFunction() that takes inputs 2 and 4.
x = TestFunction(2,4)
# You can check the result by printing the varaible x
x
# You can also extract the result of each element stored in variable x by its index
x[1]
x[2]
# Note that once the function is defined in the global environment, it can be reused as many time as you want until it is cleared from the globa enivronment.
a = TestFunction(10, 20)
# Let's practice creating a function that take two vectors a and b.
# The DistanceTest() function will be used to calculate the square-root of the sum of squared difference
DistanceTest = function(a,b){
Dist = sqrt(sum((a-b)^2))
Dist
}
# Create vector x1 and x2
x1 <- c(1,2,3,4)
x2 <- c(0,0,1,2)
# Using the DistanceTest function to calculate the result
DistanceTest(x1,x2)
# We can manually check the answer by writing the whole equation (hard-coding)
D <- sqrt((1-0)^2+(2-0)^2+(3-1)^2+(4-2)^2)
D
# Write a function that has a vector as its input and returns the mean and Standard Deviation
?sd
?mean
FunctionMeanSD = function(a){
a_mean = mean(a)
a_sd = sd(a)
c(a_mean,a_sd)
}
# The function will take a vector and calculate and return its mean and standard deviation.
FunctionMeanSD(x1)
# Basic For Loop
# In R, we can create a for loop to reduce significant amount of replicated work.
# For example, if we want to find how many numbers are greater than 5 in a vector.
# Hard coding: [x1 - 5, if positive => greater], [x2 - 5, if negative => not greater], ... [xn - 5, if negative => not greater]
# For Loop Solution:
x <- c(10, 12, 2, 4, 5, 11, 3, 8, 6, 5)
count <- 0
for (i in x){
if (i >= 5)
count = count+1
}
print(count)
# Another use of For Loop is to run calcuation without repeating the work over and over
# Example: Multiply x by 5 and store the result into a new vector y
order <- 1:10
y <- c()
for (i in order){
result <- x[i]*5
y <- append(y, result)
}
print(y)
# Another way for the same result is to create a vector 'w' with 10 zero value, then add the result to the zeros
w <- rep(0,10)
order <- 1:10
for (i in order){
result <- x[i]*5
w[i] <- result
}
print(w)
|
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97153296111166e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615768244-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,803 | r | testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97153296111166e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) |
library(cultevo)
### Name: page.test
### Title: Page test for monotonicity of ranks.
### Aliases: page.test page.L page.compute.exact
### ** Examples
# exact p value computation for N=4, k=4
# (each row of the matrix is one subject's ranking of the k conditions)
page.test(t(replicate(4, sample(4))))
# exact p value computation for N=4, k=10
page.test(t(replicate(4, sample(10))))
# approximate p value computation for N=4, k=23
# (k is too large for the exact null distribution; verbose = FALSE
# suppresses progress messages)
result <- page.test(t(replicate(4, sample(23))), verbose = FALSE)
print(result)
# raw calculation of the significance levels
# (N = 6 subjects, k = 4 conditions, observed Page statistic L = 322)
page.compute.exact(6, 4, 322)
| /data/genthat_extracted_code/cultevo/examples/page.test.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 524 | r | library(cultevo)
### Name: page.test
### Title: Page test for monotonicity of ranks.
### Aliases: page.test page.L page.compute.exact
### ** Examples
# exact p value computation for N=4, k=4
page.test(t(replicate(4, sample(4))))
# exact p value computation for N=4, k=10
page.test(t(replicate(4, sample(10))))
# approximate p value computation for N=4, k=23
result <- page.test(t(replicate(4, sample(23))), verbose = FALSE)
print(result)
# raw calculation of the significance levels
page.compute.exact(6, 4, 322)
|
rm(list=ls(all=T))
setwd("D:/Gaikwad/Data/Live Project 1/Project_1")
#Load Libraries
x = c("ggplot2", "corrgram", "DMwR", "caret", "randomForest", "unbalanced", "C50", "dummies", "e1071", "Information",
"MASS", "rpart", "gbm", "ROSE", 'sampling', 'DataCombine', 'inTrees')
#install.packages(x)
lapply(x, require, character.only = TRUE)
rm(x)
#read cvs file
bank_loan = read.csv("bank-loan.csv", header = T, na.strings = c(" ", "", "NA"))
########################################################################################################
##Explore the data
str(bank_loan)
# list types for each attribute
sapply(bank_loan, class)
#Unique values in a column
unique(bank_loan$ed)
summary(bank_loan)
str(bank_loan)
#Convert each variable to numeric so downstream imputation and scaling
#operate on a uniform type.
bank_loan$age = as.numeric(bank_loan$age)
bank_loan$ed = as.numeric(bank_loan$ed)
bank_loan$employ = as.numeric(bank_loan$employ)
bank_loan$address = as.numeric(bank_loan$address)
bank_loan$debtinc = as.numeric(bank_loan$debtinc)
bank_loan$income = as.numeric(bank_loan$income)
bank_loan$creddebt = as.numeric(bank_loan$creddebt)
#Fix: the original assigned to a misspelled new column `othdebtdebt`,
#leaving `othdebt` unconverted and adding a stray duplicate column.
bank_loan$othdebt = as.numeric(bank_loan$othdebt)
typeof(bank_loan)
##############################################################################################################
#Missing Values Analysis
#sum of missing values
#sum(is.na(bank_loan))
#create dataframe with missing percentage
missing_val = data.frame(apply(bank_loan,2,function(x){sum(is.na(x))}))
#(explanation of above code:
#(1) Creating own function of x as I need
#to calculate missing values from all columns.
#(2) 2 is written as we are doing operations on column.
#(3)Apply: is used for calculating missing values of all the variables)
#convert row names into columns
missing_val$Columns = row.names(missing_val)
#Rename the variable name
names(missing_val)[1] = "Missing_percentage"
#Calculate percentage of missing values per column
#(fix: the original divided by nrow(bank_data), an undefined object --
#the data frame in this script is bank_loan)
missing_val$Missing_percentage = (missing_val$Missing_percentage/nrow(bank_loan)) * 100
#arranging descending order
missing_val = missing_val[order(-missing_val$Missing_percentage),]
row.names(missing_val) = NULL
#Rearranging the columns
missing_val = missing_val[,c(2,1)]
#Writing output result back to disk
write.csv(missing_val, "Missing_perc-r.csv", row.names = F)
#Missing values are present only in "default" so it's not needed to plot bar graph.
###test the missing value
#actual = 45
#mean = 35.01767
#median = 34
#knn = 37.48551
#bank_loan[40,1] = NA
#freez the KNN as the respective value is closer to 45 in KNN method
#Mean Method
#bank_loan$age[is.na(bank_loan$age)] = mean(bank_loan$age, na.rm = T)
#Median Method
#bank_loan$age[is.na(bank_loan$age)] = median(bank_loan$age, na.rm = T)
# kNN Imputation
bank_loan = knnImputation(bank_loan, k = 3)
########################################################################################################
#Outlier Analysis#
#BoxPlots - Distribution and Outlier Check/analysis
numeric_index = sapply(bank_loan,is.numeric) #selecting only numeric
numeric_data = bank_loan[numeric_index]
#numeric_index
cnames = c("age", "employ", "address", "income", "debtinc", "creddebt", "othdebt", "default")
#As there are multiple numeric variable choose the for_loop for plotting the box plot
# Meaning of assign(paste0("gn",i)= assign is used to assign the name to dataset and in this case gn1 to gn8
# will be assign to loops executing in the for loop step by step.
for (i in 1:length(cnames))
{
assign(paste0("gn",i), ggplot(aes_string(y = (cnames[i]), x = "default"), data = subset(bank_loan))+
stat_boxplot(geom = "errorbar", width = 0.5) +
geom_boxplot(outlier.colour="red", fill = "grey" ,outlier.shape=18,
outlier.size=1, notch=FALSE) +
theme(legend.position="bottom")+
labs(y=cnames[i],x="default")+
ggtitle(paste("Box plot of default for",cnames[i])))
}
# Plotting plots together
gridExtra::grid.arrange(gn1,gn5,gn2,gn6,gn7,gn8,ncol=3)
#removing outlyer via box plot
# defining new data frame for experiment
df = bank_loan
val = bank_loan$income[bank_loan$income %in% boxplot.stats(bank_loan$income)$out]
#val
# ! is used as not symbol in below code, which is for writting condition
# don't select that oberservations which contain the value for val(in above code)
bank_loan = bank_loan[which(!bank_loan$income %in% val),]
#Loop to remove from all varibales
for (i in cnames){
print(i)
val=bank_loan[,i][bank_loan[,i]%in% boxplot.stats(bank_loan[,i])$out]
print(length(val))
bank_loan = bank_loan[which(!bank_loan[,i] %in% val),]
}
#################################################################################################
#Feature Selection
#Correlation Plot
#bank_loan = df
#Explanation= Order is F don't need to follow any numeric variable order, #upper pannel is pie chart
corrgram(bank_loan[,cnames], order = F, upper.panel=panel.pie, text.panel=panel.txt, main = "Correlation Plot")
## Explanation of plot: red color indicates that variables are negatively correlated and blue color indicates
# that variables are positively correlated.
#Here we will not take any selection algo on categorial because we have only one categorial variable
#Dimension Reduction
bank_loan = subset(bank_loan, select = -c(creddebt,othdebt))
##############################################################################################################
#Feature Scaling
#Normality check
qqnorm(bank_loan$income)
hist(bank_loan$income)
#Normalisation
cnames = c("age", "employ", "address", "income", "debtinc")
#(in consol crosscheck cnames)
for(i in cnames){
print(i)
bank_loan[,i] = (bank_loan[,i] - min(bank_loan[,i]))/
(max(bank_loan[,i] - min(bank_loan[,i])))
}
#Standardisation
#reloaded the data which came after feature selection
for(i in cnames){
print(i)
bank_loan[,i] = (bank_loan[,i] - mean(bank_loan[,i]))/
sd(bank_loan[,i])
}
#############################################################################################################
#Model Development
#Clean the environment
library(DataCombine)
rmExcept("bank_loan")
#Divide data into train and test using stratified sampling method
set.seed(1234)
train.index = createDataPartition(bank_loan$default, p = .80, list = FALSE)
train = bank_loan[ train.index,]
test = bank_loan[-train.index,]
train$default=as.factor(train$default)
str(train$default)
#Logistic Regression
logit_model = glm(default ~ ., data = train, family = "binomial")
#explanation of above: . means consider all variable except default
# glm iss built in function for builting ligistic regression
# default is target variable
#summary of the model
summary(logit_model)
#predict using logistic regression
logit_Predictions = predict(logit_model, newdata = test, type = "response")
#cross check
#logit_Predictions
#convert prob into 1 and 0
logit_Predictions = ifelse(logit_Predictions > 0.5, 1, 0)
##Evaluate the performance of the classification model
ConfMatrix_RF = table(test$default, logit_Predictions)
confusionMatrix(ConfMatrix_RF)
#False Negative rate (fix: the original line `FNR = FN/FN+TP` executed with
#undefined FN/TP and parsed as (FN/FN)+TP due to precedence; kept commented
#with correct parentheses, consistent with the other model sections)
#FNR = FN/(FN+TP)
##Decision tree for classification
#Develop Model on training data
str(bank_loan$default)
bank_loan$default[bank_loan$default %in% "1"] = "yes"
bank_loan$default[bank_loan$default %in% "0"] = "no"
C50_model = C5.0(default ~., train, trials = 100, rules = TRUE)
#Summary of DT model
summary(C50_model)
#write rules into disk
write(capture.output(summary(C50_model)), "c50Rules.txt")
#Lets predict for test cases
C50_Predictions = predict(C50_model, test[,-6], type = "class")
##Evaluate the performance of classification model
ConfMatrix_C50 = table(test$default, C50_Predictions)
confusionMatrix(ConfMatrix_C50)
#False Negative rate
#FNR = FN/FN+TP
###Random Forest
RF_model = randomForest(default ~ ., train, importance = TRUE, ntree = 500)
#Predict test data using random forest model
RF_Predictions = predict(RF_model, test[,-6])
##Evaluate the performance of classification model
ConfMatrix_RF = table(test$default, RF_Predictions)
confusionMatrix(ConfMatrix_RF)
#False Negative rate
#FNR = FN/FN+TP
##KNN Implementation
library(class)
#Predict test data
KNN_Predictions = knn(train[, 1:6], test[, 1:6], train$default, k = 6)
#Confusion matrix
Conf_matrix = table(KNN_Predictions, test$default)
#Accuracy
sum(diag(Conf_matrix))/nrow(test)
#False Negative rate
#FNR = FN/FN+TP
#naive Bayes
library(e1071)
#Develop model
NB_model = naiveBayes(default ~ ., data = train)
#predict on test cases #raw
NB_Predictions = predict(NB_model, test[,1:6], type = 'class')
#Look at confusion matrix
Conf_matrix = table(observed = test[,6], predicted = NB_Predictions)
confusionMatrix(Conf_matrix)
#statical way
mean(NB_Predictions == test$default)
| /Bank_Loan_Default_case.r | no_license | ShubhamGaikwad1522/Bank-Loan-Defaulter-Prediction | R | false | false | 9,077 | r | rm(list=ls(all=T))
setwd("D:/Gaikwad/Data/Live Project 1/Project_1")
#Load Libraries
x = c("ggplot2", "corrgram", "DMwR", "caret", "randomForest", "unbalanced", "C50", "dummies", "e1071", "Information",
"MASS", "rpart", "gbm", "ROSE", 'sampling', 'DataCombine', 'inTrees')
#install.packages(x)
lapply(x, require, character.only = TRUE)
rm(x)
#read cvs file
bank_loan = read.csv("bank-loan.csv", header = T, na.strings = c(" ", "", "NA"))
########################################################################################################
##Explore the data
str(bank_loan)
# list types for each attribute
sapply(bank_loan, class)
#Unique values in a column
unique(bank_loan$ed)
summary(bank_loan)
str(bank_loan)
#convert each varaible
bank_loan$age = as.numeric(bank_loan$age)
bank_loan$ed = as.numeric(bank_loan$ed)
bank_loan$employ = as.numeric(bank_loan$employ)
bank_loan$address = as.numeric(bank_loan$address)
bank_loan$debtinc = as.numeric(bank_loan$debtinc)
bank_loan$income = as.numeric(bank_loan$income)
bank_loan$creddebt = as.numeric(bank_loan$creddebt)
bank_loan$othdebtdebt = as.numeric(bank_loan$othdebt)
typeof(bank_loan)
##############################################################################################################
#Missing Values Analysis
#sum of missing values
#sum(is.na(bank_loan))
#create dataframe with missing percentage
missing_val = data.frame(apply(bank_loan,2,function(x){sum(is.na(x))}))
#(explanation of above code:
#(1) Creating own function of x as I need
#to calculate missing values from all columns.
#(2) 2 is written as we are doing operations on column.
#(3)Apply: is used for calculating missing values of all the variables)
#convert row names into columns
missing_val$Columns = row.names(missing_val)
#Rename the variable name
names(missing_val)[1] = "Missing_percentage"
#Calculate percentage
missing_val$Missing_percentage = (missing_val$Missing_percentage/nrow(bank_data)) * 100
#arranging descending order
missing_val = missing_val[order(-missing_val$Missing_percentage),]
row.names(missing_val) = NULL
#Rearranging the columns
missing_val = missing_val[,c(2,1)]
#writting output result back into disk
write.csv(missing_val, "Missing_perc-r.csv", row.names = F)
#Missing values are present only in "default" so it's not needed to plot bar graph.
###test the missing value
#actual = 45
#mean = 35.01767
#median = 34
#knn = 37.48551
#bank_loan[40,1] = NA
#freez the KNN as the respective value is closer to 45 in KNN method
#Mean Method
#bank_loan$age[is.na(bank_loan$age)] = mean(bank_loan$age, na.rm = T)
#Median Method
#bank_loan$age[is.na(bank_loan$age)] = median(bank_loan$age, na.rm = T)
# kNN Imputation
bank_loan = knnImputation(bank_loan, k = 3)
########################################################################################################
#Outlier Analysis#
#BoxPlots - Distribution and Outlier Check/analysis
numeric_index = sapply(bank_loan,is.numeric) #selecting only numeric
numeric_data = bank_loan[numeric_index]
#numeric_index
cnames = c("age", "employ", "address", "income", "debtinc", "creddebt", "othdebt", "default")
#As there are multiple numeric variable choose the for_loop for plotting the box plot
# Meaning of assign(paste0("gn",i)= assign is used to assign the name to dataset and in this case gn1 to gn8
# will be assign to loops executing in the for loop step by step.
for (i in 1:length(cnames))
{
assign(paste0("gn",i), ggplot(aes_string(y = (cnames[i]), x = "default"), data = subset(bank_loan))+
stat_boxplot(geom = "errorbar", width = 0.5) +
geom_boxplot(outlier.colour="red", fill = "grey" ,outlier.shape=18,
outlier.size=1, notch=FALSE) +
theme(legend.position="bottom")+
labs(y=cnames[i],x="default")+
ggtitle(paste("Box plot of default for",cnames[i])))
}
# Plotting plots together
gridExtra::grid.arrange(gn1,gn5,gn2,gn6,gn7,gn8,ncol=3)
#removing outlyer via box plot
# defining new data frame for experiment
df = bank_loan
val = bank_loan$income[bank_loan$income %in% boxplot.stats(bank_loan$income)$out]
#val
# ! is used as not symbol in below code, which is for writting condition
# don't select that oberservations which contain the value for val(in above code)
bank_loan = bank_loan[which(!bank_loan$income %in% val),]
#Loop to remove from all varibales
for (i in cnames){
print(i)
val=bank_loan[,i][bank_loan[,i]%in% boxplot.stats(bank_loan[,i])$out]
print(length(val))
bank_loan = bank_loan[which(!bank_loan[,i] %in% val),]
}
#################################################################################################
#Feature Selection
#Correlation Plot
#bank_loan = df
#Explanation= Order is F don't need to follow any numeric variable order, #upper pannel is pie chart
corrgram(bank_loan[,cnames], order = F, upper.panel=panel.pie, text.panel=panel.txt, main = "Correlation Plot")
## Explanation of plot: red color indicates that variables are negatively correlated and blue color indicates
# that variables are positively correlated.
#Here we will not take any selection algo on categorial because we have only one categorial variable
#Dimension Reduction
bank_loan = subset(bank_loan, select = -c(creddebt,othdebt))
##############################################################################################################
#Feature Scaling
#Normality check
qqnorm(bank_loan$income)
hist(bank_loan$income)
#Normalisation
cnames = c("age", "employ", "address", "income", "debtinc")
#(in consol crosscheck cnames)
for(i in cnames){
print(i)
bank_loan[,i] = (bank_loan[,i] - min(bank_loan[,i]))/
(max(bank_loan[,i] - min(bank_loan[,i])))
}
#Standardisation
#reloaded the data which came after feature selection
for(i in cnames){
print(i)
bank_loan[,i] = (bank_loan[,i] - mean(bank_loan[,i]))/
sd(bank_loan[,i])
}
#############################################################################################################
# Model Development
# Clean the environment, keeping only the modelling data frame
library(DataCombine)
rmExcept("bank_loan")
# Divide data into train and test using stratified sampling on the target
set.seed(1234)
train.index = createDataPartition(bank_loan$default, p = .80, list = FALSE)
train = bank_loan[train.index,]
test = bank_loan[-train.index,]
# glm(family = "binomial") needs a factor (or 0/1) response
train$default = as.factor(train$default)
str(train$default)
# Logistic Regression: '.' means use every column except the target as a
# predictor
logit_model = glm(default ~ ., data = train, family = "binomial")
# Summary of the fitted model (coefficients, deviance, AIC)
summary(logit_model)
# Predict class probabilities on the held-out test set
logit_Predictions = predict(logit_model, newdata = test, type = "response")
# Convert probabilities into hard 1/0 labels at a 0.5 threshold
logit_Predictions = ifelse(logit_Predictions > 0.5, 1, 0)
## Evaluate the performance of the classification model
# (rows = observed class, columns = predicted class)
ConfMatrix_RF = table(test$default, logit_Predictions)
confusionMatrix(ConfMatrix_RF)
# False Negative rate = FN / (FN + TP).
# BUG FIX: the original line `FNR = FN/FN+TP` referenced undefined objects
# FN and TP, and operator precedence would have computed 1 + TP anyway.
# NOTE(review): indexing assumes both classes appear in rows/columns of the
# table ("0" first, "1" second) -- confirm on the actual split.
FNR = ConfMatrix_RF[2, 1] / (ConfMatrix_RF[2, 1] + ConfMatrix_RF[2, 2])
##Decision tree for classification
#Develop Model on training data
str(bank_loan$default)
# NOTE(review): this recodes bank_loan's target to "yes"/"no", but train/test
# were created earlier from the 0/1 coding, so the models below never see the
# recoded labels -- confirm whether the recode should happen before the split.
bank_loan$default[bank_loan$default %in% "1"] = "yes"
bank_loan$default[bank_loan$default %in% "0"] = "no"
# C5.0 decision tree with boosting (trials = 100) and rule extraction
C50_model = C5.0(default ~., train, trials = 100, rules = TRUE)
#Summary of DT model
summary(C50_model)
# Persist the extracted rules to disk for inspection
write(capture.output(summary(C50_model)), "c50Rules.txt")
# Predict test cases.
# NOTE(review): test[,-6] assumes the target is column 6 of the frame --
# TODO confirm after the earlier subset() drop.
C50_Predictions = predict(C50_model, test[,-6], type = "class")
##Evaluate the performance of classification model
ConfMatrix_C50 = table(test$default, C50_Predictions)
confusionMatrix(ConfMatrix_C50)
#False Negative rate
#FNR = FN/FN+TP
###Random Forest with 500 trees and variable-importance tracking
RF_model = randomForest(default ~ ., train, importance = TRUE, ntree = 500)
# Predict test data using the random forest model (same column-6 assumption)
RF_Predictions = predict(RF_model, test[,-6])
##Evaluate the performance of classification model
ConfMatrix_RF = table(test$default, RF_Predictions)
confusionMatrix(ConfMatrix_RF)
#False Negative rate
#FNR = FN/FN+TP
##KNN Implementation (k = 6 nearest neighbours)
library(class)
# NOTE(review): train[, 1:6] appears to include the target column among the
# KNN features -- verify the intended predictor columns.
KNN_Predictions = knn(train[, 1:6], test[, 1:6], train$default, k = 6)
#Confusion matrix
Conf_matrix = table(KNN_Predictions, test$default)
# Accuracy = sum of the diagonal (correct predictions) over test size
sum(diag(Conf_matrix))/nrow(test)
#False Negative rate
#FNR = FN/FN+TP
#naive Bayes
library(e1071)
#Develop model
NB_model = naiveBayes(default ~ ., data = train)
# Predict hard class labels on the test cases
NB_Predictions = predict(NB_model, test[,1:6], type = 'class')
#Look at confusion matrix
Conf_matrix = table(observed = test[,6], predicted = NB_Predictions)
confusionMatrix(Conf_matrix)
# Accuracy computed directly as the share of matching predictions
mean(NB_Predictions == test$default)
|
########################################
# load libraries
########################################
# load some packages that we'll need
library(dplyr)
library(ggplot2)
library(reshape)
library(scales)
# be picky about white backgrounds on our plots
theme_set(theme_bw())
# set the data and figure directories (current directory by default)
data_dir <- '.'
figure_dir <- '.'
########################################
# load weather and trip data
########################################
# load the RData file produced by load_trips.R; this populates the
# workspace with the objects saved by that script
load(sprintf('%s/trips.RData', data_dir))
########################################
# plot trip data (exercises still to be implemented below)
########################################
# plot the distribution of trip times across all rides
# plot the distribution of trip times by rider type
# plot the number of trips over each day
# plot the number of trips by gender and age
########################################
# plot trip and weather data
########################################
# plot the minimum temperature over each day
# plot the number of trips as a function of the minimum temperature, where each point represents a day
# you'll need to summarize the trips and join to the weather data to do this
# repeat this, splitting results by whether there was substantial precipitation or not
# you'll need to decide what constitutes "substantial precipitation" and create a new T/F column to indicate this
# add a smoothed fit on top of the previous plot, using geom_smooth
| /week1/citibike/plot_trips.R | no_license | psychologicalmuffin/coursework | R | false | false | 1,451 | r | ########################################
# load libraries
########################################
# load some packages that we'll need
library(dplyr)
library(ggplot2)
library(reshape)
library(scales)
# be picky about white backgrounds on our plots
theme_set(theme_bw())
# set the data and figure directories
data_dir <- '.'
figure_dir <- '.'
########################################
# load weather and trip data
########################################
# load RData file output by load_trips.R
load(sprintf('%s/trips.RData', data_dir))
########################################
# plot trip data
########################################
# plot the distribution of trip times across all rides
# plot the distribution of trip times by rider type
# plot the number of trips over each day
# plot the number of trips by gender and age
########################################
# plot trip and weather data
########################################
# plot the minimum temperature over each day
# plot the number of trips as a function of the minimum temperature, where each point represents a day
# you'll need to summarize the trips and join to the weather data to do this
# repeat this, splitting results by whether there was substantial precipitation or not
# you'll need to decide what constitutes "substantial precipitation" and create a new T/F column to indicate this
# add a smoothed fit on top of the previous plot, using geom_smooth
|
#' @importFrom magrittr "%>%"
#' @importFrom rlang .data
#' @title Get metrics for a list of channels
#'
#' @description Get metrics for a list of channels
#'
#' \href{https://developers.google.com/youtube/analytics/content_owner_reports}{https://developers.google.com/youtube/analytics/content_owner_reports}
#'
#' @param token Access token
#' @param contentOwner Content owner.
#' @param from Starting date.
#' @param to Ending date.
#' @param channels List of channels.
#' @param metrics List of metrics. Defaults to \code{c("views")}.
#' @param dimensions List of dimensions. Defaults to \code{c("channel")}.
#'
#' @return Returns a \code{\link{data.frame}} of results, or an empty
#'   tibble when the API returns no rows.
#'
#' @examples
#' \dontrun{
#' channelMetrics <- getChannelMetrics(token, contentOwner = "ContentOwner",
#'                                     from = startDate, to = endDate, channels$channelId)
#' }
#'
#' @export
getChannelMetrics <- function(token, contentOwner, from, to, channels, metrics = NULL, dimensions = NULL) {
  host <- "https://youtubeanalytics.googleapis.com"
  endpoint <- "/v2/reports"
  if (is.null(metrics)) {
    # Default metrics
    metrics <- c("views")
  }
  if (is.null(dimensions)) {
    # Default dimensions
    dimensions <- c("channel")
  }
  # Only announce progress when the request covers multiple channels
  showStatus <- length(channels) > 1
  if (showStatus) {
    message(sprintf("Retrieving metrics for %s channels ... ", scales::comma(length(channels))))
  }
  url <- sprintf("%s%s?ids=contentOwner==%s&startDate=%s&endDate=%s&metrics=%s&dimensions=%s&filters=channel==%s",
                 host, endpoint, contentOwner, from, to,
                 paste0(metrics, collapse = ","),
                 paste0(dimensions, collapse = ","),
                 paste0(channels, collapse = ","))
  results <- queryAPI(token, url)
  # BUG FIX: check results$rows for NULL before calling nrow() on it;
  # nrow(NULL) is NULL and `NULL < 1` breaks the if() condition.
  if (is.null(results) || is.null(results$rows) || nrow(results$rows) < 1) {
    return(tibble::as_tibble(NULL))
  }
  columnHeaders <- results$columnHeaders
  # BUG FIX: the original tested !exists("columnHeaders"), which is always
  # FALSE here because the symbol was just assigned (even when it is NULL).
  if (is.null(columnHeaders)) {
    stop("getChannelMetrics() - Missing columnHeaders")
  }
  # Put results in a data frame and name the columns from the API headers
  df <- as.data.frame(results$rows, stringsAsFactors = FALSE)
  names(df) <- columnHeaders$name
  # Convert each column to the R type matching its declared API dataType
  typeList <- list("STRING", "INTEGER", "FLOAT")
  funcList <- list(as.character, as.integer, as.numeric)
  for (i in seq_len(ncol(df))) {
    df[, i] <- funcList[[match(columnHeaders$dataType[i], typeList)]](df[, i])
  }
  if (showStatus) {
    message("\ndone.")
  }
  return(df)
}
| /R/getChannelMetrics.R | permissive | EricGoldsmith/rYouTube | R | false | false | 2,545 | r | #' @importFrom magrittr "%>%"
#' @importFrom rlang .data
#' @title Get metrics for a list of channels
#'
#' @description Get metrics for a list of channels
#'
#' \href{https://developers.google.com/youtube/analytics/content_owner_reports}{https://developers.google.com/youtube/analytics/content_owner_reports}
#'
#' @param token Access token
#' @param contentOwner Content owner.
#' @param from Starting date.
#' @param to Ending date.
#' @param channels List of channels.
#' @param metrics List of metrics. Defaults to \code{c("views")}.
#' @param dimensions List of dimensions. Defaults to \code{c("channel")}.
#'
#' @return Returns a \code{\link{data.frame}} of results
#'
#' @examples
#' \dontrun{
#' channelMetrics <- getChannelMetrics(token, contentOwner = "ContentOwner",
#' from = startDate, to = endDate, channels$channelId)
#' }
#'
#' @export
getChannelMetrics <- function(token, contentOwner, from, to, channels, metrics = NULL, dimensions = NULL) {
host <- "https://youtubeanalytics.googleapis.com"
endpoint <- "/v2/reports"
if (is.null(metrics)) {
# Default metrics
metrics <- c("views")
}
if (is.null(dimensions)) {
# Default dimensions
dimensions <- c("channel")
}
if (length(channels) > 1) {
message(sprintf("Retrieving metrics for %s channels ... ", scales::comma(length(channels))))
showStatus <- TRUE
} else {
showStatus <- FALSE
}
url <- sprintf("%s%s?ids=contentOwner==%s&startDate=%s&endDate=%s&metrics=%s&dimensions=%s&filters=channel==%s",
host, endpoint, contentOwner, from, to,
paste0(metrics, collapse = ","),
paste0(dimensions, collapse = ","),
paste0(channels, collapse = ","))
results <- queryAPI(token, url)
if (is.null(results) || nrow(results$rows) < 1) {
return(tibble::as_tibble(NULL))
}
columnHeaders <- results$columnHeaders
if (!exists("columnHeaders")) {
stop("getChannelMetrics() - Missing columnHeaders")
}
# Put results in data frame
df <- as.data.frame(results$rows, stringsAsFactors = FALSE)
names(df) <- columnHeaders$name
# Convert columns to correct data types
# There must be a more 'functional' way of doing this ...
typeList <- list("STRING", "INTEGER", "FLOAT")
funcList <- list(as.character, as.integer, as.numeric)
for (i in 1:ncol(df)) {
df[ , i] <- funcList[[match(columnHeaders$dataType[i], typeList)]](df[ , i])
}
if (showStatus) {
message("\ndone.")
}
return(df)
}
|
# This script was written by Kanishk Asthana kasthana@eng.ucsd.edu.
# It computes the isotope mass distribution of a peptide; change `input`
# and everything else is recalculated accordingly.
input="SLAMMER"
# Amino-acid atomic composition table.
# Source of data: http://www.bmrb.wisc.edu/ref_info/aadata.dat
atomdata=read.csv("atomicfrequencies.txt", header=FALSE)
# Keep only symbol, name and the five element-count columns
atomdata=atomdata[,c(1,3,12,13,14,15,16)]
#Labeling Columns
names(atomdata)=c("Symbol","Name","Carbons","Hydrogens","Nitrogens","Oxygens","Sulphurs")
# Amino acids without sulphur have NA counts; treat them as zero
atomdata$Sulphurs[is.na(atomdata$Sulphurs)]=0
# Ensure the count columns are numeric, not character/factor
atomdata$Nitrogens=as.numeric(atomdata$Nitrogens)
atomdata$Oxygens=as.numeric(atomdata$Oxygens)
# Split the peptide string into one-letter amino-acid codes
aminoAcidVector=unlist(strsplit(input,""));
# alldata: one column per residue, rows = C/H/N/O/S atom counts
alldata=sapply(aminoAcidVector,function(aminoAcid){
data=unlist(data.frame(atomdata[atomdata$Symbol==aminoAcid,c(3,4,5,6,7)]))
return(data)
});
print(alldata)
# Total atomic composition of the whole peptide
sumofallAminoAcids=rowSums(alldata)
# Each peptide bond releases one water (2 H + 1 O)
numberofPeptideBonds=nchar(input)-1
#Number of Hydrogens to Subtract
hydrogenstoSubtract=2*numberofPeptideBonds
#Number of Oxygens to Subtract
oxygenstoSubtract=numberofPeptideBonds
# Correct the raw residue sums for the water lost in condensation
sumofallAminoAcids["Hydrogens"]=sumofallAminoAcids["Hydrogens"]-hydrogenstoSubtract
sumofallAminoAcids["Oxygens"]=sumofallAminoAcids["Oxygens"]-oxygenstoSubtract
#Printing values
print(sumofallAminoAcids)
# Part A of Problem 1
# With C-13 relative abundance 0.01, the number of heavy carbons in the
# peptide follows a binomial distribution over the total carbon count.
carbonDistribution=dbinom(0:sumofallAminoAcids["Carbons"],sumofallAminoAcids["Carbons"],0.01)
pdf("CarbonBinomiaDistribution.pdf")
#Plotting Carbon Distribution
plot(0:sumofallAminoAcids["Carbons"],carbonDistribution,type="h",main="Carbon Binomial Probability Distribution", ylab="Probability",xlab="Number of C-13s")
dev.off()
pdf("OxygenBinomialDistrbution.pdf")
# O-18 with assumed relative abundance 0.1 (problem-specific enrichment)
oxygenDistribution=dbinom(0:sumofallAminoAcids["Oxygens"],sumofallAminoAcids["Oxygens"],0.1)
#Plotting Oxygen Distribution
plot(0:sumofallAminoAcids["Oxygens"],oxygenDistribution,type="h",main="Oxygen Binomial Probability Distribution", ylab="Probability", xlab="Number of O-18s")
dev.off()
# Joint probability for every (carbon, oxygen) isotope combination;
# the outer product is valid because the two counts are independent.
combinedMatrix=outer(carbonDistribution,oxygenDistribution,FUN="*")
# All possible total carbon masses (all C-12 ... all C-13), step 1 Da
carbonMasses=seq(12*sumofallAminoAcids["Carbons"],13*sumofallAminoAcids["Carbons"],by=1)
# All possible total oxygen masses; O-18 replaces O-16, so step 2 Da
oxygenMasses=seq(16*sumofallAminoAcids["Oxygens"],18*sumofallAminoAcids["Oxygens"],by=2)
# Outer sum pairs every carbon mass with every oxygen mass
combinedMasses=outer(carbonMasses,oxygenMasses,FUN="+")
# H, N and S are treated as monoisotopic here (masses 1, 14, 32)
massToAdd=1*sumofallAminoAcids["Hydrogens"]+14*sumofallAminoAcids["Nitrogens"]+32*sumofallAminoAcids["Sulphurs"]
#New adjusted Masses:
combinedMasses=combinedMasses+massToAdd
uniqueMasses=unique(as.numeric(combinedMasses))
print(uniqueMasses)
# Sum the joint probabilities of all (C, O) combinations that yield each
# unique total mass.
probabilitiesForUniqueMasses=sapply(uniqueMasses,function(mass){
# Logical mask of matrix cells whose mass equals this unique mass
logicalIndexesForDistribution=(combinedMasses==mass);
massProbabilities=combinedMatrix[logicalIndexesForDistribution]
# Total probability for this mass
return(sum(massProbabilities))
});
print("Ordered Probabilties for Masses:")
print(probabilitiesForUniqueMasses)
# Plot the Part A mass spectrum and write the isotope profile report.
pdf("Problem1AFigure.pdf")
plot(uniqueMasses,probabilitiesForUniqueMasses,type='h',xlab="Mass",ylab="Probability of Occurance",main="Mass Spectrum for Part A")
sink("Problem1AIsotopeProfile.txt")
print("Isotope Profile for 5 lowest masses P0,P1,...P4")
print(probabilitiesForUniqueMasses[1:5])
# BUG FIX: the original never closed the diversion (no sink()) and then
# called unlink() on the report file while output was still being
# redirected into it, destroying the report it had just written.
sink()
dev.off()
# Part B of Problem 1: add hydrogen/deuterium exchange to the model.
# NOTE(review): the comment below says "0.6 percent of hydrogens replaced",
# but the code uses probability 0.6 (i.e. 60%); 0.6% would be 0.006 --
# confirm against the assignment statement.
#For the case of Deuterium having 0.6 percent of hydrogens replaced is similar to having a relative abundance of 0.6 for deuterium
hydrogenDistribution=dbinom(0:sumofallAminoAcids["Hydrogens"],sumofallAminoAcids["Hydrogens"],0.6)
#Plotting Hydrogen Distribution
pdf("HydrogenBinomialDistribution.pdf")
plot(0:sumofallAminoAcids["Hydrogens"],hydrogenDistribution,type="h",main="Hydrogen Binomial Probability Distribution", ylab="Probability",xlab="Number of H-2s")
dev.off()
# 3-D joint distribution over (carbon, oxygen, hydrogen) isotope counts
completeMatrix=outer(combinedMatrix,hydrogenDistribution,FUN="*")
# Rebuild the (carbon, oxygen) mass grid, then extend it with hydrogen
combinedMasses=outer(carbonMasses,oxygenMasses,FUN="+")
# All possible total hydrogen masses (all H-1 ... all H-2), step 1 Da
hydrogenMasses=seq(1*sumofallAminoAcids["Hydrogens"],2*sumofallAminoAcids["Hydrogens"],by=1)
# Outer sum gives every (C, O, H) total-mass combination
completeMasses=outer(combinedMasses,hydrogenMasses,FUN="+")
# N and S remain monoisotopic (masses 14 and 32)
massToAdd=14*sumofallAminoAcids["Nitrogens"]+32*sumofallAminoAcids["Sulphurs"]
#New adjusted Masses:
completeMasses=completeMasses+massToAdd
#Unique Masses:
uniqueCompleteMasses=unique(as.numeric(completeMasses))
# Sum the joint probabilities of all isotope combinations per unique mass
probabilitiesForUniqueCompleteMasses=sapply(uniqueCompleteMasses, function(mass){
# Logical mask of array cells whose total mass equals this unique mass
logicalIndexesForDistribution=(completeMasses==mass);
massProbabilities=completeMatrix[logicalIndexesForDistribution]
# Total probability for this mass
return(sum(massProbabilities))
});
#print("Ordered Probabilties for Masses after including Hydrogen:")
#print(probabilitiesForUniqueCompleteMasses)
# Plot the Part B mass spectrum and write the isotope profile report.
pdf("Problem1BFigure.pdf")
plot(uniqueCompleteMasses,probabilitiesForUniqueCompleteMasses,type='h',xlab="Mass",ylab="Probability of Occurance",main="Mass Spectrum for Part B")
sink("Problem1BIsotopeProfile.txt")
print("Isotope Profile for 5 lowest masses P0,P1,...P4")
print(probabilitiesForUniqueCompleteMasses[1:5])
sink()
# BUG FIX: the original called unlink("Problem1AIsotopeProfile.txt") here,
# which deleted Part A's report written earlier in this script.
dev.off()
| /HW4Script.R | no_license | kanishkasthana/CSE283_HW4 | R | false | false | 6,579 | r | #This Script was written by Kanishk Asthana kasthana@eng.ucsd.edu.
#The only thing you need to do is change the input peptide and everything else will be calculated accordingly
input="SLAMMER"
#Source of data: http://www.bmrb.wisc.edu/ref_info/aadata.dat
atomdata=read.csv("atomicfrequencies.txt", header=FALSE)
#Extracting Relevant Data
atomdata=atomdata[,c(1,3,12,13,14,15,16)]
#Labeling Columns
names(atomdata)=c("Symbol","Name","Carbons","Hydrogens","Nitrogens","Oxygens","Sulphurs")
#Converting NA values in Sulphur to Zeros
atomdata$Sulphurs[is.na(atomdata$Sulphurs)]=0
#Converting Data types to Numeric
atomdata$Nitrogens=as.numeric(atomdata$Nitrogens)
atomdata$Oxygens=as.numeric(atomdata$Oxygens)
#Generating Vector of Amino acid elements
aminoAcidVector=unlist(strsplit(input,""));
#All data gives a table of number of atoms in each Amino Acid for input
alldata=sapply(aminoAcidVector,function(aminoAcid){
data=unlist(data.frame(atomdata[atomdata$Symbol==aminoAcid,c(3,4,5,6,7)]))
return(data)
});
print(alldata)
#Sum of Atomic Composition of all Amino Acids
sumofallAminoAcids=rowSums(alldata)
#Calculating number of peptide bonds
numberofPeptideBonds=nchar(input)-1
#Number of Hydrogens to Subtract
hydrogenstoSubtract=2*numberofPeptideBonds
#Number of Oxygens to Subtract
oxygenstoSubtract=numberofPeptideBonds
#Correcting Values
sumofallAminoAcids["Hydrogens"]=sumofallAminoAcids["Hydrogens"]-hydrogenstoSubtract
sumofallAminoAcids["Oxygens"]=sumofallAminoAcids["Oxygens"]-oxygenstoSubtract
#Printing values
print(sumofallAminoAcids)
#Part A of Problem 1
#Now that we know the relative abundance of the isotope of carbon is 0.01 we can do a biomial expansion
#to get the different probabilities of getting different combinations of number of c-12 and c-13 in amino acids
carbonDistribution=dbinom(0:sumofallAminoAcids["Carbons"],sumofallAminoAcids["Carbons"],0.01)
pdf("CarbonBinomiaDistribution.pdf")
#Plotting Carbon Distribution
plot(0:sumofallAminoAcids["Carbons"],carbonDistribution,type="h",main="Carbon Binomial Probability Distribution", ylab="Probability",xlab="Number of C-13s")
dev.off()
pdf("OxygenBinomialDistrbution.pdf")
oxygenDistribution=dbinom(0:sumofallAminoAcids["Oxygens"],sumofallAminoAcids["Oxygens"],0.1)
#Plotting Oxygen Distribution
plot(0:sumofallAminoAcids["Oxygens"],oxygenDistribution,type="h",main="Oxygen Binomial Probability Distribution", ylab="Probability", xlab="Number of O-18s")
dev.off()
#Taking the outer product of the two distributions to get the joint distribution for all possible combinations of oxygen and carbon atoms
combinedMatrix=outer(carbonDistribution,oxygenDistribution,FUN="*")
#Total Mass of Carbon atoms in the input peptide can also vary, the variation can be found out as follows:
carbonMasses=seq(12*sumofallAminoAcids["Carbons"],13*sumofallAminoAcids["Carbons"],by=1)
oxygenMasses=seq(16*sumofallAminoAcids["Oxygens"],18*sumofallAminoAcids["Oxygens"],by=2)
#Taking outer sum of the total possible carbon and oxygen Masses
combinedMasses=outer(carbonMasses,oxygenMasses,FUN="+")
#Adding masses for Hydrogen, Nitrogen and Sulphur
massToAdd=1*sumofallAminoAcids["Hydrogens"]+14*sumofallAminoAcids["Nitrogens"]+32*sumofallAminoAcids["Sulphurs"]
#New adjusted Masses:
combinedMasses=combinedMasses+massToAdd
uniqueMasses=unique(as.numeric(combinedMasses))
print(uniqueMasses)
#The objective now is calculate the probability of occurance of these unique masses from the combinedProbability Distribution
probabilitiesForUniqueMasses=sapply(uniqueMasses,function(mass){
#Getting Logical indexes from the Mass table for using with the probability Distribution table
logicalIndexesForDistribution=(combinedMasses==mass);
massProbabilities=combinedMatrix[logicalIndexesForDistribution]
#Returing total probabilty for each mass
return(sum(massProbabilities))
});
print("Ordered Probabilties for Masses:")
print(probabilitiesForUniqueMasses)
pdf("Problem1AFigure.pdf")
plot(uniqueMasses,probabilitiesForUniqueMasses,type='h',xlab="Mass",ylab="Probability of Occurance",main="Mass Spectrum for Part A")
sink("Problem1AIsotopeProfile.txt")
print("Isotope Profile for 5 lowest masses P0,P1,...P4");
print(probabilitiesForUniqueMasses[1:5])
unlink("Problem1AIsotopeProfile.txt")
dev.off()
#Part B of Problem 1
#For the case of Deuterium having 0.6 percent of hydrogens replaced is similar to having a relative abundance of 0.6 for deuterium
hydrogenDistribution=dbinom(0:sumofallAminoAcids["Hydrogens"],sumofallAminoAcids["Hydrogens"],0.6)
#Plotting Hydrogen Distribution
pdf("HydrogenBinomialDistribution.pdf")
plot(0:sumofallAminoAcids["Hydrogens"],hydrogenDistribution,type="h",main="Hydrogen Binomial Probability Distribution", ylab="Probability",xlab="Number of H-2s")
dev.off()
#Taking the outerproduct of hydrogenDistribution with combinedMatrix to get the joint distribution for hydrogen, oxygen and carbon
completeMatrix=outer(combinedMatrix,hydrogenDistribution,FUN="*")
#Similarly the distribution of masses can be found out like in the previous case
#Taking outer sum of the total possible carbon and oxygen Masses
combinedMasses=outer(carbonMasses,oxygenMasses,FUN="+")
#Computing Hydrogen Masses
hydrogenMasses=seq(1*sumofallAminoAcids["Hydrogens"],2*sumofallAminoAcids["Hydrogens"],by=1)
#Taking the outer sum of above matrix with hydrogen masses
completeMasses=outer(combinedMasses,hydrogenMasses,FUN="+")
#Adding masses for Nitrogen and Sulphur
massToAdd=14*sumofallAminoAcids["Nitrogens"]+32*sumofallAminoAcids["Sulphurs"]
#New adjusted Masses:
completeMasses=completeMasses+massToAdd
#Unique Masses:
uniqueCompleteMasses=unique(as.numeric(completeMasses))
#Calculating summed probabilities
probabilitiesForUniqueCompleteMasses=sapply(uniqueCompleteMasses, function(mass){
#Getting Logical indexes from the Mass table for using with the probability Distribution table
logicalIndexesForDistribution=(completeMasses==mass);
massProbabilities=completeMatrix[logicalIndexesForDistribution]
#Returing total probabilty for each mass
return(sum(massProbabilities))
});
#print("Ordered Probabilties for Masses after including Hydrogen:")
#print(probabilitiesForUniqueCompleteMasses)
pdf("Problem1BFigure.pdf")
plot(uniqueCompleteMasses,probabilitiesForUniqueCompleteMasses,type='h',xlab="Mass",ylab="Probability of Occurance",main="Mass Spectrum for Part B")
sink("Problem1BIsotopeProfile.txt")
print("Isotope Profile for 5 lowest masses P0,P1,...P4");
print(probabilitiesForUniqueCompleteMasses[1:5])
sink()
unlink("Problem1AIsotopeProfile.txt")
dev.off()
|
# Boxplots of candidate vote counts by party-list number for the 2019
# Polish Sejm elections (PiS / PO / SLD / PSL committees).
require (ggplot2)
library(dplyr)
# NOTE(review): barColor and the xxx.*/dft.* limit/break vectors below are
# defined but never used in the code that follows -- candidates for removal.
barColor <- "#8a0303"
##################################################################
xxx.limits <- c(0, 20)
xxx.breaks <- seq(0, 30, by = 2)
dft.limits <- c(0, 250)
dft.breaks <- seq(0, 250, by = 10)
##################################################################
# Candidate-level results; semicolon-separated, "NA" marks missing values
kandydaci <- read.csv("kandydaci_sejm_2019_f4.csv", sep = ';', header=T, na.string="NA");
kk <- kandydaci
nrow(kandydaci)
# Plot 1: all candidates, votes by list number
tit <- "2019 elections: PiS/PO/PSL/SLD"
ggplot(kk, aes(x=as.factor(nrk), y=glosy, fill=as.factor(nrk))) +
geom_boxplot() +
ggtitle(tit, subtitle="PO/PiS/SLD/PSL/") +
ylab("votes") +
## https://stackoverflow.com/questions/11724311/how-to-add-a-ggplot2-subtitle-with-different-size-and-colour
theme(plot.subtitle=element_text(size=8)) +
xlab("list number");
# Plot 2: same, excluding outlier candidates with >= 125000 votes
kk <- filter (kandydaci, glosy < 125000)
nrow(kk)
tit <- "2019 elections"
ggplot(kk, aes(x=as.factor(nrk), y=glosy, fill=as.factor(nrk))) +
geom_boxplot() +
ggtitle(tit, subtitle="PO/PiS/SLD/PSL/ votes < 125000") +
ylab("votes") +
## https://stackoverflow.com/questions/11724311/how-to-add-a-ggplot2-subtitle-with-different-size-and-colour
theme(plot.subtitle=element_text(size=8)) +
xlab("list number");
# Plot 3: PO and PiS only, still capped at 125000 votes
kk <- filter(kandydaci, ((komitet=="PO" | komitet=="PiS") & glosy < 125000))
tit <- "2019 elections"
ggplot(kk, aes(x=as.factor(nrk), y=glosy, fill=as.factor(nrk))) +
geom_boxplot() +
ggtitle(tit, subtitle="PO/PiS votes < 125000") +
ylab("votes") +
## https://stackoverflow.com/questions/11724311/how-to-add-a-ggplot2-subtitle-with-different-size-and-colour
theme(plot.subtitle=element_text(size=8)) +
xlab("list number");
# Plot 4: districts 25/26 (Pomorskie).
# NOTE(review): the subtitle claims "votes < 125000" but no vote filter is
# applied in this filter() call -- confirm whether the cap was intended.
kk <- filter (kandydaci, okr == 25 | okr == 26)
tit <- "2019 elections"
ggplot(kk, aes(x=as.factor(nrk), y=glosy, fill=as.factor(nrk))) +
geom_boxplot() +
ggtitle(tit, subtitle="PO/PiS/SLD/PSL/ okr 26/25 (Pomorskie) votes < 125000") +
ylab("votes") +
## https://stackoverflow.com/questions/11724311/how-to-add-a-ggplot2-subtitle-with-different-size-and-colour
theme(plot.subtitle=element_text(size=8)) +
xlab("list number");
| /koncentracja.R | no_license | hrpunio/Erasmus2020_DS | R | false | false | 2,106 | r | ##
require (ggplot2)
library(dplyr)
barColor <- "#8a0303"
##################################################################
xxx.limits <- c(0, 20)
xxx.breaks <- seq(0, 30, by = 2)
dft.limits <- c(0, 250)
dft.breaks <- seq(0, 250, by = 10)
##################################################################
kandydaci <- read.csv("kandydaci_sejm_2019_f4.csv", sep = ';', header=T, na.string="NA");
kk <- kandydaci
nrow(kandydaci)
tit <- "2019 elections: PiS/PO/PSL/SLD"
ggplot(kk, aes(x=as.factor(nrk), y=glosy, fill=as.factor(nrk))) +
geom_boxplot() +
ggtitle(tit, subtitle="PO/PiS/SLD/PSL/") +
ylab("votes") +
## https://stackoverflow.com/questions/11724311/how-to-add-a-ggplot2-subtitle-with-different-size-and-colour
theme(plot.subtitle=element_text(size=8)) +
xlab("list number");
kk <- filter (kandydaci, glosy < 125000)
nrow(kk)
tit <- "2019 elections"
ggplot(kk, aes(x=as.factor(nrk), y=glosy, fill=as.factor(nrk))) +
geom_boxplot() +
ggtitle(tit, subtitle="PO/PiS/SLD/PSL/ votes < 125000") +
ylab("votes") +
## https://stackoverflow.com/questions/11724311/how-to-add-a-ggplot2-subtitle-with-different-size-and-colour
theme(plot.subtitle=element_text(size=8)) +
xlab("list number");
kk <- filter(kandydaci, ((komitet=="PO" | komitet=="PiS") & glosy < 125000))
tit <- "2019 elections"
ggplot(kk, aes(x=as.factor(nrk), y=glosy, fill=as.factor(nrk))) +
geom_boxplot() +
ggtitle(tit, subtitle="PO/PiS votes < 125000") +
ylab("votes") +
## https://stackoverflow.com/questions/11724311/how-to-add-a-ggplot2-subtitle-with-different-size-and-colour
theme(plot.subtitle=element_text(size=8)) +
xlab("list number");
kk <- filter (kandydaci, okr == 25 | okr == 26)
tit <- "2019 elections"
ggplot(kk, aes(x=as.factor(nrk), y=glosy, fill=as.factor(nrk))) +
geom_boxplot() +
ggtitle(tit, subtitle="PO/PiS/SLD/PSL/ okr 26/25 (Pomorskie) votes < 125000") +
ylab("votes") +
## https://stackoverflow.com/questions/11724311/how-to-add-a-ggplot2-subtitle-with-different-size-and-colour
theme(plot.subtitle=element_text(size=8)) +
xlab("list number");
|
# zirf_fit should accept matrix inputs and return a "zirf_fit" object.
test_that("zirf_fit works on matrices", {
  sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
  x <- sim_dat$x
  z <- sim_dat$z
  y <- sim_dat$y
  zirf_mat <- suppressWarnings(zirf_fit(x, z, y, rounds = 10, mtry = 1))
  expect_match(class(zirf_mat), "zirf_fit")
})
# Same contract when x and z are supplied as data.frames instead.
test_that("zirf_fit works on data.frame", {
  sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
  x <- as.data.frame(sim_dat$x)
  z <- as.data.frame(sim_dat$z)
  y <- sim_dat$y
  zirf_df <- suppressWarnings(zirf_fit(x, z, y, rounds = 10, mtry = 1))
  expect_match(class(zirf_df), "zirf_fit")
})
# Column names that start with a digit are not syntactic R names; the fit
# should still succeed for matrices.
# NOTE(review): colnames(x) <- c("34Agg") supplies a single name -- this only
# works if gen_zip() returns a one-column x; confirm against gen_zip().
test_that("zirf_fit works when column names start with numbers (matrices)", {
  sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
  x <- sim_dat$x
  colnames(x) <- c("34Agg")
  z <- sim_dat$z
  y <- sim_dat$y
  zirf_mat <- suppressWarnings(zirf_fit(x, z, y, rounds = 10, mtry = 1))
  expect_match(class(zirf_mat), "zirf_fit")
})
# Same digit-prefixed-name check for the data.frame input path.
test_that("zirf_fit works when column names start with numbers (data.frames)", {
  sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
  x <- sim_dat$x
  x <- as.data.frame(x)
  names(x) <- c("34Agg")
  z <- as.data.frame(sim_dat$z)
  y <- sim_dat$y
  zirf_df <- suppressWarnings(zirf_fit(x, z, y, rounds = 10, mtry = 1))
  expect_match(class(zirf_df), "zirf_fit")
})
# zirf_fit should reject inputs where x and z have different classes.
# NOTE(review): this test previously appeared TWICE verbatim; the duplicate
# was removed since it added no coverage. The `names(x) <- c("34Agg")` line
# was also dropped: on a matrix it set a `names` attribute (not column
# names) and was irrelevant to the class-mismatch assertion.
test_that("zirf_fit fails when class(x) and class(z) don't match", {
  sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
  # x stays a matrix while z becomes a data.frame, so the classes differ
  x <- sim_dat$x
  z <- as.data.frame(sim_dat$z)
  y <- sim_dat$y
  # Map the expected error onto a sentinel string so the test fails loudly
  # if zirf_fit() ever starts accepting the mismatched inputs.
  zirf_df <- tryCatch(suppressWarnings(zirf_fit(x, z, y, rounds = 10, mtry = 1)),
                      error = function(e) "correct error")
  expect_match(zirf_df, "correct error")
})
# NOTE(review): everything below is scratch/driver code sitting at the bottom
# of a test file, so it executes on every test run -- consider moving it to a
# vignette or example script.
sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
x <- sim_dat$x
# NOTE(review): this colnames assignment is dead -- x is reassigned from
# sim_dat$x on the very next line, discarding the name.
colnames(x) <- c("34Agg")
x <- sim_dat$x
z <- sim_dat$z
y <- sim_dat$y
y <- as.data.frame(y)
# Fresh simulated data used as the prediction set (newx/newz)
sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
newx <- sim_dat$x
newz <- sim_dat$z
zirf_df <- suppressWarnings(zirf_fit(x, z, y, rounds = 10, mtry = 1, newx = newx,
newz = newz))
# Larger simulation (7 coefficients) for exercising zirf_genie3
sim_dat <- gen_zip(100, beta = c(.1, -.1, 0, .1, 0, -.1, 0), xi = c(-.5, .1))
x <- t(sim_dat$x)
z <- sim_dat$z
# 6 genes x 100 cells of Poisson counts, rows named V1..V6 so they can be
# referenced as regulators/targets below
count_mat <- matrix(rpois(6*100, 1), 6, 100)
rownames(count_mat) <- paste0("V", 1:6)
# Run with an explicit regulator/target subset, then with the defaults
ff <- zirf_genie3(exprMatrix = x, countMatrix = count_mat, z = z, nCores = 2,
regulators = c("V1", "V2"), targets = c("V1", "V2", "V3", "V4"))
ff <- zirf_genie3(exprMatrix = x, countMatrix = count_mat, z = z, nCores = 2)
#to do:
#write function that computes all zirf models (make sure it uses parallelization)
#write functions that incorporate SCENIC functions
#write SCENIC vignette
| /tests/testthat/test-mat_test.R | no_license | daniel-conn17/scRNAzirf | R | false | false | 3,117 | r | test_that("zirf_fit works on matrices", {
sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
x <- sim_dat$x
z <- sim_dat$z
y <- sim_dat$y
zirf_mat <- suppressWarnings(zirf_fit(x, z, y, rounds = 10, mtry = 1))
expect_match(class(zirf_mat), "zirf_fit")
})
test_that("zirf_fit works on data.frame", {
sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
x <- as.data.frame(sim_dat$x)
z <- as.data.frame(sim_dat$z)
y <- sim_dat$y
zirf_df <- suppressWarnings(zirf_fit(x, z, y, rounds = 10, mtry = 1))
expect_match(class(zirf_df), "zirf_fit")
})
test_that("zirf_fit works when column names start with numbers (matrices)", {
sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
x <- sim_dat$x
colnames(x) <- c("34Agg")
z <- sim_dat$z
y <- sim_dat$y
zirf_mat <- suppressWarnings(zirf_fit(x, z, y, rounds = 10, mtry = 1))
expect_match(class(zirf_mat), "zirf_fit")
})
test_that("zirf_fit works when column names start with numbers (data.frames)", {
sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
x <- sim_dat$x
x <- as.data.frame(x)
names(x) <- c("34Agg")
z <- as.data.frame(sim_dat$z)
y <- sim_dat$y
zirf_df <- suppressWarnings(zirf_fit(x, z, y, rounds = 10, mtry = 1))
expect_match(class(zirf_df), "zirf_fit")
})
test_that("zirf_fit fails when class(x) and class(z) don't match", {
sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
x <- sim_dat$x
names(x) <- c("34Agg")
z <- as.data.frame(sim_dat$z)
y <- sim_dat$y
zirf_df <- tryCatch(suppressWarnings(zirf_fit(x, z, y, rounds = 10, mtry = 1)),
error = function(e) "correct error")
expect_match(zirf_df, "correct error")
})
test_that("zirf_fit fails when class(x) and class(z) don't match", {
sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
x <- sim_dat$x
names(x) <- c("34Agg")
z <- as.data.frame(sim_dat$z)
y <- sim_dat$y
zirf_df <- tryCatch(suppressWarnings(zirf_fit(x, z, y, rounds = 10, mtry = 1)),
error = function(e) "correct error")
expect_match(zirf_df, "correct error")
})
# Scratch code exercising zirf_fit / zirf_genie3 interactively
# (not wrapped in test_that blocks).
sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
x <- sim_dat$x
colnames(x) <- c("34Agg")
# NOTE(review): the next line re-assigns x from sim_dat$x, discarding the
# "34Agg" column name set just above -- confirm which was intended.
x <- sim_dat$x
z <- sim_dat$z
y <- sim_dat$y
y <- as.data.frame(y)
# Fresh draw used as "new" data for prediction.
sim_dat <- gen_zip(100, beta = c(.1, -.1), xi = c(-.5, .1))
newx <- sim_dat$x
newz <- sim_dat$z
zirf_df <- suppressWarnings(zirf_fit(x, z, y, rounds = 10, mtry = 1, newx = newx,
newz = newz))
# GENIE3-style network inference on a simulated 6-gene Poisson count matrix.
sim_dat <- gen_zip(100, beta = c(.1, -.1, 0, .1, 0, -.1, 0), xi = c(-.5, .1))
x <- t(sim_dat$x)
z <- sim_dat$z
count_mat <- matrix(rpois(6*100, 1), 6, 100)
rownames(count_mat) <- paste0("V", 1:6)
# With an explicit regulator/target subset...
ff <- zirf_genie3(exprMatrix = x, countMatrix = count_mat, z = z, nCores = 2,
regulators = c("V1", "V2"), targets = c("V1", "V2", "V3", "V4"))
# ...and over all genes.
ff <- zirf_genie3(exprMatrix = x, countMatrix = count_mat, z = z, nCores = 2)
#to do:
#write function that computes all zirf models (make sure it uses parallelization
#write functions that incorporate SCENIC functions
#write SCENIC vignette
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimate_variable_importance.R
\name{estimate_variable_importance}
\alias{estimate_variable_importance}
\title{Estimate variable importance}
\usage{
estimate_variable_importance(modelPath, methods = c("anova.test", "auc",
"chi.squared", "gain.ratio", "information.gain", "kruskal.test",
"ranger.impurity", "ranger.permutation"), nVarToPlot = 20,
nIter = 10, nCores = 1L)
}
\arguments{
\item{modelPath}{the path of the RData file where the model is saved}
\item{methods}{character vector. The metric(s) used to estimate variable
importance. The available choices are: \code{anova.test}, \code{auc}, \code{chi.squared}
(package \link{FSelector}), \code{gain.ratio} (\link{FSelector}), \code{information.gain}
(\link{FSelector}), \code{kruskal.test}, \code{ranger.impurity} (\link{ranger}) and/or
\code{ranger.permutation} (\link{ranger})}
\item{nVarToPlot}{numeric. The number of most important variables to
graphically represent}
\item{nIter}{integer. If \code{ranger.impurity} or \code{ranger.permutation} is used as
importance metrics, the number of times the estimate is repeated.}
\item{nCores}{integer. If \code{ranger.impurity} or \code{ranger.permutation} is used as
importance metrics and \code{nIter} larger than 1, the number of CPUs used to
perform the computations.}
}
\value{
a list with two elements: \code{varImp}: the table with the importance
measure(s) for all variables and \code{varImpPlot} a ggplot object representing
the importance of the most important variables.
}
\description{
This function allows to estimate the importance of individual variables in a
model unit of a diagnostic tool.
}
\details{
This function estimates the variable importance of all the variables
included in the investigated model using the importance metric(s) specified
in the \code{method} argument. In this regard, the function is a wrapper around
the function \link[=mlr]{generateFilterValuesData} from the \code{mlr} package with the
possibility to run multiple iterations for the metrics \code{ranger.impurity} and
\code{ranger.permutation} potentially in parallel (using \code{nCores} larger than 1).
The second step performed by this function corresponds to the production of a
plot representing the importance of the most important variables. The
selection of the metrics is performed by ranking the importance metric values
and the number of variables to be represented is controlled by the argument
\code{nVarToPlot}. If several importance metrics are used, the selection is made
on the average rank of the variables over the different metrics.
}
\seealso{
\link[=mlr]{generateFilterValuesData}
}
| /man/estimate_variable_importance.Rd | no_license | CedricMondy/ecodiag | R | false | true | 2,696 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimate_variable_importance.R
\name{estimate_variable_importance}
\alias{estimate_variable_importance}
\title{Estimate variable importance}
\usage{
estimate_variable_importance(modelPath, methods = c("anova.test", "auc",
"chi.squared", "gain.ratio", "information.gain", "kruskal.test",
"ranger.impurity", "ranger.permutation"), nVarToPlot = 20,
nIter = 10, nCores = 1L)
}
\arguments{
\item{modelPath}{the path of the RData file where the model is saved}
\item{methods}{character vector. The metric(s) used to estimate variable
importance. The available choices are: \code{anova.test}, \code{auc}, \code{chi.squared}
(package \link{FSelector}), \code{gain.ratio} (\link{FSelector}), \code{information.gain}
(\link{FSelector}), \code{kruskal.test}, \code{ranger.impurity} (\link{ranger}) and/or
\code{ranger.permutation} (\link{ranger})}
\item{nVarToPlot}{numeric. The number of most important variables to
graphically represent}
\item{nIter}{integer. If \code{ranger.impurity} or \code{ranger.permutation} is used as
importance metrics, the number of times the estimate is repeated.}
\item{nCores}{integer. If \code{ranger.impurity} or \code{ranger.permutation} is used as
importance metrics and \code{nIter} larger than 1, the number of CPUs used to
perform the computations.}
}
\value{
a list with two elements: \code{varImp}: the table with the importance
measure(s) for all variables and \code{varImpPlot} a ggplot object representing
the importance of the most important variables.
}
\description{
This function allows to estimate the importance of individual variables in a
model unit of a diagnostic tool.
}
\details{
This function estimates the variable importance of all the variables
included in the investigated model using the importance metric(s) specified
in the \code{method} argument. In this regard, the function is a wrapper around
the function \link[=mlr]{generateFilterValuesData} from the \code{mlr} package with the
possibility to run multiple iterations for the metrics \code{ranger.impurity} and
\code{ranger.permutation} potentially in parallel (using \code{nCores} larger than 1).
The second step performed by this function corresponds to the production of a
plot representing the importance of the most important variables. The
selection of the metrics is performed by ranking the importance metric values
and the number of variables to be represented is controlled by the argument
\code{nVarToPlot}. If several importance metrics are used, the selection is made
on the average rank of the variables over the different metrics.
}
\seealso{
\link[=mlr]{generateFilterValuesData}
}
|
\name{segment}
\alias{segment}
\title{Returns segment data from dendro object.}
\usage{
segment(x)
}
\arguments{
\item{x}{dendro object}
}
\description{
Returns segment data from dendro object.
}
| /man/segment.Rd | no_license | kohske/ggdendro | R | false | false | 203 | rd | \name{segment}
\alias{segment}
\title{Returns segment data from dendro object.}
\usage{
segment(x)
}
\arguments{
\item{x}{dendro object}
}
\description{
Returns segment data from dendro object.
}
|
#' Download raw prescription data for specified county (by state and county names) into R
#'
#' Data from non-contiguous states not yet processed and available. This raw
#' data only includes data from pharmacies and practitioners and the drugs
#' Hydrocodone and Oxycodone.
#'
#' @param county Filter the data to only this county (e.g. 'Mingo')
#' @param state Filter the data to county within this state (e.g. 'WV')
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' mingo_wv <- county_raw(county = "Mingo", state="WV", key="WaPo")
#'
#' head(mingo_wv)
#' }
#' @export
county_raw <- function(county = "Mingo", state = "WV", key="WaPo") {

  url <- "https://arcos-api.ext.nile.works/v1/county_data"

  if (!missing(county)) {
    # The API expects the county name upper-cased with literal spaces; the
    # whole URL is percent-encoded just before the request below.
    # (A duplicated str_to_upper() call was removed here.)
    county_name <- str_to_upper(county)
    county_name <- gsub("%20", " ", county_name)
    url <- param_set(url, key = "county", value = county_name)
  }

  if (!missing(state)) {
    url <- param_set(url, key = "state", value = state)
  }

  if (!missing(key)) {
    url <- param_set(url, key = "key", value = key)
    # Percent-encode any remaining spaces before issuing the request.
    url <- gsub(" ", "%20", url)
    df <- fromJSON(url)
    return(df)
  } else {
    message("Error: API key needed.")
  }
}
#' Download raw prescription data for specified county (by county FIPS code) into R
#'
#' Data from non-contiguous states not yet processed and available. This raw
#' data only includes data from pharmacies and practitioners and the drugs
#' Hydrocodone and Oxycodone.
#'
#' @param fips Filter the data to only this county (e.g. ‘01001’ for Autauga, Alabama)
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' autauga_al <- county_raw_fips(fips="01001", key="WaPo")
#'
#' head(autauga_al)
#' }
#' @export
county_raw_fips <- function(fips="01001", key="WaPo") {
  # Both arguments must be supplied explicitly, even though defaults are
  # declared (missing() is TRUE when the caller omits them).
  if (missing(key)) {
    message("Error: API key needed.")
    return(invisible(NULL))
  }
  if (missing(fips)) {
    message("Error: No FIPS code detected")
    return(invisible(NULL))
  }
  query <- "https://arcos-api.ext.nile.works/v1/county_fips_data"
  query <- param_set(query, key = "key", value = key)
  query <- param_set(query, key = "fips", value = fips)
  # Percent-encode any spaces before issuing the request.
  query <- gsub(" ", "%20", query)
  fromJSON(query)
}
#' Download raw prescription data for specified pharmacy (by BUYER_DEA_NO) into R
#'
#' Data from non-contiguous states not yet processed and available. This raw
#' data only includes data from pharmacies and practitioners and the drugs
#' Hydrocodone and Oxycodone.
#'
#' @param buyer_dea_no Filter the data to only this pharmacy (e.g. ‘AB0454176’)
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' solo_pharm <- pharmacy_raw(buyer_dea_no="AB0454176", key="WaPo")
#'
#' head(solo_pharm)
#' }
#' @export
pharmacy_raw <- function(buyer_dea_no="AB0454176", key="WaPo") {
  # Both arguments must be supplied explicitly, even though defaults are
  # declared (missing() is TRUE when the caller omits them).
  if (missing(key)) {
    message("Error: API key needed.")
    return(invisible(NULL))
  }
  if (missing(buyer_dea_no)) {
    message("Error: No BUYER_DEA_NO id detected")
    return(invisible(NULL))
  }
  query <- "https://arcos-api.ext.nile.works/v1/pharmacy_data"
  query <- param_set(query, key = "key", value = key)
  query <- param_set(query, key = "buyer_dea_no", value = buyer_dea_no)
  # Percent-encode any spaces before issuing the request.
  query <- gsub(" ", "%20", query)
  fromJSON(query)
}
#' Download raw prescription data for individual drugs and business type by county into R
#'
#' Data from non-contiguous states not yet processed and available.
#'
#' @param county Filter the data to only this county (e.g. 'Mingo')
#' @param state Filter the data to county within this state (e.g. 'WV')
#' @param drug Filter the data to one of 14 available drugs in the database (e.g. 'OXYCODONE')
#' @param buyer_bus_act If provided, filter the data to one of businesses in BUYER_BUS_ACT (e.g. 'CHAIN PHARMACY', 'HOSP/CLINIC')
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' bronx <- drug_county_biz(drug="Fentanyl", county = "Laurens", state = "GA",
#'                buyer_bus_act = "Chain Retail", key="WaPo")
#'
#' head(bronx)
#' }
#' @export
drug_county_biz <- function(drug="Fentanyl", county = "Laurens", state = "GA", buyer_bus_act = "Chain Retail", key="WaPo") {

  url <- "https://arcos-api.ext.nile.works/v1/county_data_drug"

  # NOTE(review): buyer_bus_act is documented as a filter but is never added
  # to the query string -- confirm whether the endpoint supports it before
  # wiring it in.

  if (!missing(county)) {
    # The API expects the county name upper-cased with literal spaces; the
    # whole URL is percent-encoded just before the request below.
    # (A duplicated str_to_upper() call was removed here.)
    county_name <- str_to_upper(county)
    county_name <- gsub("%20", " ", county_name)
    url <- param_set(url, key = "county", value = county_name)
  }

  if (!missing(state)) {
    url <- param_set(url, key = "state", value = state)
  }

  if (!missing(drug)) {
    url <- param_set(url, key = "drug", value = drug)
  }

  if (!missing(key)) {
    url <- param_set(url, key = "key", value = key)
    # Percent-encode any remaining spaces before issuing the request.
    url <- gsub(" ", "%20", url)
    df <- fromJSON(url)
    return(df)
  } else {
    message("Error: API key needed.")
  }
}
#' Download raw prescription data for individual drugs and business type by county FIPS code into R
#'
#' Data from non-contiguous states not yet processed and available.
#'
#' @param fips Filter the data to only this county (e.g. ‘01001’ for Autauga, Alabama)
#' @param drug Filter the data to one of 14 available drugs in the database (e.g. 'OXYCODONE')
#' @param buyer_bus_act If provided, filter the data to one of businesses in BUYER_BUS_ACT (e.g. 'CHAIN PHARMACY', 'HOSP/CLINIC')
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' autauga_al <- drug_fips_biz(drug="Fentanyl", fips="01001", buyer_bus_act="Chain Retail", key="WaPo")
#'
#' head(autauga_al)
#' }
#' @export
drug_fips_biz <- function(drug="Fentanyl", fips="01001", buyer_bus_act="Chain Retail", key="WaPo") {
  # NOTE(review): buyer_bus_act is documented as a filter but is never added
  # to the query string below -- confirm whether the endpoint supports it.
  url <- "https://arcos-api.ext.nile.works/v1/county_fips_data_drug"
  # All query arguments must be supplied explicitly, even though defaults
  # are declared (missing() is TRUE when the caller omits them).
  if (missing(key)) {
    message("Error: API key needed.")
  } else {
    url <- param_set(url, key = "key", value = key)
    if (!missing(fips)) {
      url <- param_set(url, key = "fips", value = fips)
      #df <- vroom(url)
      # NOTE(review): unlike the sibling endpoints, space percent-encoding is
      # commented out here -- a drug value containing spaces would produce an
      # invalid URL; confirm this is intentional.
      #url <- gsub(" ", "%20", url)
      if (!missing(drug)) {
        url <- param_set(url, key = "drug", value = drug)
        df <- fromJSON(url)
        return(df)
      } else {
        message("Error: No drug name detected")
      }
    } else {
      message("Error: No FIPS code detected")
    }
  }
}
#' Get list of business types listed in the BUYER_BUS_ACT in the ARCOS database
#'
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' bl <- buyer_list(key="WaPo")
#'
#' head(bl)
#' }
#' @export
buyer_list <- function(key="WaPo") {
  # A key must be supplied explicitly, even though a default is declared.
  if (missing(key)) {
    message("Error: API key needed.")
    return(invisible(NULL))
  }
  endpoint <- param_set("https://arcos-api.ext.nile.works/v1/buyer_list",
                        key = "key", value = key)
  # Percent-encode any spaces before issuing the request.
  fromJSON(gsub(" ", "%20", endpoint))
}
#' Get list of drugs available in the ARCOS database
#'
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' np <- drug_list(key="WaPo")
#'
#'np
#' }
#' @export
drug_list <- function(key="WaPo") {
  # A key must be supplied explicitly, even though a default is declared.
  if (missing(key)) {
    message("Error: API key needed.")
    return(invisible(NULL))
  }
  endpoint <- param_set("https://arcos-api.ext.nile.works/v1/drug_list",
                        key = "key", value = key)
  # Percent-encode any spaces before issuing the request.
  fromJSON(gsub(" ", "%20", endpoint))
}
#' Get dataframe of counties, states, and fips codes that are represented in the ARCOS data
#'
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' counties <- county_list(key="WaPo")
#'
#' head(counties)
#' }
#' @export
county_list <- function(key="WaPo") {
  # A key must be supplied explicitly, even though a default is declared.
  if (missing(key)) {
    message("Error: API key needed.")
    return(invisible(NULL))
  }
  endpoint <- param_set("https://arcos-api.ext.nile.works/v1/county_list",
                        key = "key", value = key)
  # Percent-encode any spaces before issuing the request.
  fromJSON(gsub(" ", "%20", endpoint))
}
#' Download raw ARCOS data (Warning 130+ GB file)
#'
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' the_whole_thing <- raw_data(key="WaPo")
#'
#' head(the_whole_thing)
#' }
#' @export
raw_data <- function(key="WaPo") {
  # A key must be supplied explicitly, even though a default is declared.
  if (missing(key)) {
    message("Error: API key needed.")
    return(invisible(NULL))
  }
  endpoint <- param_set("https://arcos-api.ext.nile.works/v1/all_the_data",
                        key = "key", value = key)
  # The full dump is enormous, so it is streamed with vroom rather than
  # parsed as JSON.
  vroom(endpoint)
}
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
| /R/raw.R | no_license | West-Virginia-BUDA/arcos | R | false | false | 10,325 | r | #' Download raw prescription data for specified county (by state and county names) into R
#'
#' Data from from non-contiguous states not yet processed and available. This raw data only includes data from pharmacies and practitioners and the drugs Hydrocodne and Oxycodone.
#'
#' @param county Filter the data to only this county (e.g. 'Mingo')
#' @param state Filter the data to county within this state (e.g. 'WV')
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' mingo_wv <- county_raw(county = "Mingo", state="WV", key="WaPo")
#'
#' head(mingo_wv)
#' }
#' @export
county_raw <- function(county = "Mingo", state = "WV", key="WaPo") {
url <- "https://arcos-api.ext.nile.works/v1/county_data"
if (!missing(county)) {
county_name <- str_to_upper(county)
county_name <- str_to_upper(county)
#county_name <- gsub("-", " ", county_name)
county_name <- gsub("%20", " ", county_name)
url <- param_set(url, key = "county", value = county_name)
}
if (!missing(state)) {
url <- param_set(url, key = "state", value = state)
}
if (!missing(key)) {
url <- param_set(url, key = "key", value = key)
#df <- vroom(url)
url <- gsub(" ", "%20", url)
df <- fromJSON(url)
return(df)
} else {
message("Error: API key needed.")
}
}
#' Download raw prescription data for specified county (by county FIPS code) into R
#'
#' Data from non-contiguous states not yet processed and available. This raw data only includes data from pharmacies and practitioners and the drugs Hydrocodone and Oxycodone.
#'
#' @param fips Filter the data to only this county (e.g. ‘01001’ for Autauga, Alabama)
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' autauga_al <- county_raw_fips(fips="01001", key="WaPo")
#'
#' head(autauga_al)
#' }
#' @export
county_raw_fips <- function(fips="01001", key="WaPo") {
url <- "https://arcos-api.ext.nile.works/v1/county_fips_data"
if (missing(key)) {
message("Error: API key needed.")
} else {
url <- param_set(url, key = "key", value = key)
if (!missing(fips)) {
url <- param_set(url, key = "fips", value = fips)
#df <- vroom(url)
url <- gsub(" ", "%20", url)
df <- fromJSON(url)
return(df)
} else {
message("Error: No FIPS code detected")
}
}
}
#' Download raw prescription data for specified pharmacy (by BUYER_DEA_NO) into R
#'
#' Data from non-contiguous states not yet processed and available. This raw data only includes data from pharmacies and practitioners and the drugs Hydrocodone and Oxycodone.
#'
#' @param buyer_dea_no Filter the data to only this pharmacy (e.g. ‘AB0454176’)
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' solo_pharm <- pharmacy_raw(buyer_dea_no="AB0454176", key="WaPo")
#'
#' head(solo_pharm)
#' }
#' @export
pharmacy_raw <- function(buyer_dea_no="AB0454176", key="WaPo") {
url <- "https://arcos-api.ext.nile.works/v1/pharmacy_data"
if (missing(key)) {
message("Error: API key needed.")
} else {
url <- param_set(url, key = "key", value = key)
if (!missing(buyer_dea_no)) {
url <- param_set(url, key = "buyer_dea_no", value = buyer_dea_no)
#df <- vroom(url)
url <- gsub(" ", "%20", url)
df <- fromJSON(url)
return(df)
} else {
message("Error: No BUYER_DEA_NO id detected")
}
}
}
#' Download raw prescription data for individual drugs and business type by county into R
#'
#' Data from non-contiguous states not yet processed and available.
#'
#' @param county Filter the data to only this county (e.g. 'Mingo')
#' @param state Filter the data to county within this state (e.g. 'WV')
#' @param drug Filter the data to one of 14 available drugs in the database (e.g. 'OXYCODONE')
#' @param buyer_bus_act If provided, filter the data to one of businesses in BUYER_BUS_ACT (e.g. 'CHAIN PHARMACY', 'HOSP/CLINIC')
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' bronx <- drug_county_biz(drug="Fentanyl", county = "Laurens", state = "GA",
#'                buyer_bus_act = "Chain Retail", key="WaPo")
#'
#' head(bronx)
#' }
#' @export
drug_county_biz <- function(drug="Fentanyl", county = "Laurens", state = "GA", buyer_bus_act = "Chain Retail", key="WaPo") {

  url <- "https://arcos-api.ext.nile.works/v1/county_data_drug"

  # NOTE(review): buyer_bus_act is documented as a filter but is never added
  # to the query string -- confirm whether the endpoint supports it before
  # wiring it in.

  if (!missing(county)) {
    # The API expects the county name upper-cased with literal spaces; the
    # whole URL is percent-encoded just before the request below.
    # (A duplicated str_to_upper() call was removed here.)
    county_name <- str_to_upper(county)
    county_name <- gsub("%20", " ", county_name)
    url <- param_set(url, key = "county", value = county_name)
  }

  if (!missing(state)) {
    url <- param_set(url, key = "state", value = state)
  }

  if (!missing(drug)) {
    url <- param_set(url, key = "drug", value = drug)
  }

  if (!missing(key)) {
    url <- param_set(url, key = "key", value = key)
    # Percent-encode any remaining spaces before issuing the request.
    url <- gsub(" ", "%20", url)
    df <- fromJSON(url)
    return(df)
  } else {
    message("Error: API key needed.")
  }
}
#' Download raw prescription data for individual drugs and business type by county FIPS code into R
#'
#' Data from non-contiguous states not yet processed and available.
#'
#' @param fips Filter the data to only this county (e.g. ‘01001’ for Autauga, Alabama)
#' @param drug Filter the data to one of 14 available drugs in the database (e.g. 'OXYCODONE')
#' @param buyer_bus_act If provided, filter the data to one of businesses in BUYER_BUS_ACT (e.g. 'CHAIN PHARMACY', 'HOSP/CLINIC')
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' autauga_al <- drug_fips_biz(drug="Fentanyl", fips="01001", buyer_bus_act="Chain Retail", key="WaPo")
#'
#' head(autauga_al)
#' }
#' @export
drug_fips_biz <- function(drug="Fentanyl", fips="01001", buyer_bus_act="Chain Retail", key="WaPo") {
url <- "https://arcos-api.ext.nile.works/v1/county_fips_data_drug"
if (missing(key)) {
message("Error: API key needed.")
} else {
url <- param_set(url, key = "key", value = key)
if (!missing(fips)) {
url <- param_set(url, key = "fips", value = fips)
#df <- vroom(url)
#url <- gsub(" ", "%20", url)
if (!missing(drug)) {
url <- param_set(url, key = "drug", value = drug)
df <- fromJSON(url)
return(df)
} else {
message("Error: No drug name detected")
}
} else {
message("Error: No FIPS code detected")
}
}
}
#' Get list of business types listed in the BUYER_BUS_ACT in the ARCOS database
#'
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' bl <- buyer_list(key="WaPo")
#'
#' head(bl)
#' }
#' @export
buyer_list <- function(key="WaPo") {
url <- "https://arcos-api.ext.nile.works/v1/buyer_list"
if (!missing(key)) {
url <- param_set(url, key = "key", value = key)
url <- gsub(" ", "%20", url)
df <- fromJSON(url)
return(df)
} else {
message("Error: API key needed.")
}
}
#' Get list of drugs available in the ARCOS database
#'
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' np <- drug_list(key="WaPo")
#'
#'np
#' }
#' @export
drug_list <- function(key="WaPo") {
url <- "https://arcos-api.ext.nile.works/v1/drug_list"
if (!missing(key)) {
url <- param_set(url, key = "key", value = key)
url <- gsub(" ", "%20", url)
df <- fromJSON(url)
return(df)
} else {
message("Error: API key needed.")
}
}
#' Get dataframe of counties, states, and fips codes that are represented in the ARCOS data
#'
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' counties <- county_list(key="WaPo")
#'
#' head(counties)
#' }
#' @export
county_list <- function(key="WaPo") {
url <- "https://arcos-api.ext.nile.works/v1/county_list"
if (!missing(key)) {
url <- param_set(url, key = "key", value = key)
url <- gsub(" ", "%20", url)
df <- fromJSON(url)
return(df)
} else {
message("Error: API key needed.")
}
}
#' Download raw ARCOS data (Warning 130+ GB file)
#'
#' @param key Key needed to make query successful
#' @seealso \url{https://www.washingtonpost.com/graphics/2019/investigations/dea-pain-pill-database/#download-resources}
#'
#' @family raw data
#' @examples \donttest{
#' library(arcos)
#'
#' the_whole_thing <- raw_data(key="WaPo")
#'
#' head(the_whole_thing)
#' }
#' @export
raw_data <- function(key="WaPo") {
url <- "https://arcos-api.ext.nile.works/v1/all_the_data"
if (missing(key)) {
message("Error: API key needed.")
} else {
url <- param_set(url, key = "key", value = key)
df <- vroom(url)
#df <- fromJSON(url)
return(df)
}
}
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
|
## power calculation for Andrew Reynolds
rm(list=ls())
library(pipeR)
library(tmvtnorm)
calculatePower <- function(n, diff, sd, rho, N.SIMS = 1000) {
  # Simulation-based power for a two-sample t-test on within-subject
  # differences (two correlated measurements per subject).
  #
  # n      subjects per group
  # diff   true treatment effect on the second measurement
  # sd     common standard deviation of both measurements
  # rho    correlation between the two measurements within a subject
  # N.SIMS number of simulated trials
  #
  # Returns the proportion of simulations with t-test p-value < 0.05.
  d.cov <- rho * sd ^ 2
  sigma <- matrix(c(sd^2, d.cov, d.cov, sd^2), nrow = 2)
  pow <- 0
  base <- 8.6 # baseline mean; cancels out of the differences, so irrelevant
  for (sim in 1:N.SIMS) {
    # Bug fix: mvrnorm() is from MASS, which this script never attaches
    # (only pipeR and tmvtnorm are loaded above); qualify the namespace so
    # the function works regardless of the search path.
    # treatment group: post measurement shifted by `diff`
    x <- MASS::mvrnorm(n, mu = c(base, base + diff), Sigma = sigma)
    # control group: no shift
    xc <- MASS::mvrnorm(n, mu = c(base, base), Sigma = sigma)
    if (t.test(x[,1] - x[,2], xc[,1] - xc[,2])$p.val < 0.05) {
      pow <- pow + 1
    }
  }
  pow / N.SIMS
}
# Alternative simulation using a truncated multivariate normal (tmvtnorm),
# kept for reference:
# x <- rtmvnorm(n, mean=c(base,base + diff),
# sigma=sigma, lower=c(6,0))
# xc <- rtmvnorm(n, mean=c(base,base),
# sigma=sigma, lower=c(6,0))
# Plot power against per-group sample size and find the smallest N reaching
# 80% power.
par(mfrow=c(1,1))
lowest <- 50
highest <- 100
plot('',xlim=c(lowest,highest),ylim=c(0,1),
xlab='N (per group)',ylab='power')
ns <- seq(lowest,highest,1)
# NOTE(review): map() is from purrr and %>% from magrittr -- neither is
# attached above (only pipeR and tmvtnorm are loaded); confirm they are
# loaded elsewhere. The %>>% / (~ powers) step is pipeR's side-effect
# syntax, which stashes the NA-padded power vector into `powers` so that
# powers[n] indexes power at sample size n.
ns %>% map(calculatePower,diff = -0.4,sd=1.4, rho = 0.8, N.SIMS=3000) %>%
unlist %>%
( function(x) { c(rep(NA,lowest),x) } ) %>>%
(~ powers) %>%
lines
abline(h=0.8,col='blue')
# Sample sizes achieving at least 80% power, and a spot check at N = 60.
which(powers > 0.8)
powers[60]
grid()
## how does the power vary with unbalanced design
# calculatePowerUnbalanced <- function(n, nc, diff, sd, rho, N.SIMS = 1000) {
#
# d.cov <- rho * sd ^ 2
# sigma <- matrix(c(sd^2,d.cov,d.cov,sd^2),nrow = 2)
# pow <- 0
# x.m <- vector()
# s.m <- vector()
# base <- 8.6
# for (sim in 1:N.SIMS) {
# x <- mvrnorm(n , mu= c(base,base + diff) ,Sigma = sigma)
# x1 <- x[,1]
# x2 <- x[,2]
# # control group
# xc <- mvrnorm(nc , mu= c(base,base ) ,Sigma = sigma)
# x1c <- xc[,1]
# x2c <- xc[,2]
#
# if (t.test(x1-x2,x1c-x2c)$p.val < 0.05) {
# pow <- pow + 1
# }
# }
# pow/N.SIMS
# }
#
# pows <- vector()
# for (nc in 2:158) {
# nt <- 160 - nc
# pows[nt] <- calculatePowerUnbalanced(n = nt, nc=nc, diff = -0.4,sd=1.4, rho = 0.8, N.SIMS=1000)
# }
# plot(pows,xlab='n treatment',ylab='power')
# abline(h=0.80,col='blue')
| /power.R | no_license | OldMortality/grainsize | R | false | false | 2,009 | r |
## power calculation for Andrew Reynolds
rm(list=ls())
library(pipeR)
library(tmvtnorm)
calculatePower <- function(n, diff, sd, rho, N.SIMS = 1000) {
  # Simulation-based power for a two-sample t-test on within-subject
  # differences (two correlated measurements per subject).
  #
  # n      subjects per group
  # diff   true treatment effect on the second measurement
  # sd     common standard deviation of both measurements
  # rho    correlation between the two measurements within a subject
  # N.SIMS number of simulated trials
  #
  # Returns the proportion of simulations with t-test p-value < 0.05.
  d.cov <- rho * sd ^ 2
  sigma <- matrix(c(sd^2, d.cov, d.cov, sd^2), nrow = 2)
  pow <- 0
  base <- 8.6 # baseline mean; cancels out of the differences, so irrelevant
  for (sim in 1:N.SIMS) {
    # Bug fix: mvrnorm() is from MASS, which this script never attaches
    # (only pipeR and tmvtnorm are loaded above); qualify the namespace so
    # the function works regardless of the search path.
    # treatment group: post measurement shifted by `diff`
    x <- MASS::mvrnorm(n, mu = c(base, base + diff), Sigma = sigma)
    # control group: no shift
    xc <- MASS::mvrnorm(n, mu = c(base, base), Sigma = sigma)
    if (t.test(x[,1] - x[,2], xc[,1] - xc[,2])$p.val < 0.05) {
      pow <- pow + 1
    }
  }
  pow / N.SIMS
}
# x <- rtmvnorm(n, mean=c(base,base + diff),
# sigma=sigma, lower=c(6,0))
# xc <- rtmvnorm(n, mean=c(base,base),
# sigma=sigma, lower=c(6,0))
par(mfrow=c(1,1))
lowest <- 50
highest <- 100
plot('',xlim=c(lowest,highest),ylim=c(0,1),
xlab='N (per group)',ylab='power')
ns <- seq(lowest,highest,1)
ns %>% map(calculatePower,diff = -0.4,sd=1.4, rho = 0.8, N.SIMS=3000) %>%
unlist %>%
( function(x) { c(rep(NA,lowest),x) } ) %>>%
(~ powers) %>%
lines
abline(h=0.8,col='blue')
which(powers > 0.8)
powers[60]
grid()
## how does the power vary with unbalanced design
# calculatePowerUnbalanced <- function(n, nc, diff, sd, rho, N.SIMS = 1000) {
#
# d.cov <- rho * sd ^ 2
# sigma <- matrix(c(sd^2,d.cov,d.cov,sd^2),nrow = 2)
# pow <- 0
# x.m <- vector()
# s.m <- vector()
# base <- 8.6
# for (sim in 1:N.SIMS) {
# x <- mvrnorm(n , mu= c(base,base + diff) ,Sigma = sigma)
# x1 <- x[,1]
# x2 <- x[,2]
# # control group
# xc <- mvrnorm(nc , mu= c(base,base ) ,Sigma = sigma)
# x1c <- xc[,1]
# x2c <- xc[,2]
#
# if (t.test(x1-x2,x1c-x2c)$p.val < 0.05) {
# pow <- pow + 1
# }
# }
# pow/N.SIMS
# }
#
# pows <- vector()
# for (nc in 2:158) {
# nt <- 160 - nc
# pows[nt] <- calculatePowerUnbalanced(n = nt, nc=nc, diff = -0.4,sd=1.4, rho = 0.8, N.SIMS=1000)
# }
# plot(pows,xlab='n treatment',ylab='power')
# abline(h=0.80,col='blue')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process.R
\name{stratifiedSampling}
\alias{stratifiedSampling}
\title{stratifiedSampling}
\usage{
stratifiedSampling(object = NULL, prime.factor = NULL,
sample.size = NULL, name = NULL)
}
\arguments{
\item{object}{Cookie object}
\item{prime.factor}{The unique prime factor.}
\item{sample.size}{Sample size}
\item{name}{A name for this run. e.g. test1}
}
\description{
Stratified Sampling from current dataset
}
| /man/stratifiedSampling.Rd | no_license | WilsonImmunologyLab/Cookie | R | false | true | 494 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process.R
\name{stratifiedSampling}
\alias{stratifiedSampling}
\title{stratifiedSampling}
\usage{
stratifiedSampling(object = NULL, prime.factor = NULL,
sample.size = NULL, name = NULL)
}
\arguments{
\item{object}{Cookie object}
\item{prime.factor}{The unique prime factor.}
\item{sample.size}{Sample size}
\item{name}{A name for this run. e.g. test1}
}
\description{
Stratified Sampling from current dataset
}
|
## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse. It returns a list of four functions:
## 1. set: replace the stored matrix (and clear any cached inverse)
## 2. get: return the stored matrix
## 3. setinverse: store the value of the inverse of the matrix
## 4. getinverse: return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL
    set <- function(y) {
        x <<- y
        ## Bug fix: the original cleared "m" (which does not exist here),
        ## leaving a stale inverse in "inv" and creating a stray global "m".
        inv <<- NULL
    }
    get <- function() x
    setinverse <- function(inverse) inv <<- inverse
    getinverse <- function() inv
    list(set = set, get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}
## cacheSolve returns the inverse of the special "matrix" created with
## makeCacheMatrix, computing it with solve() on the first call and
## returning the cached value on subsequent calls.
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    inv <- x$getinverse()
    if (!is.null(inv)) {
        message("getting cached data")
        return(inv)
    }
    data <- x$get()
    ## Bug fix: base R has no inverse(); solve(m) computes the matrix inverse.
    inv <- solve(data, ...)
    x$setinverse(inv)
    ## Return the freshly computed inverse explicitly (the original ended on
    ## x$setinverse(inv), relying on its value as the implicit return).
    inv
}
| /cachematrix.R | no_license | spease1981/ProgrammingAssignment2 | R | false | false | 884 | r | ## The makeCacheMatrix is used to capture the inverse of a set of numbers in
## a matrix
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse of the matrix
## 4. get the value of the inverse of the matrix
## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse: set/get replace and return the matrix, setinverse/getinverse
## store and return the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL
    set <- function(y) {
        x <<- y
        ## Bug fix: the original cleared "m" (which does not exist here),
        ## leaving a stale inverse in "inv" and creating a stray global "m".
        inv <<- NULL
    }
    get <- function() x
    setinverse <- function(inverse) inv <<- inverse
    getinverse <- function() inv
    list(set = set, get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}
## The cacheSolve function allows you to view the inverse of the
## values of the matrix.
## cacheSolve returns the inverse of the special "matrix" created with
## makeCacheMatrix, computing it with solve() on the first call and
## returning the cached value on subsequent calls.
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    inv <- x$getinverse()
    if (!is.null(inv)) {
        message("getting cached data")
        return(inv)
    }
    data <- x$get()
    ## Bug fix: base R has no inverse(); solve(m) computes the matrix inverse.
    inv <- solve(data, ...)
    x$setinverse(inv)
    ## Return the freshly computed inverse explicitly (the original ended on
    ## x$setinverse(inv), relying on its value as the implicit return).
    inv
}
|
library(ape)
testtree <- read.tree("1646_12.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1646_12_unrooted.txt") | /codeml_files/newick_trees_processed/1646_12/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
testtree <- read.tree("1646_12.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1646_12_unrooted.txt") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_ftest.R
\name{power.ftest}
\alias{power.ftest}
\title{Power Calculations for an F-test}
\usage{
power.ftest(
num_df = NULL,
den_df = NULL,
cohen_f = NULL,
alpha_level = Superpower_options("alpha_level"),
beta_level = NULL,
liberal_lambda = Superpower_options("liberal_lambda")
)
}
\arguments{
\item{num_df}{degrees of freedom for numerator}
\item{den_df}{degrees of freedom for denominator}
\item{cohen_f}{Cohen's f effect size. Note: this is the sqrt(f2) if you are used to using pwr.f2.test}
\item{alpha_level}{Alpha level used to determine statistical significance.}
\item{beta_level}{Type II error probability (power/100-1)}
\item{liberal_lambda}{Logical indicator of whether to use the liberal (cohen_f^2\*(num_df+den_df)) or conservative (cohen_f^2\*den_df) calculation of the noncentrality (lambda) parameter estimate. Default is FALSE.}
}
\value{
num_df = degrees of freedom for numerator,
den_df = degrees of freedom for denominator,
cohen_f = Cohen's f effect size,
alpha_level = Type 1 error probability,
beta_level = Type 2 error probability,
power = Power of test (1-beta_level\*100%),
lambda = Noncentrality parameter estimate (default = cohen_f^2\*den_df, liberal = cohen_f^2\*(num_df+den_df))
}
\description{
Compute power of test or determine parameters to obtain target power. Inspired by the pwr.f2.test function in the pwr package, but allows for varying noncentrality parameter estimates for a more liberal (default in pwr.f2.test) or conservative (default in this function) estimates (see Aberson, Chapter 5, pg 72).
}
\section{References}{
Cohen, J. (1988). Statistical power analysis for the behavioral sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum.
Aberson, C. (2019). Applied Power Analysis for the Behaviorial Sciences (2nd ed.). New York,NY: Routledge.
}
\examples{
design_result <- ANOVA_design(design = "2b",
n = 65,
mu = c(0,.5),
sd = 1,
plot = FALSE)
x1 = ANOVA_exact2(design_result, verbose = FALSE)
ex = power.ftest(num_df = x1$anova_table$num_df,
den_df = x1$anova_table$den_df,
cohen_f = x1$main_result$cohen_f,
alpha_level = 0.05,
liberal_lambda = FALSE)
}
| /man/power.ftest.Rd | permissive | DPCscience/Superpower | R | false | true | 2,208 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_ftest.R
\name{power.ftest}
\alias{power.ftest}
\title{Power Calculations for an F-test}
\usage{
power.ftest(
num_df = NULL,
den_df = NULL,
cohen_f = NULL,
alpha_level = Superpower_options("alpha_level"),
beta_level = NULL,
liberal_lambda = Superpower_options("liberal_lambda")
)
}
\arguments{
\item{num_df}{degrees of freedom for numerator}
\item{den_df}{degrees of freedom for denominator}
\item{cohen_f}{Cohen's f effect size. Note: this is the sqrt(f2) if you are used to using pwr.f2.test}
\item{alpha_level}{Alpha level used to determine statistical significance.}
\item{beta_level}{Type II error probability (power/100-1)}
\item{liberal_lambda}{Logical indicator of whether to use the liberal (cohen_f^2\*(num_df+den_df)) or conservative (cohen_f^2\*den_df) calculation of the noncentrality (lambda) parameter estimate. Default is FALSE.}
}
\value{
num_df = degrees of freedom for numerator,
den_df = degrees of freedom for denominator,
cohen_f = Cohen's f effect size,
alpha_level = Type 1 error probability,
beta_level = Type 2 error probability,
power = Power of test (1-beta_level\*100%),
lambda = Noncentrality parameter estimate (default = cohen_f^2\*den_df, liberal = cohen_f^2\*(num_df+den_df))
}
\description{
Compute power of test or determine parameters to obtain target power. Inspired by the pwr.f2.test function in the pwr package, but allows for varying noncentrality parameter estimates for a more liberal (default in pwr.f2.test) or conservative (default in this function) estimates (see Aberson, Chapter 5, pg 72).
}
\section{References}{
Cohen, J. (1988). Statistical power analysis for the behavioral sciences (2nd ed.). Hillsdale,NJ: Lawrence Erlbaum.
Aberson, C. (2019). Applied Power Analysis for the Behaviorial Sciences (2nd ed.). New York,NY: Routledge.
}
\examples{
design_result <- ANOVA_design(design = "2b",
n = 65,
mu = c(0,.5),
sd = 1,
plot = FALSE)
x1 = ANOVA_exact2(design_result, verbose = FALSE)
ex = power.ftest(num_df = x1$anova_table$num_df,
den_df = x1$anova_table$den_df,
cohen_f = x1$main_result$cohen_f,
alpha_level = 0.05,
liberal_lambda = FALSE)
}
|
#' Data: principal component analysis model for period of record
#'
#' @description A list with 5 elements.
#'
#' @format results of a call to FactoMineR::PCA():
#' \describe{
#' \item{eig}{eigenvalues for the PCA}
#' \item{var}{variable names and }
#' \item{ind}{lagged flow data}
#' \item{svd}{the response variable: the sum of observed flows across Tamiami Trail}
#' \item{call}{}
#'}
#' @docType data
#' @keywords rainfall PET stage flow
#' @name pca1
#' @examples
#' summary(pca1)
#'
#' \dontrun{
#' ### code used to generate object
#' colsToUse <- 2:55
#' pca.dat <- por
#'
#' pca1 <- FactoMineR::PCA(pca.dat[, colsToUse], ncp = 4, scale.unit = TRUE)
#' # save("pca1", file = paste0(here(), "/data/pca1.RData"))
#'
#' }
#'
"pca1"
| /R/pca1.R | no_license | troyhill/TTFF | R | false | false | 746 | r | #' Data: principal component analysis model for period of record
#'
#' @description A list with 5 elements.
#'
#' @format results of a call to FactoMineR::PCA():
#' \describe{
#' \item{eig}{eigenvalues for the PCA}
#' \item{var}{variable names and }
#' \item{ind}{lagged flow data}
#' \item{svd}{the response variable: the sum of observed flows across Tamiami Trail}
#' \item{call}{}
#'}
#' @docType data
#' @keywords rainfall PET stage flow
#' @name pca1
#' @examples
#' summary(pca1)
#'
#' \dontrun{
#' ### code used to generate object
#' colsToUse <- 2:55
#' pca.dat <- por
#'
#' pca1 <- FactoMineR::PCA(pca.dat[, colsToUse], ncp = 4, scale.unit = TRUE)
#' # save("pca1", file = paste0(here(), "/data/pca1.RData"))
#'
#' }
#'
"pca1"
|
# dumping and dputing make textual format editable
# dump and dput preserve the metadata
# text formats adhere to Unix philosophy
# text formats are long-lived
# textual formats work better with version control
# con: not very space-efficient
setwd("/datascience/projects/statisticallyfit/github/learningprogramming/R/RProgramming/coursera/videos")
y = data.frame(a=1, b="a")
y
dput(y)
dput(y, file="outputFiles/y.R")
new.y = dget("outputFiles/y.R")
new.y
# dump can only be used on single objects
x = "foo"
y = data.frame(a=1, b="a")
dump(c("x", "y"), file="outputFiles/data.R")
rm(x, y) # remove from workspace
# now reconstruct the objects in workspace
source("outputFiles/data.R")
y
x
| /RCourse/videos/Textual Formats.R | no_license | statisticallyfit/R | R | false | false | 692 | r | # dumping and dputing make textual format editable
# dump and dput preserve the metadata
# text formats adhere to Unix philosophy
# text formats are long-lived
# textual formats work better with version control
# con: not very space-efficient
setwd("/datascience/projects/statisticallyfit/github/learningprogramming/R/RProgramming/coursera/videos")
y = data.frame(a=1, b="a")
y
dput(y)
dput(y, file="outputFiles/y.R")
new.y = dget("outputFiles/y.R")
new.y
# dump can only be used on single objects
x = "foo"
y = data.frame(a=1, b="a")
dump(c("x", "y"), file="outputFiles/data.R")
rm(x, y) # remove from workspace
# now reconstruct the objects in workspace
source("outputFiles/data.R")
y
x
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/KingProbe.R
\name{KingProbe}
\alias{KingProbe}
\title{KingProbe}
\usage{
KingProbe(Power, TAS, p, T, N)
}
\arguments{
\item{Power}{The power in watts measured by the King probe (Watts)}
\item{TAS}{The true airspeed (m/s)}
\item{p}{Pressure (hPa)}
\item{T}{Temperature (deg. C)}
\item{N}{Droplet concentration (e.g., from a CDP)}
}
\value{
Liquid water content (g/m^3)
}
\description{
Calculates liquid water content from King Probe
}
\details{
Calculates an estimate of the dry-probe power and
subtracts this from the total power, then converts the resulting
residual power to a measure of the liquid water content. If applied
to successive measurements, the routine maintains an estimate of
the dry-air relationship between Nusselt number and Reynolds number
and uses that to adjust the zero reading when out of cloud (as
indicated by a measured droplet concentration falling below some
threshold).
}
\examples{
\dontrun{lwc <- KingProbe (25, 180, 700, 10, 0) }
\dontrun{lwc <- KingProbe (RAFdata$PLWC, RAFdata$TASX, RAFdata$PSXC,
RAFdata$ATX, RAFdata$CONCD_LWII)}
}
\author{
William Cooper
}
| /man/KingProbe.Rd | permissive | NCAR/Ranadu | R | false | true | 1,177 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/KingProbe.R
\name{KingProbe}
\alias{KingProbe}
\title{KingProbe}
\usage{
KingProbe(Power, TAS, p, T, N)
}
\arguments{
\item{Power}{The power in watts measured by the King probe (Watts)}
\item{TAS}{The true airspeed (m/s)}
\item{p}{Pressure (hPa)}
\item{T}{Temperature (deg. C)}
\item{N}{Droplet concentration (e.g., from a CDP)}
}
\value{
Liquid water content (g/m^3)
}
\description{
Calculates liquid water content from King Probe
}
\details{
Calculates an estimate of the dry-probe power and
subtracts this from the total power, then converts the resulting
residual power to a measure of the liquid water content. If applied
to successive measurements, the routine maintains an estimate of
the dry-air relationship between Nusselt number and Reynolds number
and uses that to adjust the zero reading when out of cloud (as
indicated by a measured droplet concentration falling below some
threshold).
}
\examples{
\dontrun{lwc <- KingProbe (25, 180, 700, 10, 0) }
\dontrun{lwc <- KingProbe (RAFdata$PLWC, RAFdata$TASX, RAFdata$PSXC,
RAFdata$ATX, RAFdata$CONCD_LWII)}
}
\author{
William Cooper
}
|
## Matrix inversion is usually a costly computation and there's a good opportunity to use caching
## for recurring calculations of matrix inversion.
## There are two functions in this file. One is for make caching easy and another for
## actual matrix inversion calculation
## Makes caching of matrix inversion easy to use. Returns a list of function for control access
## to data and matrix inversion and return cached value.
## Build a cache-aware wrapper around a matrix. The wrapper is a list of
## four closures that share one environment holding the matrix 'x' and its
## cached inverse:
##   set(y)     -- replace the matrix and drop any stale cached inverse
##   get()      -- return the matrix
##   setinv(i)  -- remember a computed inverse
##   getinv()   -- retrieve the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL  # any previously cached inverse is now stale
    },
    get = function() x,
    setinv = function(inverse) cached <<- inverse,
    getinv = function() cached
  )
}
## Computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## Uses cached value if possible.
cacheSolve <- function(x, ...) {
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinv(inv)
inv
} | /cachematrix.R | no_license | zaaath/ProgrammingAssignment2 | R | false | false | 1,077 | r | ## Matrix inversion is usually a costly computation and there's a good opportunity to use caching
## for recurring calculations of matrix inversion.
## There are two functions in this file. One is for make caching easy and another for
## actual matrix inversion calculation
## Makes caching of matrix inversion easy to use. Returns a list of function for control access
## to data and matrix inversion and return cached value.
## Build a cache-aware wrapper around a matrix. The wrapper is a list of
## four closures that share one environment holding the matrix 'x' and its
## cached inverse:
##   set(y)     -- replace the matrix and drop any stale cached inverse
##   get()      -- return the matrix
##   setinv(i)  -- remember a computed inverse
##   getinv()   -- retrieve the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL  # any previously cached inverse is now stale
    },
    get = function() x,
    setinv = function(inverse) cached <<- inverse,
    getinv = function() cached
  )
}
## Computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## Uses cached value if possible.
## Compute the inverse of the special "matrix" returned by makeCacheMatrix.
## Uses the cached value when one is available; otherwise computes the
## inverse with solve(), caches it, and returns it.
##
## x   : object returned by makeCacheMatrix
## ... : further arguments passed on to solve()
cacheSolve <- function(x, ...) {
  inv <- x$getinv()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  ## Fix: forward '...' to solve() so extra arguments declared in this
  ## function's signature (e.g. a second argument or tol) are honoured.
  inv <- solve(data, ...)
  x$setinv(inv)
  inv
}
#' Update an existing Team Drive
#'
#' Update the metadata of an existing Team Drive, e.g. its background image or
#' theme.
#' @template team-drives-description
#'
#' @seealso Wraps the `teamdrives.update` endpoint:
#' * <https://developers.google.com/drive/v3/reference/teamdrives/update>
#'
#' @template team_drive-singular
#' @param ... Named parameters to pass along to the Drive API. See the "Request
#' body" section of the Drive API docs for the associated endpoint.
#' @template verbose
#'
#' @template dribble-return
#' @export
#' @examples
#' \dontrun{
#' ## create a Team Drive
#' td <- team_drive_create("I love themes!")
#'
#' ## see the themes available to you
#' themes <- drive_user(fields = "teamDriveThemes")$teamDriveThemes
#' purrr::map_chr(themes, "id")
#'
#' ## cycle through various themes for this Team Drive
#' td <- team_drive_update(td, themeId = "bok_choy")
#' td <- team_drive_update(td, themeId = "cocktails")
#'
#' ## clean up
#' team_drive_rm(td)
#' }
## Update the metadata of an existing Team Drive via the Drive API.
## team_drive : anything coercible to a single Team Drive (see as_team_drive())
## ...        : named fields forwarded to the Drive API request body
## verbose    : print progress messages?
## Returns (invisibly) a dribble describing the updated Team Drive.
team_drive_update <- function(team_drive, ..., verbose = TRUE) {
  team_drive <- as_team_drive(team_drive)
  if (no_file(team_drive)) {
    ## Bug fix: return early even when verbose = FALSE. Previously a quiet
    ## call with zero matches fell through to the multiple-drives error.
    if (verbose) message("No such Team Drives found to update.")
    return(invisible(dribble()))
  }
  if (!single_file(team_drive)) {
    drives <- glue_data(team_drive, " * {name}: {id}")
    ## Bug fix: report the formatted per-drive lines; 'drives' was computed
    ## but the raw dribble was being passed to stop_collapse().
    stop_collapse(c("Can't update multiple Team Drives at once:", drives))
  }
  meta <- toCamel(list(...))
  if (length(meta) == 0) {
    if (verbose) message("No updates specified.")
    return(invisible(team_drive))
  }
  ## Ask the API for all fields unless the caller requested specific ones.
  meta$fields <- meta$fields %||% "*"
  request <- request_generate(
    endpoint = "drive.teamdrives.update",
    params = c(
      teamDriveId = as_id(team_drive),
      meta
    )
  )
  response <- request_make(request, encode = "json")
  out <- as_dribble(list(gargle::response_process(response)))
  if (verbose) {
    message_glue("\nTeam Drive updated:\n * {out$name}: {out$id}")
  }
  invisible(out)
}
| /R/team_drive_update.R | no_license | ianmcook/googledrive | R | false | false | 1,956 | r | #' Update an existing Team Drive
#'
#' Update the metadata of an existing Team Drive, e.g. its background image or
#' theme.
#' @template team-drives-description
#'
#' @seealso Wraps the `teamdrives.update` endpoint:
#' * <https://developers.google.com/drive/v3/reference/teamdrives/update>
#'
#' @template team_drive-singular
#' @param ... Named parameters to pass along to the Drive API. See the "Request
#' body" section of the Drive API docs for the associated endpoint.
#' @template verbose
#'
#' @template dribble-return
#' @export
#' @examples
#' \dontrun{
#' ## create a Team Drive
#' td <- team_drive_create("I love themes!")
#'
#' ## see the themes available to you
#' themes <- drive_user(fields = "teamDriveThemes")$teamDriveThemes
#' purrr::map_chr(themes, "id")
#'
#' ## cycle through various themes for this Team Drive
#' td <- team_drive_update(td, themeId = "bok_choy")
#' td <- team_drive_update(td, themeId = "cocktails")
#'
#' ## clean up
#' team_drive_rm(td)
#' }
## Update the metadata of an existing Team Drive via the Drive API.
## team_drive : anything coercible to a single Team Drive (see as_team_drive())
## ...        : named fields forwarded to the Drive API request body
## verbose    : print progress messages?
## Returns (invisibly) a dribble describing the updated Team Drive.
team_drive_update <- function(team_drive, ..., verbose = TRUE) {
  team_drive <- as_team_drive(team_drive)
  if (no_file(team_drive)) {
    ## Bug fix: return early even when verbose = FALSE. Previously a quiet
    ## call with zero matches fell through to the multiple-drives error.
    if (verbose) message("No such Team Drives found to update.")
    return(invisible(dribble()))
  }
  if (!single_file(team_drive)) {
    drives <- glue_data(team_drive, " * {name}: {id}")
    ## Bug fix: report the formatted per-drive lines; 'drives' was computed
    ## but the raw dribble was being passed to stop_collapse().
    stop_collapse(c("Can't update multiple Team Drives at once:", drives))
  }
  meta <- toCamel(list(...))
  if (length(meta) == 0) {
    if (verbose) message("No updates specified.")
    return(invisible(team_drive))
  }
  ## Ask the API for all fields unless the caller requested specific ones.
  meta$fields <- meta$fields %||% "*"
  request <- request_generate(
    endpoint = "drive.teamdrives.update",
    params = c(
      teamDriveId = as_id(team_drive),
      meta
    )
  )
  response <- request_make(request, encode = "json")
  out <- as_dribble(list(gargle::response_process(response)))
  if (verbose) {
    message_glue("\nTeam Drive updated:\n * {out$name}: {out$id}")
  }
  invisible(out)
}
|
# Concatinate function that is just a "better" paste0
# Flatten a character vector into a single string (a "better" paste0):
# inputs of length <= 1 are returned unchanged.
concat <- function(text) {
  if (length(text) <= 1) {
    return(text)
  }
  stringr::str_flatten(text)
}
# concatinate plus function that is just a "better" paste
# Flatten a character vector into a single space-separated string (a
# "better" paste): inputs of length <= 1 are returned unchanged.
concatPlus <- function(text) {
  if (length(text) <= 1) {
    return(text)
  }
  stringr::str_flatten(text, collapse = " ")
}
# Check whether element exists and is not NA
# (or at least that the first element of an object is not NA)
# Report whether `full_index_path` holds a usable value.
# Returns FALSE for NULL, zero-length input, or when the first element is
# NA; any error raised while inspecting the input also yields FALSE.
elementExists <- function(full_index_path) {
  tryCatch({
    n_items <- length(full_index_path)
    # An NA first element counts as "does not exist".
    if (is.na(full_index_path)[[1]]) {
      return(FALSE)
    }
    n_items > 0
  }, error = function(e) FALSE)
}
# This function is adapted from statcheck https://github.com/MicheleNuijten/statcheck/blob/master/R/htmlImport.R,
# does some final extra cleaning if any tags / weird characters remain
# Final text clean-up for scraped strings (adapted from statcheck's
# htmlImport.R). Takes a list/vector of character strings and returns a
# list with HTML artefacts, stray unicode, and mathematical glyphs
# normalised. NOTE(review): order matters -- the "\\s+" collapse must run
# after the "\n"/"\r" removals above it.
cleanText <- function(strings){
# Replace html codes:
# NOTE(review): several of these gsub calls replace a character with
# itself (e.g. "<" -> "<"). They look like HTML entity replacements
# ("&lt;" -> "<", "&#61;" -> "=", ...) whose escaping was lost at some
# point -- confirm against the original source before relying on them.
strings <- lapply(strings, gsub, pattern = "<", replacement = "<", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = "<", replacement = "<", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = "=", replacement = "=", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = ">", replacement = ">", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = ">", replacement = ">", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = "(", replacement = "(", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = ")", replacement = ")", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = " ", replacement = " ", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = " ", replacement = " ", fixed = TRUE)
# Strip hard line breaks, then collapse runs of whitespace to one space.
strings <- lapply(strings, gsub, pattern = "\n", replacement = "")
strings <- lapply(strings, gsub, pattern = "\r", replacement = "")
strings <- lapply(strings, gsub, pattern = "\\s+", replacement = " ")
strings <- lapply(strings, gsub, pattern = "−", replacement = "-", fixed = TRUE)
# removing newline breaks, non-breaking spaces, ' ', &
strings <- lapply(strings, gsub, pattern = "[Ââˆ\\’Ï„œ€$!\\“\u009d]", replacement = " ")
# replacing unicode minus sign with R recognised minus sign
# NOTE(review): "\\u2212" is the true minus sign (hex). The "\\u8211",
# "\\u8212", "\\u8210" and "\\u65112" patterns treat what look like
# DECIMAL HTML entity numbers (8211 = en dash, U+2013) as HEX code
# points, so they likely match CJK characters or nothing useful --
# verify intent.
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u2212", replacement = "-")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u8211", replacement = "-")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u8212", replacement = "-")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u8210", replacement = "-")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u65112", replacement = "-")
strings <- lapply(strings, stringr::str_replace_all, pattern = "–", replacement = "-")
# replacing unicode short spaces (thin space U+2009) that are not always
# picked up above
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u2009", replacement = " ")
# replacing mathematical alphanumeric symbols with interpretable ASCII
# equivalents (italic P -> "p", italic n -> "n", beta, alpha, chi, ...)
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U0001d443", replacement = "p")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U0001d45b", replacement = "n")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U0001d6fd", replacement = "beta")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U0001d6fc", replacement = "alpha")
# NOTE(review): the next line is duplicated verbatim below -- harmless
# but redundant.
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U0001d712", replacement = "chi")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U0001d712", replacement = "chi")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u2439", replacement = "chi")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U00002AFD", replacement = "=")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u2b0d", replacement = "<")
# Expand typographic ligatures (fi, ff, fl) into their letter pairs.
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\ufb01", replacement = "fi")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\ufb00", replacement = "ff")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\ufb02", replacement = "fl")
return(strings)
}
| /R/utilities.R | permissive | fsingletonthorn/EffectSizeScraping | R | false | false | 4,345 | r | # Concatinate function that is just a "better" paste0
concat <- function(text) {
if (length(text) > 1) {
return(stringr::str_flatten(text))
} else {
return(text)
}
}
# concatinate plus function that is just a "better" paste
concatPlus <- function(text) {
if (length(text) > 1) {
return(stringr::str_flatten(text, collapse = " "))
} else {
return(text)
}
}
# Check whether element exists and is not NA
# (or at least that the first element of an object is not NA)
elementExists <- function( full_index_path ){
tryCatch({
len_element = length(full_index_path)
if(is.na(full_index_path)[[1]]) {return(F)}
exists_indicator = ifelse(len_element > 0, T, F)
return(exists_indicator)
}, error = function(e) {
return(F)
})
}
# This function is adapted from statcheck https://github.com/MicheleNuijten/statcheck/blob/master/R/htmlImport.R,
# does some final extra cleaning if any tags / weird characters remain
cleanText <- function(strings){
# Replace html codes:
strings <- lapply(strings, gsub, pattern = "<", replacement = "<", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = "<", replacement = "<", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = "=", replacement = "=", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = ">", replacement = ">", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = ">", replacement = ">", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = "(", replacement = "(", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = ")", replacement = ")", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = " ", replacement = " ", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = " ", replacement = " ", fixed = TRUE)
strings <- lapply(strings, gsub, pattern = "\n", replacement = "")
strings <- lapply(strings, gsub, pattern = "\r", replacement = "")
strings <- lapply(strings, gsub, pattern = "\\s+", replacement = " ")
strings <- lapply(strings, gsub, pattern = "−", replacement = "-", fixed = TRUE)
# removing newline breaks, non-breaking spaces, ' ', &
strings <- lapply(strings, gsub, pattern = "[Ââˆ\\’Ï„œ€$!\\“\u009d]", replacement = " ")
# replacing unicode minus sign with R recognised minus sign
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u2212", replacement = "-")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u8211", replacement = "-")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u8212", replacement = "-")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u8210", replacement = "-")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u65112", replacement = "-")
strings <- lapply(strings, stringr::str_replace_all, pattern = "–", replacement = "-")
# replcaing unicode short spaces that are not always picked up above
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u2009", replacement = " ")
# replcaing mathematical symbols with interpretable ones
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U0001d443", replacement = "p")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U0001d45b", replacement = "n")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U0001d6fd", replacement = "beta")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U0001d6fc", replacement = "alpha")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U0001d712", replacement = "chi")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U0001d712", replacement = "chi")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u2439", replacement = "chi")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\U00002AFD", replacement = "=")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\u2b0d", replacement = "<")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\ufb01", replacement = "fi")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\ufb00", replacement = "ff")
strings <- lapply(strings, stringr::str_replace_all, pattern = "\\ufb02", replacement = "fl")
return(strings)
}
|
## Monthly timesheet extract for November 2019: read the raw workbook,
## subset the month, and compute per-task and overall hour totals.
library(knitr)
library(kableExtra)
library(dplyr)
library(readxl)
# Read the raw timesheet (first column Day, remaining columns are
# task/activity hours).
ds <- read_excel("ts_lastname_firstname.xlsx", sheet = "ts_lastname_firstname")
# Convert the Day column from text to Date.
ds$Day <- as.Date(ds$Day)
# Keep only rows for November 2019.
part_201911 <- filter(ds, Day >= "2019-11-01", Day <= "2019-11-30")
# Add a per-day Total column (row sums across all task columns).
print_201911 <- transform(part_201911, Total = rowSums(part_201911[, -1]))
# Build a one-row "Total hours" summary from the per-column sums.
s <-t(colSums(print_201911[-1]))
q <- cbind("Total hours", s)
colnames(q)[1] <- "Task/Activity"
# Total hours worked on the Project this month.
# NOTE(review): columns 2:46 are assumed to be the Project's task
# columns -- confirm against the workbook layout.
total_ph_201911 <- sum(part_201911[, 2:46])
# Total hours worked this month across all activities.
# NOTE(review): 2:51 assumed to cover every activity column -- confirm.
total_wh_201911 <- sum(part_201911[, 2:51])
#part_201911 <- subset(ds, ds$Day >= "2019-11-01" & ds$Day <= "2019-11-30") | /201911.R | no_license | InflatableGull/timesheet | R | false | false | 812 | r | library(knitr)
library(kableExtra)
library(dplyr)
library(readxl)
# read the source file
ds <- read_excel("ts_lastname_firstname.xlsx", sheet = "ts_lastname_firstname")
#Conversion to Date format from text
ds$Day <- as.Date(ds$Day)
# Subset for 2019 11
part_201911 <- filter(ds, Day >= "2019-11-01", Day <= "2019-11-30")
# A column with row sums is added
print_201911 <- transform(part_201911, Total = rowSums(part_201911[, -1]))
#creation of a row with totals
s <-t(colSums(print_201911[-1]))
q <- cbind("Total hours", s)
colnames(q)[1] <- "Task/Activity"
# total worked hours in the month on the Project
total_ph_201911 <- sum(part_201911[, 2:46])
# total worked hours in the month
total_wh_201911 <- sum(part_201911[, 2:51])
#part_201911 <- subset(ds, ds$Day >= "2019-11-01" & ds$Day <= "2019-11-30") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query_bib.R
\name{load_betterbiblatex_bib}
\alias{load_betterbiblatex_bib}
\title{Load bibliography from Zotero}
\usage{
load_betterbiblatex_bib(
encoding,
betterbiblatex_format = "bibtex",
exclude_betterbiblatex_library = NULL,
increment_progress = FALSE
)
}
\arguments{
\item{encoding}{Character. Encoding of the Bib(La)TeX-file. See the \code{\link{connection}}.}
\item{betterbiblatex_format}{Character. Bibliography format to export from Zotero/Juris-M. Can be either \code{"bibtex"} or \code{"biblatex"}. Ignored if \code{use_betterbiblatex = FALSE}.
Requires that the \href{https://github.com/retorquere/zotero-better-bibtex}{Better BibTeX} is installed and
Zotero/Juris-M is running.}
\item{exclude_betterbiblatex_library}{Character. A vector of Zotero/Juris-M library names not to query.}
\item{increment_progress}{logical switch which will use the shiny function
\code{incProgress} when \code{TRUE}. \code{FALSE} by default.}
}
\description{
Load bibliography from Zotero
}
\details{
This function loads into RAM the bibliography stored in Zotero.
May take several seconds if you have many hundreds of references.
}
\examples{
\dontrun{
b <- load_betterbiblatex_bib(encoding = "UTF-8")
}
}
| /man/load_betterbiblatex_bib.Rd | no_license | papezneuroO/citr | R | false | true | 1,290 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query_bib.R
\name{load_betterbiblatex_bib}
\alias{load_betterbiblatex_bib}
\title{Load bibliography from Zotero}
\usage{
load_betterbiblatex_bib(
encoding,
betterbiblatex_format = "bibtex",
exclude_betterbiblatex_library = NULL,
increment_progress = FALSE
)
}
\arguments{
\item{encoding}{Character. Encoding of the Bib(La)TeX-file. See the \code{\link{connection}}.}
\item{betterbiblatex_format}{Character. Bibliography format to export from Zotero/Juris-M. Can be either \code{"bibtex"} or \code{"biblatex"}. Ignored if \code{use_betterbiblatex = FALSE}.
Requires that the \href{https://github.com/retorquere/zotero-better-bibtex}{Better BibTeX} is installed and
Zotero/Juris-M is running.}
\item{exclude_betterbiblatex_library}{Character. A vector of Zotero/Juris-M library names not to query.}
\item{increment_progress}{logical switch which will use the shiny function
\code{incProgress} when \code{TRUE}. \code{FALSE} by default.}
}
\description{
Load bibliography from Zotero
}
\details{
This function loads into RAM the bibliography stored in Zotero.
May take several seconds if you have many hundreds of references.
}
\examples{
\dontrun{
b <- load_betterbiblatex_bib(encoding = "UTF-8")
}
}
|
## wrapper
## NULL arguments are dropped
## internal function
## Internal single-run driver for the bSims pipeline. Chains
## bsims_init -> bsims_populate -> bsims_animate -> bsims_detect ->
## bsims_transcribe, injecting each entry of `Settings` whose name matches
## a formal argument of a step into that step's call. NULL entries are
## dropped, so the step's own default is used.
.bsims_all <- function(Settings) {
## Pipeline steps, in the order they must run.
Functions <- list(
bsims_init=bsims_init,
bsims_populate=bsims_populate,
bsims_animate=bsims_animate,
bsims_detect=bsims_detect,
bsims_transcribe=bsims_transcribe)
## Formal arguments of each step, minus `...` and the piped-in `x`;
## used to decide which Settings entries a step accepts.
Formals <- lapply(Functions, formals)
Formals <- lapply(Formals, function(z) z[names(z) != "..."])
Formals <- lapply(Formals, function(z) z[names(z) != "x"])
for (i in seq_len(length(Functions))) {
## Each step's result carries the call that produced it ($call);
## use that as a template, then rewrite its function name and args.
Call <- if (i == 1L)
Functions[[i]]()$call else Functions[[i]](Last)$call
Call[[1L]] <- as.name(names(Functions)[i])
if (i > 1L)
Call[["x"]] <- as.name("Last")
for (j in names(Settings)) {
if (j %in% names(Formals[[i]])) {
## NOTE(review): this assignment to Formals never feeds into Call;
## when Settings[[j]] is NULL it removes the formal, otherwise it
## appears to have no downstream effect -- possibly vestigial.
Formals[[i]][[j]] <- Settings[[j]]
if (!is.null(Settings[[j]]))
Call[[j]] <- Settings[[j]]
}
}
## Evaluate the rebuilt call; `Last` feeds into the next stage.
Last <- eval(Call)
}
Last
}
## Public wrapper: capture simulation settings once and return an object
## of closures for (re)running the full pipeline. Accepts named settings
## via `...`, or a single list of settings.
bsims_all <- function(...) {
Settings <- list(...)
## A single list argument is treated as the settings list itself.
if (length(Settings) == 1L && is.list(Settings[[1L]]))
Settings <- Settings[[1L]]
out <- list()
## Accessor for the captured settings.
out$settings <- function() Settings
## Run the pipeline once; recover=TRUE wraps the run in try() so a
## failure yields a try-error object instead of stopping.
out$new <- function(recover=FALSE) {
if (recover)
try(.bsims_all(Settings)) else .bsims_all(Settings)
}
## Run the pipeline B times, optionally on a parallel cluster `cl`
## (anything inheriting class "cluster").
out$replicate <- function(B=1, recover=FALSE, cl=NULL) {
if (!is.null(cl) && inherits(cl, "cluster")) {
## Attach bSims on the workers only if not already attached, and
## ship the internal driver to them.
isLoaded <- all(unlist(clusterEvalQ(cl, "bSims" %in% .packages())))
if (!isLoaded)
clusterEvalQ(cl, library(bSims))
clusterExport(cl, c(".bsims_all"))
}
z <- if (recover) {
pbreplicate(B, try(.bsims_all(Settings)), simplify=FALSE, cl=cl)
} else {
pbreplicate(B, .bsims_all(Settings), simplify=FALSE, cl=cl)
}
if (!is.null(cl) && inherits(cl, "cluster")) {
## Undo the worker-side changes made above.
clusterEvalQ(cl, rm(.bsims_all))
if (!isLoaded)
clusterEvalQ(cl, detach("package:bSims", unload=TRUE))
}
z
}
## Inherits the classes of all pipeline stages. NOTE(review): presumably
## so stage-specific methods also dispatch on this wrapper -- confirm.
class(out) <- c("bsims_all",
"bsims_transcript",
"bsims_detections",
"bsims_events",
"bsims_population",
"bsims_landscape",
"bsims")
out
}
| /R/bsims_all.R | no_license | cran/bSims | R | false | false | 2,131 | r | ## wrapper
## NULL arguments are dropped
## internal function
.bsims_all <- function(Settings) {
Functions <- list(
bsims_init=bsims_init,
bsims_populate=bsims_populate,
bsims_animate=bsims_animate,
bsims_detect=bsims_detect,
bsims_transcribe=bsims_transcribe)
Formals <- lapply(Functions, formals)
Formals <- lapply(Formals, function(z) z[names(z) != "..."])
Formals <- lapply(Formals, function(z) z[names(z) != "x"])
for (i in seq_len(length(Functions))) {
Call <- if (i == 1L)
Functions[[i]]()$call else Functions[[i]](Last)$call
Call[[1L]] <- as.name(names(Functions)[i])
if (i > 1L)
Call[["x"]] <- as.name("Last")
for (j in names(Settings)) {
if (j %in% names(Formals[[i]])) {
Formals[[i]][[j]] <- Settings[[j]]
if (!is.null(Settings[[j]]))
Call[[j]] <- Settings[[j]]
}
}
Last <- eval(Call)
}
Last
}
bsims_all <- function(...) {
Settings <- list(...)
if (length(Settings) == 1L && is.list(Settings[[1L]]))
Settings <- Settings[[1L]]
out <- list()
out$settings <- function() Settings
out$new <- function(recover=FALSE) {
if (recover)
try(.bsims_all(Settings)) else .bsims_all(Settings)
}
out$replicate <- function(B=1, recover=FALSE, cl=NULL) {
if (!is.null(cl) && inherits(cl, "cluster")) {
isLoaded <- all(unlist(clusterEvalQ(cl, "bSims" %in% .packages())))
if (!isLoaded)
clusterEvalQ(cl, library(bSims))
clusterExport(cl, c(".bsims_all"))
}
z <- if (recover) {
pbreplicate(B, try(.bsims_all(Settings)), simplify=FALSE, cl=cl)
} else {
pbreplicate(B, .bsims_all(Settings), simplify=FALSE, cl=cl)
}
if (!is.null(cl) && inherits(cl, "cluster")) {
clusterEvalQ(cl, rm(.bsims_all))
if (!isLoaded)
clusterEvalQ(cl, detach("package:bSims", unload=TRUE))
}
z
}
class(out) <- c("bsims_all",
"bsims_transcript",
"bsims_detections",
"bsims_events",
"bsims_population",
"bsims_landscape",
"bsims")
out
}
|
## Caching the inverse of a Matrix: Below are a pair of functions that are used
## to create a special object that stores a matrix and catches its inverse.
# First, I create the function "makeCacheMatrix" that creates a special Matrix
# object that can cache its inverse.
## I set the input x as a matrix and then set the solved value "Inv" as a null.
## Then I changed every reference to "mean" to "solve".
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function (y) {
x <<- y
inv <<- NULL
}
get <- function() x
setInv <- function(inv) inv <<- inv
getInv <- function() inv
list(set = set, get = get,
setInv = setInv, getInv = getInv)
}
## Secondly, I created the function "cacheSolve" to compute the inverse of the
## special "matrix" returned by "makeCacheMatrix" above. If the inverse has
## already been calculated (and the matrix has not changed), then "cacheSolve"
## should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInv()
if(!is.null(inv)) {
message("getting inversed matrix")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setInv(inv)
inv
}
| /ProgrammingAssignment2_cachematrix.R | no_license | Trochillianne/01.-R-Programming_Example | R | false | false | 1,420 | r | ## Caching the inverse of a Matrix: Below are a pair of functions that are used
## to create a special object that stores a matrix and catches its inverse.
# First, I create the function "makeCacheMatrix" that creates a special Matrix
# object that can cache its inverse.
## I set the input x as a matrix and then set the solved value "Inv" as a null.
## Then I changed every reference to "mean" to "solve".
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function (y) {
x <<- y
inv <<- NULL
}
get <- function() x
setInv <- function(inv) inv <<- inv
getInv <- function() inv
list(set = set, get = get,
setInv = setInv, getInv = getInv)
}
## Secondly, I created the function "cacheSolve" to compute the inverse of the
## special "matrix" returned by "makeCacheMatrix" above. If the inverse has
## already been calculated (and the matrix has not changed), then "cacheSolve"
## should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInv()
if(!is.null(inv)) {
message("getting inversed matrix")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setInv(inv)
inv
}
|
## Plot6
library(plyr)
library(ggplot2)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
SCC.motor <- grep("motor", SCC$Short.Name, ignore.case = TRUE)
SCC.motor <- SCC[SCC.motor, ]
SCC.identifiers <- as.character(SCC.motor$SCC)
NEI$SCC <- as.character(NEI$SCC)
NEI.motor <- NEI[NEI$SCC %in% SCC.identifiers, ]
NEI.motor.24510 <- NEI.motor[which(NEI.motor$fips == "24510"), ]
aggregate.motor.24510 <- with(NEI.motor.24510, aggregate(Emissions, by = list(year),
sum))
SCC.motor <- grep("motor", SCC$Short.Name, ignore.case = TRUE)
SCC.motor <- SCC[SCC.motor, ]
SCC.identifiers <- as.character(SCC.motor$SCC)
NEI$SCC <- as.character(NEI$SCC)
NEI.motor <- NEI[NEI$SCC %in% SCC.identifiers, ]
NEI.motor.24510 <- NEI.motor[which(NEI.motor$fips == "24510"), ]
NEI.motor.06037 <- NEI.motor[which(NEI.motor$fips == "06037"), ]
aggregate.motor.24510 <- with(NEI.motor.24510, aggregate(Emissions, by = list(year),
sum))
aggregate.motor.24510$group <- rep("Baltimore County", length(aggregate.motor.24510[,
1]))
aggregate.motor.06037 <- with(NEI.motor.06037, aggregate(Emissions, by = list(year),
sum))
aggregate.motor.06037$group <- rep("Los Angeles County", length(aggregate.motor.06037[,
1]))
aggregated.motor.zips <- rbind(aggregate.motor.06037, aggregate.motor.24510)
aggregated.motor.zips$group <- as.factor(aggregated.motor.zips$group)
colnames(aggregated.motor.zips) <- c("Year", "Emissions", "Group")
qplot(Year, Emissions, data = aggregated.motor.zips, group = Group, color = Group,
geom = c("point", "line"), ylab = expression("Total Emissions, PM"[2.5]),
xlab = "Year", main = "Comparison of Total Emissions by County") | /Plot6.R | no_license | aashraf451/Exploratory_Data_Analysis_Project_2 | R | false | false | 2,079 | r | ## Plot6
library(plyr)
library(ggplot2)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
SCC.motor <- grep("motor", SCC$Short.Name, ignore.case = TRUE)
SCC.motor <- SCC[SCC.motor, ]
SCC.identifiers <- as.character(SCC.motor$SCC)
NEI$SCC <- as.character(NEI$SCC)
NEI.motor <- NEI[NEI$SCC %in% SCC.identifiers, ]
NEI.motor.24510 <- NEI.motor[which(NEI.motor$fips == "24510"), ]
aggregate.motor.24510 <- with(NEI.motor.24510, aggregate(Emissions, by = list(year),
sum))
SCC.motor <- grep("motor", SCC$Short.Name, ignore.case = TRUE)
SCC.motor <- SCC[SCC.motor, ]
SCC.identifiers <- as.character(SCC.motor$SCC)
NEI$SCC <- as.character(NEI$SCC)
NEI.motor <- NEI[NEI$SCC %in% SCC.identifiers, ]
NEI.motor.24510 <- NEI.motor[which(NEI.motor$fips == "24510"), ]
NEI.motor.06037 <- NEI.motor[which(NEI.motor$fips == "06037"), ]
aggregate.motor.24510 <- with(NEI.motor.24510, aggregate(Emissions, by = list(year),
sum))
aggregate.motor.24510$group <- rep("Baltimore County", length(aggregate.motor.24510[,
1]))
aggregate.motor.06037 <- with(NEI.motor.06037, aggregate(Emissions, by = list(year),
sum))
aggregate.motor.06037$group <- rep("Los Angeles County", length(aggregate.motor.06037[,
1]))
aggregated.motor.zips <- rbind(aggregate.motor.06037, aggregate.motor.24510)
aggregated.motor.zips$group <- as.factor(aggregated.motor.zips$group)
colnames(aggregated.motor.zips) <- c("Year", "Emissions", "Group")
qplot(Year, Emissions, data = aggregated.motor.zips, group = Group, color = Group,
geom = c("point", "line"), ylab = expression("Total Emissions, PM"[2.5]),
xlab = "Year", main = "Comparison of Total Emissions by County") |
source( "masternegloglikereduced1.R" )
source("eudicottree.R" )
library( "expm" )
source( "Qmatrixwoodherb3.R" )
source("Pruning2.R")
bichrom.dataset<-read.table( "eudicotvals.txt",header=FALSE,sep=",",stringsAsFactors=FALSE)
last.state=50
uniform.samples<-read.csv("sample309.csv",header=FALSE)
a<- as.numeric(t(uniform.samples))
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
results<-rep(0,10)
mle<-try(optim(par=a,fn=negloglikelihood.wh, method= "Nelder-Mead", bichrom.phy=angiosperm.tree, bichrom.data=bichrom.dataset,max.chromosome=last.state,pi.0=p.0),silent=TRUE)
print(mle)
if(class(mle)=="try-error"){results<-rep(NA,10)}else{
results[1:9]<-exp(mle$par)
results[10]<-mle$value}
write.table(results,file="results309.csv",sep=",")
| /Reduced model optimizations/explorelikereduced309.R | no_license | roszenil/Bichromdryad | R | false | false | 750 | r | source( "masternegloglikereduced1.R" )
source("eudicottree.R" )
library( "expm" )
source( "Qmatrixwoodherb3.R" )
source("Pruning2.R")
bichrom.dataset<-read.table( "eudicotvals.txt",header=FALSE,sep=",",stringsAsFactors=FALSE)
last.state=50
uniform.samples<-read.csv("sample309.csv",header=FALSE)
a<- as.numeric(t(uniform.samples))
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
results<-rep(0,10)
mle<-try(optim(par=a,fn=negloglikelihood.wh, method= "Nelder-Mead", bichrom.phy=angiosperm.tree, bichrom.data=bichrom.dataset,max.chromosome=last.state,pi.0=p.0),silent=TRUE)
print(mle)
if(class(mle)=="try-error"){results<-rep(NA,10)}else{
results[1:9]<-exp(mle$par)
results[10]<-mle$value}
write.table(results,file="results309.csv",sep=",")
|
library(forecast)
### Name: checkresiduals
### Title: Check that residuals from a time series model look like white
### noise
### Aliases: checkresiduals
### ** Examples
fit <- ets(WWWusage)
checkresiduals(fit)
| /data/genthat_extracted_code/forecast/examples/checkresiduals.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 222 | r | library(forecast)
### Name: checkresiduals
### Title: Check that residuals from a time series model look like white
### noise
### Aliases: checkresiduals
### ** Examples
fit <- ets(WWWusage)
checkresiduals(fit)
|
#install.packages("MatrixEQTL")
# source("Matrix_eQTL_R/Matrix_eQTL_engine.r");
library(MatrixEQTL)
## Location of the package with the data files.
base.dir = find.package('MatrixEQTL');
# base.dir = '.';
## Settings
# Linear model to use, modelANOVA, modelLINEAR, or modelLINEAR_CROSS
useModel = modelLINEAR; # modelANOVA, modelLINEAR, or modelLINEAR_CROSS
# Genotype file name
SNP_file_name = "../HDL_significant_SNPs_QCd_filtered.dosage";
snps_location_file_name = "../HDL_significant_SNPs_locations_QCd.txt";
# Gene expression file name
expression_file_name = "../../Adipose\ expression\ data/FINAL_logTPMs_and_activities/Filtered_Eurobats_adipose_qnorm_INT_logTPMs_for_4213_regulators.txt";
gene_location_file_name = "../../Adipose\ expression\ data/FINAL_logTPMs_and_activities/Hg19_gene_map_for_13776_expressed_genes_in_Eurobats_adipose.map";
# Covariates file name
# Set to character() for no covariates
covariates_file_name = "../Filtered_Eurobats_adipose_covars_no_PEER.txt";
# Output file name
output_file_name_cis = tempfile();
output_file_name_tra = tempfile();
# Only associations significant at this level will be saved
pvOutputThreshold_cis = 1;
pvOutputThreshold_tra = 1e-08;
# Error covariance matrix
# Set to numeric() for identity.
errorCovariance = numeric();
# errorCovariance = read.table("Sample_Data/errorCovariance.txt");
# Distance for local gene-SNP pairs
cisDist = 1e6;
## Load gene expression data
gene = SlicedData$new();
gene$fileDelimiter = "\t"; # the TAB character
gene$fileOmitCharacters = "NA"; # denote missing values;
gene$fileSkipRows = 1; # one row of column labels
gene$fileSkipColumns = 1; # one column of row labels
gene$fileSliceSize = 2000; # read file in slices of 2,000 rows
gene$LoadFile(expression_file_name);
## Load genotype data
snps = SlicedData$new();
snps$fileDelimiter = "\t"; # the TAB character
snps$fileOmitCharacters = "NA"; # denote missing values;
snps$fileSkipRows = 1; # one row of column labels
snps$fileSkipColumns = 1; # one column of row labels
snps$fileSliceSize = 2000; # read file in slices of 2,000 rows
snps$LoadFile(SNP_file_name);
snps$ColumnSubsample(na.omit(match(gene$columnNames,snps$columnNames)));
# Subset the expression data
gene$ColumnSubsample(na.omit(match(snps$columnNames,gene$columnNames)));
## Load covariates
cvrt = SlicedData$new();
cvrt$fileDelimiter = "\t"; # the TAB character
cvrt$fileOmitCharacters = "NA"; # denote missing values;
cvrt$fileSkipRows = 1; # one row of column labels
cvrt$fileSkipColumns = 1; # one column of row labels
if(length(covariates_file_name)>0) {
cvrt$LoadFile(covariates_file_name);
}
cvrt$ColumnSubsample(na.omit(match(gene$columnNames,cvrt$columnNames)));
## Check that samples are properly filtered and ordered between expression, genotype and covariate data. If not, quit!
if(!(all(gene$columnNames==snps$columnNames) & all(gene$columnNames==cvrt$columnNames))){
print("Samples do not match between input data!!!")
q(save="no")
}
## Normal quantile transformation of gene expression data
for( sl in 1:length(gene) ) {
mat = gene[[sl]];
mat = t(apply(mat, 1, rank, ties.method = "average"));
mat = qnorm(mat / (ncol(gene)+1));
gene[[sl]] = mat;
}
rm(sl, mat);
## Run the analysis
snpspos = read.table(snps_location_file_name, header = TRUE, stringsAsFactors = FALSE);
genepos = read.table(gene_location_file_name, header = TRUE, stringsAsFactors = FALSE);
me = Matrix_eQTL_main(
snps = snps,
gene = gene,
cvrt = cvrt,
output_file_name = output_file_name_tra,
pvOutputThreshold = pvOutputThreshold_tra,
useModel = useModel,
errorCovariance = errorCovariance,
verbose = TRUE,
output_file_name.cis = output_file_name_cis,
pvOutputThreshold.cis = pvOutputThreshold_cis,
snpspos = snpspos,
genepos = genepos,
cisDist = cisDist,
pvalue.hist = "qqplot",
min.pv.by.genesnp = FALSE,
noFDRsaveMemory = FALSE);
unlink(output_file_name_tra);
unlink(output_file_name_cis);
## Results:
cat('Analysis done in: ', me$time.in.sec, ' seconds', '\n');
cat('Detected local eQTLs:', '\n');
cis_eqtls<-me$cis$eqtls
cat('Detected distant eQTLs:', '\n');
trans_eqtls<-me$trans$eqtls
## Plot the Q-Q plot of local and distant p-values
jpeg("Eurobats_adipose_HDL_sig_eQTLs_from_INT_logTPM_of_4213_regulators_plot.jpg")
plot(me)
dev.off()
write.table(cis_eqtls,"Eurobats_adipose_HDL_sig_cis-eQTLs_from_INT_logTPM_of_4213_regulators.txt",sep="\t",quote = FALSE,row.names=FALSE)
write.table(trans_eqtls,"Eurobats_adipose_HDL_sig_trans-eQTLs_from_INT_logTPM_of_4213_regulators.txt",sep="\t",quote = FALSE,row.names=FALSE)
q(save="no") | /eQTL_analyses/R_script_for_Eurobats_adipose_HDL_significant_4213_regulators_matrix_eQTL.R | no_license | hoskinsjw/aQTL2021 | R | false | false | 4,685 | r | #install.packages("MatrixEQTL")
# source("Matrix_eQTL_R/Matrix_eQTL_engine.r");
library(MatrixEQTL)
## Location of the package with the data files.
base.dir = find.package('MatrixEQTL');
# base.dir = '.';
## Settings
# Linear model to use, modelANOVA, modelLINEAR, or modelLINEAR_CROSS
useModel = modelLINEAR; # modelANOVA, modelLINEAR, or modelLINEAR_CROSS
# Genotype file name
SNP_file_name = "../HDL_significant_SNPs_QCd_filtered.dosage";
snps_location_file_name = "../HDL_significant_SNPs_locations_QCd.txt";
# Gene expression file name
expression_file_name = "../../Adipose\ expression\ data/FINAL_logTPMs_and_activities/Filtered_Eurobats_adipose_qnorm_INT_logTPMs_for_4213_regulators.txt";
gene_location_file_name = "../../Adipose\ expression\ data/FINAL_logTPMs_and_activities/Hg19_gene_map_for_13776_expressed_genes_in_Eurobats_adipose.map";
# Covariates file name
# Set to character() for no covariates
covariates_file_name = "../Filtered_Eurobats_adipose_covars_no_PEER.txt";
# Output file name
output_file_name_cis = tempfile();
output_file_name_tra = tempfile();
# Only associations significant at this level will be saved
pvOutputThreshold_cis = 1;
pvOutputThreshold_tra = 1e-08;
# Error covariance matrix
# Set to numeric() for identity.
errorCovariance = numeric();
# errorCovariance = read.table("Sample_Data/errorCovariance.txt");
# Distance for local gene-SNP pairs
cisDist = 1e6;
## Load gene expression data
gene = SlicedData$new();
gene$fileDelimiter = "\t"; # the TAB character
gene$fileOmitCharacters = "NA"; # denote missing values;
gene$fileSkipRows = 1; # one row of column labels
gene$fileSkipColumns = 1; # one column of row labels
gene$fileSliceSize = 2000; # read file in slices of 2,000 rows
gene$LoadFile(expression_file_name);
## Load genotype data
snps = SlicedData$new();
snps$fileDelimiter = "\t"; # the TAB character
snps$fileOmitCharacters = "NA"; # denote missing values;
snps$fileSkipRows = 1; # one row of column labels
snps$fileSkipColumns = 1; # one column of row labels
snps$fileSliceSize = 2000; # read file in slices of 2,000 rows
snps$LoadFile(SNP_file_name);
snps$ColumnSubsample(na.omit(match(gene$columnNames,snps$columnNames)));
# Subset the expression data
gene$ColumnSubsample(na.omit(match(snps$columnNames,gene$columnNames)));
## Load covariates
cvrt = SlicedData$new();
cvrt$fileDelimiter = "\t"; # the TAB character
cvrt$fileOmitCharacters = "NA"; # denote missing values;
cvrt$fileSkipRows = 1; # one row of column labels
cvrt$fileSkipColumns = 1; # one column of row labels
if(length(covariates_file_name)>0) {
cvrt$LoadFile(covariates_file_name);
}
cvrt$ColumnSubsample(na.omit(match(gene$columnNames,cvrt$columnNames)));
## Check that samples are properly filtered and ordered between expression, genotype and covariate data. If not, quit!
if(!(all(gene$columnNames==snps$columnNames) & all(gene$columnNames==cvrt$columnNames))){
print("Samples do not match between input data!!!")
q(save="no")
}
## Normal quantile transformation of gene expression data
for( sl in 1:length(gene) ) {
mat = gene[[sl]];
mat = t(apply(mat, 1, rank, ties.method = "average"));
mat = qnorm(mat / (ncol(gene)+1));
gene[[sl]] = mat;
}
rm(sl, mat);
## Run the analysis
snpspos = read.table(snps_location_file_name, header = TRUE, stringsAsFactors = FALSE);
genepos = read.table(gene_location_file_name, header = TRUE, stringsAsFactors = FALSE);
me = Matrix_eQTL_main(
snps = snps,
gene = gene,
cvrt = cvrt,
output_file_name = output_file_name_tra,
pvOutputThreshold = pvOutputThreshold_tra,
useModel = useModel,
errorCovariance = errorCovariance,
verbose = TRUE,
output_file_name.cis = output_file_name_cis,
pvOutputThreshold.cis = pvOutputThreshold_cis,
snpspos = snpspos,
genepos = genepos,
cisDist = cisDist,
pvalue.hist = "qqplot",
min.pv.by.genesnp = FALSE,
noFDRsaveMemory = FALSE);
unlink(output_file_name_tra);
unlink(output_file_name_cis);
## Results:
cat('Analysis done in: ', me$time.in.sec, ' seconds', '\n');
cat('Detected local eQTLs:', '\n');
cis_eqtls<-me$cis$eqtls
cat('Detected distant eQTLs:', '\n');
trans_eqtls<-me$trans$eqtls
## Plot the Q-Q plot of local and distant p-values
jpeg("Eurobats_adipose_HDL_sig_eQTLs_from_INT_logTPM_of_4213_regulators_plot.jpg")
plot(me)
dev.off()
write.table(cis_eqtls,"Eurobats_adipose_HDL_sig_cis-eQTLs_from_INT_logTPM_of_4213_regulators.txt",sep="\t",quote = FALSE,row.names=FALSE)
write.table(trans_eqtls,"Eurobats_adipose_HDL_sig_trans-eQTLs_from_INT_logTPM_of_4213_regulators.txt",sep="\t",quote = FALSE,row.names=FALSE)
q(save="no") |
# Lexical vs Dynamic Scoping
# R uses Lexical Scoping
# y as defined in the global enviroment
y<-10
f<-function(x){
y<-2
y^2+g(x)
# Note that both y and g are free variables.
# the y here is defined within the function
# g is defined outside of the function f
}
g<-function(x){
x*y
# the free variable is defined in the global enviroment (i.e., 10)
}
# If we use other languages with dynamic scoping, the y value
# in the g function is looked up in the enviroment from which the
# function was called (which is within f function). In that case,
# y value is g(x) would be 2 as defined in f function.
| /wk3/Scoping_Lexical_vs_Dynamic.r | no_license | chriszeng8/R_Programming | R | false | false | 621 | r | # Lexical vs Dynamic Scoping
# R uses Lexical Scoping
# y as defined in the global enviroment
y<-10
f<-function(x){
y<-2
y^2+g(x)
# Note that both y and g are free variables.
# the y here is defined within the function
# g is defined outside of the function f
}
g<-function(x){
x*y
# the free variable is defined in the global enviroment (i.e., 10)
}
# If we use other languages with dynamic scoping, the y value
# in the g function is looked up in the enviroment from which the
# function was called (which is within f function). In that case,
# y value is g(x) would be 2 as defined in f function.
|
data(iris)
d=iris
head(d)
names(d)
d=d[,1:4]
names(d)=c("SL","SW","PL","PW")
#hotelsters T^2 test
n=150
muo=c(4,5,10,2) #mu guess
xbar=colMeans(d)
S=var(d)
T2=n*t(xbar-muo)%*%solve(S)%*%(xbar-muo)
Ft=(n-1)*4/(n-4)*qf(.95,3,n-4)
#if they fail, then must do 4 univariate t-tests at alpha/4
#CR
tq=qt(1-(.05/(2*4)),n-1)
c1=c(xbar[1]-tq*S[1,1]/sqrt(n),xbar[1]+tq*S[1,1]/sqrt(n))
c2=c(xbar[2]-tq*S[2,2]/sqrt(n),xbar[2]+tq*S[2,2]/sqrt(n))
c3=c(xbar[3]-tq*S[3,3]/sqrt(n),xbar[3]+tq*S[3,3]/sqrt(n))
c4=c(xbar[4]-tq*S[4,4]/sqrt(n),xbar[4]+tq*S[4,4]/sqrt(n))
c=c(c1,c2,c3,c4)
c
# PG.276 in book
# this is two of the same sample studied at different labs
# can use paired design
setwd("/home/evan/Downloads")
d=read.table("T6-1.dat")
d
names(d)=c("b1","s1","b2","s2")
boxplot(d)
diff=d[,3:4] - d[,1:2]
dim=dim(diff)
n=dim[1]
p=dim[2]
d_bar=colMeans(diff)
S=var(diff)
T2=n*t(d_bar)%*%solve(S)%*%d_bar
T2
(n-1)*p/(n-p)*qf(.95,p,n-p)
t.test(d$b1,d$b2,paired = TRUE)
t.test(d$s1,d$s2,paired = TRUE)
library(MVN)
mvn(diff,mvnTest = "royston")
#_________________ANOVA TESTING____________
data(iris)
d=iris
head(d)
names(d)
names(d)=c("SL","SW","PL","PW","SPEC")
library(MVN)
summary(aov(d$SW~d$SPEC))
pairwise.t.test(d$SW,d$SPEC,p.adj="bonf")
#^^all p-vals < .05, then all three species have diferent SW
#now by hand, still SW
x_bar=colMeans(d[,1:4])
x_barbar=x_bar[2]
xbar=tapply(d$SW,d$SPEC,mean)
tapply(d$SW,d$SPEC,length)
v1=d$SW-x_bar
#instead of tapply, maybe rep(,c(50,50,50))
v1 = as.vector(rep(xbar-x_barbar,c(50,50,50)))
v2 = as.vector(d$SW-rep(xbar,tapply(d$SW,d$SPEC,length)))
SStr=t(v1)%*%v1
SSres=t(v2)%*%v2
F=((SStr/(3-1))/(SSres/(150-3)))
pval=1-pf(F,3-1,150-3)
##_______________profile testing____________
#Pg.325
x1bar=c(6.8,7,3.9,4.7)
x2bar=c(6.6,7,4,4.5)
n1=30
n2=30
Spool=matrix(c(.606,.262,.066,.161,
.262,.637,.173,.143,
.066,.173,.81,.029,
.161,143,.029,.306),ncol=4)
plot.ts(x1bar)
plot.ts(x2bar)
C=matrix(c(-1,1,0,0,
0,-1,1,0,
0,0,-1,1),byrow=T,ncol=4)
#parrallel testing
T1=t(x1bar-x2bar)%*%t(C)%*%solve(C%*%Spool%*%t(C)*((1/n1)+(1/n2)))%*%C%*%(x1bar-x2bar)
T1
p=4
critValue=(n1+n2-2)*(p-1)/(n1+n2-p)*qf(.95,p-1,n1+n2-p)
#Coincidence testing
one=rep(1,4)
T2=t(one)%*%(x1bar-x2bar)/sqrt((1/n1 + 1/n2)*t(one)%*%Spool%*%one)
critval=qt(.975,n1+n2-2)
#_______________MANOVA____________________
library(MVN)
data(iris)
d=iris
head(d)
names(d)
#d=d[,1:4]
names(d)=c("SL","SW","PL","PW","SPEC")
#need to check homogeniety of variances of the 3 species, and MVN
s1=d[which(d$SPEC=="setosa"),1:3]
s2=d[which(d$SPEC=="versicolor"),1:3]
s3=d[which(d$SPEC=="virginica"),1:3]
mvn(s1,mvnTest = "royston")
mvn(s1,mvnTest = "mardia")
mvn(s1,mvnTest = "hz")
mvn(s2,mvnTest = "royston")
mvn(s2,mvnTest = "mardia")
mvn(s2,mvnTest = "hz")
mvn(s3,mvnTest = "royston")
mvn(s3,mvnTest = "mardia")
mvn(s3,mvnTest = "hz")
p=3
g=3
n=rep(50,3)
u=(sum(1/(n-1))-1/sum(n-1))*(2*p^2+3*p-1)/(6(p+1)*(g-1))
S1=var(s1)
S2=var(s2)
S3=var(s3)
Spooled=(S1+S2+S3)/3
D=c(det(S1),det(S2),det(S3))
M=sum(n-1)*log(det(Spooled))-sum((n-1)*log(D))
C=(1-u)*M
cr.value=qchisq(.95,p*(p+1)*(g-1)/2)
#___________________EXTRACREDIT_______________
setwd("D:\\Mathemagic\\Fall2019\\CS555_Multivar_Stats\\ExtraCerdit")
d=read.table('T6-6.dat')
d1=read.table('T6-5.dat')
summary(d)
summary(d1)
head(d)
library(MVN)
mvn(d1,mvnTest = "mardia")
mvn(d,mvnTest = "mardia")
S1=var(d)
S2=var(d1)
x1bar=colMeans(d)
x2bar=colMeans(d1)
g=2
N1=15
N2=16
p=4
q=2
#Spoold1=(1/(N1-g))*((N1-1)*S1+(N2-1)*S2)
Spoold1=(1/(N1+N2-8))*((N1-1)*S1+(N2-1)*S2)
#Spoold=(1/(N2-g))*((N1-1)*S1+(N2-1)*S2)
B=matrix(c(1,1,1,
1,2,4,
1,3,9,
1,4,16),byrow=T,ncol=3)
W1=(N1+N2-8)*S1
W2=(N2+N2-8)*S2
W1
#LINEAR MODELING
library(MASS)
d=Cars93
d$MPG=(d$MPG.city+d$MPG.highway)/2
head(d)
rm=pmatch(c('Manufacturer',"Model","MPG.city","MPG.highway","Make"),names(d))
d=d[,-rm]
str(d)
summary(lm(MPG~Type,data=d))
# all:
# lm(formula = MPG ~ Type, data = d)
#
# Residuals:
# Min 1Q Median 3Q Max
# -7.1667 -1.7812 -0.4444 1.2188 15.3333
#
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 26.2813 0.8677 30.288 < 2e-16 ***
# TypeLarge -3.7358 1.3594 -2.748 0.00729 **
# TypeMidsize -3.1449 1.1404 -2.758 0.00709 **
# TypeSmall 6.3854 1.1518 5.544 3.12e-07 ***
# TypeSporty -0.9955 1.2702 -0.784 0.43530
# TypeVan -6.8368 1.4462 -4.728 8.69e-06 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# Residual standard error: 3.471 on 87 degrees of freedom
# Multiple R-squared: 0.6091, Adjusted R-squared: 0.5867
# F-statistic: 27.12 on 5 and 87 DF, p-value: < 2.2e-16
#average 26 MPG for compact, type Large is -3.37 less mpg than compact, etc
d$Type=relevel(d$Type,ref="Small")
summary(lm(MPG~Type,data=d))
summary(lm(MPG~Weight,data=d))
#mpg vs weight, negatively correlated by -.007
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 49.3248593 1.6323248 30.22 <2e-16 ***
# Weight -0.0076797 0.0005218 -14.72 <2e-16 ***
d=d[,-c(19,20)]
base.model=lm(MPG~ ., data=d)
summary(base.model)
final.model=stepAIC(base.model)
summary(final.model)
plot(final.model)
| /MultivarTesting2.R | no_license | Ewalk21/CS_555_Mulitvariate_Statistics | R | false | false | 5,309 | r | data(iris)
d=iris
head(d)
names(d)
d=d[,1:4]
names(d)=c("SL","SW","PL","PW")
#hotelsters T^2 test
n=150
muo=c(4,5,10,2) #mu guess
xbar=colMeans(d)
S=var(d)
T2=n*t(xbar-muo)%*%solve(S)%*%(xbar-muo)
Ft=(n-1)*4/(n-4)*qf(.95,3,n-4)
#if they fail, then must do 4 univariate t-tests at alpha/4
#CR
tq=qt(1-(.05/(2*4)),n-1)
c1=c(xbar[1]-tq*S[1,1]/sqrt(n),xbar[1]+tq*S[1,1]/sqrt(n))
c2=c(xbar[2]-tq*S[2,2]/sqrt(n),xbar[2]+tq*S[2,2]/sqrt(n))
c3=c(xbar[3]-tq*S[3,3]/sqrt(n),xbar[3]+tq*S[3,3]/sqrt(n))
c4=c(xbar[4]-tq*S[4,4]/sqrt(n),xbar[4]+tq*S[4,4]/sqrt(n))
c=c(c1,c2,c3,c4)
c
# PG.276 in book
# this is two of the same sample studied at different labs
# can use paired design
setwd("/home/evan/Downloads")
d=read.table("T6-1.dat")
d
names(d)=c("b1","s1","b2","s2")
boxplot(d)
diff=d[,3:4] - d[,1:2]
dim=dim(diff)
n=dim[1]
p=dim[2]
d_bar=colMeans(diff)
S=var(diff)
T2=n*t(d_bar)%*%solve(S)%*%d_bar
T2
(n-1)*p/(n-p)*qf(.95,p,n-p)
t.test(d$b1,d$b2,paired = TRUE)
t.test(d$s1,d$s2,paired = TRUE)
library(MVN)
mvn(diff,mvnTest = "royston")
#_________________ANOVA TESTING____________
data(iris)
d=iris
head(d)
names(d)
names(d)=c("SL","SW","PL","PW","SPEC")
library(MVN)
summary(aov(d$SW~d$SPEC))
pairwise.t.test(d$SW,d$SPEC,p.adj="bonf")
#^^all p-vals < .05, then all three species have diferent SW
#now by hand, still SW
x_bar=colMeans(d[,1:4])
x_barbar=x_bar[2]
xbar=tapply(d$SW,d$SPEC,mean)
tapply(d$SW,d$SPEC,length)
v1=d$SW-x_bar
#instead of tapply, maybe rep(,c(50,50,50))
v1 = as.vector(rep(xbar-x_barbar,c(50,50,50)))
v2 = as.vector(d$SW-rep(xbar,tapply(d$SW,d$SPEC,length)))
SStr=t(v1)%*%v1
SSres=t(v2)%*%v2
F=((SStr/(3-1))/(SSres/(150-3)))
pval=1-pf(F,3-1,150-3)
##_______________profile testing____________
#Pg.325
x1bar=c(6.8,7,3.9,4.7)
x2bar=c(6.6,7,4,4.5)
n1=30
n2=30
Spool=matrix(c(.606,.262,.066,.161,
.262,.637,.173,.143,
.066,.173,.81,.029,
.161,143,.029,.306),ncol=4)
plot.ts(x1bar)
plot.ts(x2bar)
C=matrix(c(-1,1,0,0,
0,-1,1,0,
0,0,-1,1),byrow=T,ncol=4)
#parrallel testing
T1=t(x1bar-x2bar)%*%t(C)%*%solve(C%*%Spool%*%t(C)*((1/n1)+(1/n2)))%*%C%*%(x1bar-x2bar)
T1
p=4
critValue=(n1+n2-2)*(p-1)/(n1+n2-p)*qf(.95,p-1,n1+n2-p)
#Coincidence testing
one=rep(1,4)
T2=t(one)%*%(x1bar-x2bar)/sqrt((1/n1 + 1/n2)*t(one)%*%Spool%*%one)
critval=qt(.975,n1+n2-2)
#_______________MANOVA____________________
library(MVN)
data(iris)
d=iris
head(d)
names(d)
#d=d[,1:4]
names(d)=c("SL","SW","PL","PW","SPEC")
#need to check homogeniety of variances of the 3 species, and MVN
s1=d[which(d$SPEC=="setosa"),1:3]
s2=d[which(d$SPEC=="versicolor"),1:3]
s3=d[which(d$SPEC=="virginica"),1:3]
mvn(s1,mvnTest = "royston")
mvn(s1,mvnTest = "mardia")
mvn(s1,mvnTest = "hz")
mvn(s2,mvnTest = "royston")
mvn(s2,mvnTest = "mardia")
mvn(s2,mvnTest = "hz")
mvn(s3,mvnTest = "royston")
mvn(s3,mvnTest = "mardia")
mvn(s3,mvnTest = "hz")
p=3
g=3
n=rep(50,3)
u=(sum(1/(n-1))-1/sum(n-1))*(2*p^2+3*p-1)/(6(p+1)*(g-1))
S1=var(s1)
S2=var(s2)
S3=var(s3)
Spooled=(S1+S2+S3)/3
D=c(det(S1),det(S2),det(S3))
M=sum(n-1)*log(det(Spooled))-sum((n-1)*log(D))
C=(1-u)*M
cr.value=qchisq(.95,p*(p+1)*(g-1)/2)
#___________________EXTRACREDIT_______________
setwd("D:\\Mathemagic\\Fall2019\\CS555_Multivar_Stats\\ExtraCerdit")
d=read.table('T6-6.dat')
d1=read.table('T6-5.dat')
summary(d)
summary(d1)
head(d)
library(MVN)
mvn(d1,mvnTest = "mardia")
mvn(d,mvnTest = "mardia")
S1=var(d)
S2=var(d1)
x1bar=colMeans(d)
x2bar=colMeans(d1)
g=2
N1=15
N2=16
p=4
q=2
#Spoold1=(1/(N1-g))*((N1-1)*S1+(N2-1)*S2)
Spoold1=(1/(N1+N2-8))*((N1-1)*S1+(N2-1)*S2)
#Spoold=(1/(N2-g))*((N1-1)*S1+(N2-1)*S2)
B=matrix(c(1,1,1,
1,2,4,
1,3,9,
1,4,16),byrow=T,ncol=3)
W1=(N1+N2-8)*S1
W2=(N2+N2-8)*S2
W1
#LINEAR MODELING
library(MASS)
d=Cars93
d$MPG=(d$MPG.city+d$MPG.highway)/2
head(d)
rm=pmatch(c('Manufacturer',"Model","MPG.city","MPG.highway","Make"),names(d))
d=d[,-rm]
str(d)
summary(lm(MPG~Type,data=d))
# all:
# lm(formula = MPG ~ Type, data = d)
#
# Residuals:
# Min 1Q Median 3Q Max
# -7.1667 -1.7812 -0.4444 1.2188 15.3333
#
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 26.2813 0.8677 30.288 < 2e-16 ***
# TypeLarge -3.7358 1.3594 -2.748 0.00729 **
# TypeMidsize -3.1449 1.1404 -2.758 0.00709 **
# TypeSmall 6.3854 1.1518 5.544 3.12e-07 ***
# TypeSporty -0.9955 1.2702 -0.784 0.43530
# TypeVan -6.8368 1.4462 -4.728 8.69e-06 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# Residual standard error: 3.471 on 87 degrees of freedom
# Multiple R-squared: 0.6091, Adjusted R-squared: 0.5867
# F-statistic: 27.12 on 5 and 87 DF, p-value: < 2.2e-16
#average 26 MPG for compact, type Large is -3.37 less mpg than compact, etc
d$Type=relevel(d$Type,ref="Small")
summary(lm(MPG~Type,data=d))
summary(lm(MPG~Weight,data=d))
#mpg vs weight, negatively correlated by -.007
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 49.3248593 1.6323248 30.22 <2e-16 ***
# Weight -0.0076797 0.0005218 -14.72 <2e-16 ***
d=d[,-c(19,20)]
base.model=lm(MPG~ ., data=d)
summary(base.model)
final.model=stepAIC(base.model)
summary(final.model)
plot(final.model)
|
sprmda <-
function (formula, data, a, eta, fun="Hampel", probp1 = .95, hampelp2 = .975, hampelp3 = .999, probp4=0.01, yweights=TRUE, class=c("regfit", "lda"), prior=c(0.5,0.5), center = "median", scale = "qn", print=FALSE, numit=100, prec=0.01)
## 15/09/01 IH
## Sparse partial robust M regression for discriminant analysis
##
## uses daprpr, snipls, biweight, brokenstick, ldafitfun
##
## Inputs:
## formula .... an object of class formula.
## data ....... a data frame or list which contains the variables given in formula.
## a .......... the number of PRM components to be estimated in the model.
## eta ........ sparsity parameter; value between 0 and 1
## fun ........ an internal weighting function for case weights. Choices are "Hampel" (preferred), "Huber" or "Fair".
## probp1 ..... the 1-alpha value at which to set the first outlier cutoff for the weighting function.
## hampelp2 ... the 1-alpha values for second cutoff. Only applies to fun="Hampel".
## hampelp3 ... the 1-alpha values for third cutoff. Only applies to fun="Hampel".
## yweights ... logical; if TRUE y weights are calculated.
## class ...... type of classification; choices are "regfit" or "lda".
## prior ...... vector of length 2 with prior probabilities of the groups; only used if class="lda".
## center ..... type of centering of the data in form of a string that matches an R function, e.g. "mean" or "median".
## scale ...... type of scaling for the data in form of a string that matches an R function, e.g. "sd" or "qn" or alternatively "no" for no scaling.
## print ...... logical; if TRUE variables included in the model for each component are printed in the console.
## numit ...... the number of maximal iterations for the convergence of the coefficient estimates.
## prec ....... a value for the precision of estimation of the coefficients.
{
# --- Build the numeric data matrix from formula + data; column 1 is the response. ---
if(!class(formula)=="formula"){formula <- formula(formula)}
if(is.data.frame(data) | is.list(data)){
mt <- terms(formula, data=data)
yname <- dimnames(attr(mt,"factors"))[[1]][1]
ic <- attr(mt, "intercept")
# tryCatch returns TRUE (logical) on failure, which is detected below.
if (ic==0){
data <- tryCatch({data <- cbind(data[,which(colnames(data)==yname)], model.matrix(mt, data))},
error=function(err){
error <- TRUE
return(error)
})
} else{
# Drop the intercept column produced by model.matrix.
data <- tryCatch({data <- cbind(data[,which(colnames(data)==yname)],model.matrix(mt, data)[,-1])},
error=function(err){
error <- TRUE
return(error)
})
}
if (is.logical(data)){
stop("Data cannot be matched with formula.")
} else {
colnames(data)[1] <- dimnames(attr(mt,"factors"))[[1]][1]
}
} else {
stop("Wrong data fromat.")
}
# --- Validate the two-class response and recode labels to -1/+1 if needed. ---
if (length(unique(data[,1]))!=2){
stop("Wrong class labels. Only two factor levels or -1 and 1 are allowed.")
}
yorig <- data[,1]
if (sum(unique(data[,1])%in%c(-1,1))<2){
yfac <- as.factor(data[,1])
levels(yfac) <- c("-1", "1")
data[,1] <- as.numeric(as.character(yfac))
}
data <- as.matrix(data)
n <- nrow(data)
q <- ncol(data)
rnames <- rownames(data) # restore original rownames in the end
rownames(data) <- 1:n # 1:n are names of w etc.
p <- q - 1
# --- Sanity checks on tuning parameters. ---
if(length(a)>1){
warning("Only the first element of a is used.")
a <- a[1]
}
if(a>n|a>p){
stop("The number of components is too large.")
}
if (a<=0){
stop("The number of components has to be positive.")
}
if (length(eta)>1){
warning("Only the first element of eta is used.")
eta <- eta[1]
}
if(eta<0|eta>=1){
stop("eta has to come from the intervall [0,1)")
}
if(!any(fun == c("Hampel", "Huber", "Fair"))){
stop("Invalid weighting function. Choose Hampel, Huber or Fair for parameter fun.")
}
if(probp1>1|probp1<=0){
stop("probp1 is a probability. Choose a value between 0 and 1")
}
if(fun=="Hampel"){
if (!(probp1<hampelp2 & hampelp2<hampelp3 & hampelp3<=1)){
stop("Wrong choise of parameters for Hampel function. Use 0<probp1<hampelp2<hampelp3<=1")
}
}
if (sum(prior)!=1|any(prior<=0)|length(prior)!=2){
stop("Invalid prior probabilities. Choose two values between 0 and 1 with sum 1 for parameter prior.")
}
if(!any(class == c("regfit", "lda"))){
stop("Invalid classification method. Choose regfit or lda for parameter class.")
}
# --- Center/scale: X robustly via daprpr(center, scale); y classically (mean/sd). ---
scalet <- scale
if(scale=="no"){scalet <- "qn"}
ind1 <- which(data[,1]==1)
ind2 <- which(data[,1]==-1)
datamcX <- daprpr(data[,-1],center,scale)
datamcy <- daprpr(as.matrix(data[,1]), center.type="mean", scale.type="sd")
datac <- c(attr(datamcy,"Center"), attr(datamcX,"Center"))
datas <- c(attr(datamcy,"Scale"), attr(datamcX,"Scale"))
attr(datac,"Type") <- center
names(datac) <- colnames(data)
names(datas) <- colnames(data)
datamc <- cbind(datamcy, datamcX)
rm(datamcX)
y0 <- data[,1]
ys <- datamc[,1]
n1s <- length(ind1)
n2s <- length(ind2)
zerows <- vector(length=0)
# --- Starting case weights, group 1: PCA scores, number of relevant components
# chosen by the broken-stick rule, MCD-based Mahalanobis distances rescaled to
# a chi-square reference via the median. ---
pcx1 <- prcomp(daprpr(data[ind1,-1],center.type=center,scale.type=scalet))
spc1 <- pcx1$sdev^2
spc1 <- spc1/sum(spc1)
relcomp1 <- which(spc1 - brokenstick(min(q-1,n1s)) <=0)[1]-1
if(relcomp1==0){relcomp1 <- 1}
r1 <- covMcd(as.matrix(pcx1$x[,1:relcomp1]))
wx1 <- mahalanobis(as.matrix(pcx1$x[,1:relcomp1]),r1$center, r1$cov)
# wx1 <- sqrt(apply(daprpr(as.matrix(pcx1$x[,1:relcomp1]),center,scalet)^2,1,sum))
wx1 <- wx1/median(wx1) *qchisq(0.5,relcomp1)
# --- Same construction for group 2. ---
pcx2 <- prcomp(daprpr(data[ind2,-1],center.type=center,scale.type=scalet))
spc2 <- pcx2$sdev^2
spc2 <- spc2/sum(spc2)
relcomp2 <- which(spc2 - brokenstick(min(q-1,n2s)) <=0)[1]-1
if(relcomp2==0){relcomp2 <- 1}
r2 <- covMcd(as.matrix(pcx2$x[,1:relcomp2]))
wx2 <- mahalanobis(as.matrix(pcx2$x[,1:relcomp2]),r2$center, r2$cov)
# wx2 <- sqrt(apply(daprpr(as.matrix(pcx2$x[,1:relcomp2]),center,scalet)^2,1,sum))
wx2 <- wx2/median(wx2) *qchisq(0.5,relcomp2)
# --- Convert distances to case weights with the chosen weighting function. ---
if(fun=="Fair"){
wx1 <- 1/((1 + abs(wx1/(qchisq(probp1,relcomp1)*2)))^2) # mod: wx/(probct*2) instead of wx/probct*2
wx2 <- 1/((1 + abs(wx2/(qchisq(probp1,relcomp2)*2)))^2)
} else if(fun =="Huber") {
probct <- qchisq(probp1,relcomp1)
wx1[which(wx1 <= probct)] <- 1
wx1[which(wx1 > probct)] <- probct/abs(wx1[which(wx1 > probct)])
probct <- qchisq(probp1,relcomp2)
wx2[which(wx2 <= probct)] <- 1
wx2[which(wx2 > probct)] <- probct/abs(wx2[which(wx2 > probct)])
} else if(fun =="Hampel") {
# Three-part redescending weights: 1 inside probct, linear taper, 0 beyond hampelr.
probct <- qchisq(probp1,relcomp1)
hampelb <- qchisq(hampelp2,relcomp1)
hampelr <- qchisq(hampelp3,relcomp1)
wx1[which(wx1 <= probct)] <- 1
wx1[which(wx1 > probct & wx1 <= hampelb)] <- probct/abs(wx1[which(wx1 > probct & wx1 <= hampelb)])
wx1[which(wx1 > hampelb & wx1 <= hampelr)] <- probct*(hampelr-abs(wx1[which(wx1 > hampelb & wx1 <= hampelr)]))/(hampelr -hampelb)*1/abs(wx1[which(wx1 > hampelb & wx1 <= hampelr)])
wx1[which(wx1 > hampelr)] <- 0
probct <- qchisq(probp1,relcomp2)
hampelb <- qchisq(hampelp2,relcomp2)
hampelr <- qchisq(hampelp3,relcomp2)
wx2[which(wx2 <= probct)] <- 1
wx2[which(wx2 > probct & wx2 <= hampelb)] <- probct/abs(wx2[which(wx2 > probct & wx2 <= hampelb)])
wx2[which(wx2 > hampelb & wx2 <= hampelr)] <- probct*(hampelr-abs(wx2[which(wx2 > hampelb & wx2 <= hampelr)]))/(hampelr -hampelb)*1/abs(wx2[which(wx2 > hampelb & wx2 <= hampelr)])
wx2[which(wx2 > hampelr)] <- 0
}
# --- Merge group weights; floor tiny weights at 1e-6 to avoid degenerate rows. ---
w <- vector(length=n)
w[ind2] <- wx2
w[ind1] <- wx1
names(w) <- 1:n
if(any(w<1e-6)){
w0 <- which(w<1e-6)
w <- replace(w,list=w0,values=1e-6)
we <- w
} else {
we <- w
}
dataw <- as.data.frame(datamc * we)
colnames(dataw) <- colnames(data)
# --- IRLS loop: refit sparse PLS (snipls) on reweighted data until the squared
# norm of the case weights stabilises (relative change < prec) or numit is hit. ---
loops <- 1
weold <- 10^-5
difference <- 1
wnorm <- vector(length=0)
while ((difference > prec) && (loops < numit)) {
res.snipls <- snipls(data=dataw,eta,a,print=FALSE)
Tpls <- datamc[,-1]%*%res.snipls$R
cov1 <- covMcd(as.matrix(Tpls[ind1,]))
cov2 <- covMcd(as.matrix(Tpls[ind2,]))
wlist <- int_weight(as.matrix(Tpls[ind1,]), as.matrix(Tpls[ind2,]), ind1, ind2, y0, fun, probp1, hampelp2, hampelp3, probp4, yweights, center1=cov1$center, cov1=cov1$cov,center2=cov2$center,cov2=cov2$cov)
we <- wlist$we
dataw <- as.data.frame(datamc * we)
colnames(dataw) <- colnames(data)
difference <- abs(sum(we^2) - weold)/weold
weold <- sum(we^2)
wnorm <- c(wnorm, difference)
loops <- loops + 1
}
if (difference > prec){
warning(paste("Method did not converge. The scaled difference between norms of the vectors of case weights is ", round(difference, digits=4)))
}
w <- wlist$we
wt <- wlist$wte
if (yweights){
wy <- wlist$wye
} else {
wy <- rep(1,n)
}
# --- Final sparse PLS fit with the converged weights. ---
res.snipls <- snipls(data=dataw,eta,a,print=FALSE)
b <- coef(res.snipls)
P <- res.snipls$loadings
W <- res.snipls$W
R <- res.snipls$R
Tpls <- datamc[,-1]%*% R
if(print==TRUE){
for(i in 1:a){cat("Variables retained for ",i," latent variables: \n",res.snipls$Vars[[2*(i-1)+1]],"\n")}
}
# Back-transform coefficients to the original variable scales.
coef <- datas[1]/datas[2:q]*b
if (class=="lda"){
# LDA in the score space
ind1 <- which(data[,1]==1)
ind2 <- which(data[,1]==-1)
# Weighted group means of the scores.
mt1 <- apply(as.matrix(w[ind1]*Tpls[ind1,]),2,sum)/sum(w[ind1])
mt2 <- apply(as.matrix(w[ind2]*Tpls[ind2,]),2,sum)/sum(w[ind2])
# Weighted within-group scatter of the scores.
Si <- matrix(0, ncol=a, nrow=a)
for (i in 1:n){
if (i %in% ind1){
Si <- Si + w[i]*matrix(Tpls[i,]-mt1, ncol=1)%*% matrix(Tpls[i,]-mt1, nrow=1)
} else if (i %in% ind2){
Si <- Si + w[i]*matrix(Tpls[i,]-mt2, ncol=1)%*% matrix(Tpls[i,]-mt2, nrow=1)
}
}
covt <- 1/(sum(w)-2)*Si
ldafit <- apply(Tpls, 1, ldafitfun, covt, mt1, mt2, prior)
# Map which.max index (1 or 2) onto class labels (+1 or -1).
ldaclass <- (apply(ldafit, 2, which.max)-1.5)*(-2)
ldamod <- list(cov=covt, m1=mt1, m2=mt2)
} else {
ldafit <- NULL
ldaclass <- NULL
ldamod <- NULL
}
# --- Intercepts: robust (median) or classical (mean) residual location,
# both on the original scale (`intercept`) and the centered/scaled scale (`b0`). ---
if(center=="mean"){
intercept <- mean(data[,1] - as.matrix(data[,2:q])%*%coef)
} else {
intercept <- median(data[,1] - as.matrix(data[,2:q])%*%coef)
}
if(!scale=="no"){
if (center=="mean"){
b0 <- mean(data[,1]- as.matrix(datamc[,-1])%*%b) # ? y is not scaled here
} else {
b0 <- median(data[,1]- as.matrix(datamc[,-1])%*%b) # ?
}
} else {
if (center == "mean") {
b0 <- mean(data[,1] - as.matrix(data[,2:q]) %*% b)
} else {
b0 <- median(data[,1] - as.matrix(data[,2:q]) %*% b)
}
}
yfit <- as.vector(as.matrix(data[,2:q]) %*% coef + intercept)
resid <- as.vector(data[,1] - yfit)
# Record cutoff constants for reporting in the Call attributes.
constants <- paste("cutoff1 =",probp1)
cutoff <- probp1
if(fun == "Hampel"){
constants <- c(constants, paste("cutoff2 =",hampelp2), paste("cutoff3 =",hampelp3))
cutoff <- c(cutoff, hampelp2, hampelp3)
}
# --- Restore the original row names on all returned vectors/matrices. ---
names(ys) <- rnames
names(y0) <- rnames
names(wy) <- rnames
names(wt) <- rnames
names(w) <- rnames
dimnames(Tpls)[[1]] <- rnames
dimnames(Tpls)[[2]] <- paste0("Comp", 1:(dim(Tpls)[2]))
dimnames(W)[[2]] <- paste0("Comp", 1:(dim(W)[2]))
dimnames(P)[[2]] <- paste0("Comp", 1:(dim(P)[2]))
names(yfit) <- rnames
names(resid) <- rnames
inputs <- list(a=a,eta=eta,formula=formula, fun=fun,constants =cutoff,X0=data[,2:q], Xs=datamc[,-1], ys=ys, y0=y0, center=center,scale=scale, prior=prior)
attr(coef,"Call") <- c("Sparse PRM Regression", paste(a, "component(s)"), paste("eta = ",eta), fun, constants)
attr(b,"Call") <- c("Sparse PRM Regression", paste(a, "component(s)"), paste("eta = ",eta), fun, constants, paste(center,"centering"), paste(scale,"scaling"))
# --- Assemble the output object; its S3 class depends on the requested method. ---
output <- list(scores = Tpls, R=R,loadings = P,
wy = wy, wt = wt, w = w, used.vars=res.snipls$Vars, Yvar = as.vector(res.snipls$Yev), Xvar=as.vector(res.snipls$Xev),
ldamod=ldamod, ldafit=ldafit, ldaclass=ldaclass,
coefficients = coef, intercept = intercept, residuals = resid, fitted.values = yfit,
coefficients.scaled=b, intercept.scaled=b0,
YMeans = datac[1], XMeans = datac[2:q], Yscales = datas[1], Xscales = datas[2:q],
inputs=inputs)
if(class=="lda"){
class(output) <- "sprmda"
} else if (class=="regfit"){
class(output) <- "sprm"
}
return(output)
}
| /R/sprmda.R | no_license | cran/sprm | R | false | false | 11,923 | r | sprmda <-
function (formula, data, a, eta, fun="Hampel", probp1 = .95, hampelp2 = .975, hampelp3 = .999, probp4=0.01, yweights=TRUE, class=c("regfit", "lda"), prior=c(0.5,0.5), center = "median", scale = "qn", print=FALSE, numit=100, prec=0.01)
## 15/09/01 IH
## Sparse partial robust M regression for discriminant analysis
##
## uses daprpr, snipls, biweight, brokenstick, ldafitfun
##
## Inputs:
## formula .... an object of class formula.
## data ....... a data frame or list which contains the variables given in formula.
## a .......... the number of PRM components to be estimated in the model.
## eta ........ sparsity parameter; value between 0 and 1
## fun ........ an internal weighting function for case weights. Choices are "Hampel" (preferred), "Huber" or "Fair".
## probp1 ..... the 1-alpha value at which to set the first outlier cutoff for the weighting function.
## hampelp2 ... the 1-alpha values for second cutoff. Only applies to fun="Hampel".
## hampelp3 ... the 1-alpha values for third cutoff. Only applies to fun="Hampel".
## yweights ... logical; if TRUE y weights are calculated.
## class ...... type of classification; choices are "regfit" or "lda".
## prior ...... vector of length 2 with prior probabilities of the groups; only used if class="lda".
## center ..... type of centering of the data in form of a string that matches an R function, e.g. "mean" or "median".
## scale ...... type of scaling for the data in form of a string that matches an R function, e.g. "sd" or "qn" or alternatively "no" for no scaling.
## print ...... logical; if TRUE variables included in the model for each component are printed in the console.
## numit ...... the number of maximal iterations for the convergence of the coefficient estimates.
## prec ....... a value for the precision of estimation of the coefficients.
{
# --- Build the numeric data matrix from formula + data; column 1 is the response. ---
if(!class(formula)=="formula"){formula <- formula(formula)}
if(is.data.frame(data) | is.list(data)){
mt <- terms(formula, data=data)
yname <- dimnames(attr(mt,"factors"))[[1]][1]
ic <- attr(mt, "intercept")
# tryCatch returns TRUE (logical) on failure, which is detected below.
if (ic==0){
data <- tryCatch({data <- cbind(data[,which(colnames(data)==yname)], model.matrix(mt, data))},
error=function(err){
error <- TRUE
return(error)
})
} else{
# Drop the intercept column produced by model.matrix.
data <- tryCatch({data <- cbind(data[,which(colnames(data)==yname)],model.matrix(mt, data)[,-1])},
error=function(err){
error <- TRUE
return(error)
})
}
if (is.logical(data)){
stop("Data cannot be matched with formula.")
} else {
colnames(data)[1] <- dimnames(attr(mt,"factors"))[[1]][1]
}
} else {
stop("Wrong data fromat.")
}
# --- Validate the two-class response and recode labels to -1/+1 if needed. ---
if (length(unique(data[,1]))!=2){
stop("Wrong class labels. Only two factor levels or -1 and 1 are allowed.")
}
yorig <- data[,1]
if (sum(unique(data[,1])%in%c(-1,1))<2){
yfac <- as.factor(data[,1])
levels(yfac) <- c("-1", "1")
data[,1] <- as.numeric(as.character(yfac))
}
data <- as.matrix(data)
n <- nrow(data)
q <- ncol(data)
rnames <- rownames(data) # restore original rownames in the end
rownames(data) <- 1:n # 1:n are names of w etc.
p <- q - 1
# --- Sanity checks on tuning parameters. ---
if(length(a)>1){
warning("Only the first element of a is used.")
a <- a[1]
}
if(a>n|a>p){
stop("The number of components is too large.")
}
if (a<=0){
stop("The number of components has to be positive.")
}
if (length(eta)>1){
warning("Only the first element of eta is used.")
eta <- eta[1]
}
if(eta<0|eta>=1){
stop("eta has to come from the intervall [0,1)")
}
if(!any(fun == c("Hampel", "Huber", "Fair"))){
stop("Invalid weighting function. Choose Hampel, Huber or Fair for parameter fun.")
}
if(probp1>1|probp1<=0){
stop("probp1 is a probability. Choose a value between 0 and 1")
}
if(fun=="Hampel"){
if (!(probp1<hampelp2 & hampelp2<hampelp3 & hampelp3<=1)){
stop("Wrong choise of parameters for Hampel function. Use 0<probp1<hampelp2<hampelp3<=1")
}
}
if (sum(prior)!=1|any(prior<=0)|length(prior)!=2){
stop("Invalid prior probabilities. Choose two values between 0 and 1 with sum 1 for parameter prior.")
}
if(!any(class == c("regfit", "lda"))){
stop("Invalid classification method. Choose regfit or lda for parameter class.")
}
# --- Center/scale: X robustly via daprpr(center, scale); y classically (mean/sd). ---
scalet <- scale
if(scale=="no"){scalet <- "qn"}
ind1 <- which(data[,1]==1)
ind2 <- which(data[,1]==-1)
datamcX <- daprpr(data[,-1],center,scale)
datamcy <- daprpr(as.matrix(data[,1]), center.type="mean", scale.type="sd")
datac <- c(attr(datamcy,"Center"), attr(datamcX,"Center"))
datas <- c(attr(datamcy,"Scale"), attr(datamcX,"Scale"))
attr(datac,"Type") <- center
names(datac) <- colnames(data)
names(datas) <- colnames(data)
datamc <- cbind(datamcy, datamcX)
rm(datamcX)
y0 <- data[,1]
ys <- datamc[,1]
n1s <- length(ind1)
n2s <- length(ind2)
zerows <- vector(length=0)
# --- Starting case weights, group 1: PCA scores, number of relevant components
# chosen by the broken-stick rule, MCD-based Mahalanobis distances rescaled to
# a chi-square reference via the median. ---
pcx1 <- prcomp(daprpr(data[ind1,-1],center.type=center,scale.type=scalet))
spc1 <- pcx1$sdev^2
spc1 <- spc1/sum(spc1)
relcomp1 <- which(spc1 - brokenstick(min(q-1,n1s)) <=0)[1]-1
if(relcomp1==0){relcomp1 <- 1}
r1 <- covMcd(as.matrix(pcx1$x[,1:relcomp1]))
wx1 <- mahalanobis(as.matrix(pcx1$x[,1:relcomp1]),r1$center, r1$cov)
# wx1 <- sqrt(apply(daprpr(as.matrix(pcx1$x[,1:relcomp1]),center,scalet)^2,1,sum))
wx1 <- wx1/median(wx1) *qchisq(0.5,relcomp1)
# --- Same construction for group 2. ---
pcx2 <- prcomp(daprpr(data[ind2,-1],center.type=center,scale.type=scalet))
spc2 <- pcx2$sdev^2
spc2 <- spc2/sum(spc2)
relcomp2 <- which(spc2 - brokenstick(min(q-1,n2s)) <=0)[1]-1
if(relcomp2==0){relcomp2 <- 1}
r2 <- covMcd(as.matrix(pcx2$x[,1:relcomp2]))
wx2 <- mahalanobis(as.matrix(pcx2$x[,1:relcomp2]),r2$center, r2$cov)
# wx2 <- sqrt(apply(daprpr(as.matrix(pcx2$x[,1:relcomp2]),center,scalet)^2,1,sum))
wx2 <- wx2/median(wx2) *qchisq(0.5,relcomp2)
# --- Convert distances to case weights with the chosen weighting function. ---
if(fun=="Fair"){
wx1 <- 1/((1 + abs(wx1/(qchisq(probp1,relcomp1)*2)))^2) # mod: wx/(probct*2) instead of wx/probct*2
wx2 <- 1/((1 + abs(wx2/(qchisq(probp1,relcomp2)*2)))^2)
} else if(fun =="Huber") {
probct <- qchisq(probp1,relcomp1)
wx1[which(wx1 <= probct)] <- 1
wx1[which(wx1 > probct)] <- probct/abs(wx1[which(wx1 > probct)])
probct <- qchisq(probp1,relcomp2)
wx2[which(wx2 <= probct)] <- 1
wx2[which(wx2 > probct)] <- probct/abs(wx2[which(wx2 > probct)])
} else if(fun =="Hampel") {
# Three-part redescending weights: 1 inside probct, linear taper, 0 beyond hampelr.
probct <- qchisq(probp1,relcomp1)
hampelb <- qchisq(hampelp2,relcomp1)
hampelr <- qchisq(hampelp3,relcomp1)
wx1[which(wx1 <= probct)] <- 1
wx1[which(wx1 > probct & wx1 <= hampelb)] <- probct/abs(wx1[which(wx1 > probct & wx1 <= hampelb)])
wx1[which(wx1 > hampelb & wx1 <= hampelr)] <- probct*(hampelr-abs(wx1[which(wx1 > hampelb & wx1 <= hampelr)]))/(hampelr -hampelb)*1/abs(wx1[which(wx1 > hampelb & wx1 <= hampelr)])
wx1[which(wx1 > hampelr)] <- 0
probct <- qchisq(probp1,relcomp2)
hampelb <- qchisq(hampelp2,relcomp2)
hampelr <- qchisq(hampelp3,relcomp2)
wx2[which(wx2 <= probct)] <- 1
wx2[which(wx2 > probct & wx2 <= hampelb)] <- probct/abs(wx2[which(wx2 > probct & wx2 <= hampelb)])
wx2[which(wx2 > hampelb & wx2 <= hampelr)] <- probct*(hampelr-abs(wx2[which(wx2 > hampelb & wx2 <= hampelr)]))/(hampelr -hampelb)*1/abs(wx2[which(wx2 > hampelb & wx2 <= hampelr)])
wx2[which(wx2 > hampelr)] <- 0
}
# --- Merge group weights; floor tiny weights at 1e-6 to avoid degenerate rows. ---
w <- vector(length=n)
w[ind2] <- wx2
w[ind1] <- wx1
names(w) <- 1:n
if(any(w<1e-6)){
w0 <- which(w<1e-6)
w <- replace(w,list=w0,values=1e-6)
we <- w
} else {
we <- w
}
dataw <- as.data.frame(datamc * we)
colnames(dataw) <- colnames(data)
# --- IRLS loop: refit sparse PLS (snipls) on reweighted data until the squared
# norm of the case weights stabilises (relative change < prec) or numit is hit. ---
loops <- 1
weold <- 10^-5
difference <- 1
wnorm <- vector(length=0)
while ((difference > prec) && (loops < numit)) {
res.snipls <- snipls(data=dataw,eta,a,print=FALSE)
Tpls <- datamc[,-1]%*%res.snipls$R
cov1 <- covMcd(as.matrix(Tpls[ind1,]))
cov2 <- covMcd(as.matrix(Tpls[ind2,]))
wlist <- int_weight(as.matrix(Tpls[ind1,]), as.matrix(Tpls[ind2,]), ind1, ind2, y0, fun, probp1, hampelp2, hampelp3, probp4, yweights, center1=cov1$center, cov1=cov1$cov,center2=cov2$center,cov2=cov2$cov)
we <- wlist$we
dataw <- as.data.frame(datamc * we)
colnames(dataw) <- colnames(data)
difference <- abs(sum(we^2) - weold)/weold
weold <- sum(we^2)
wnorm <- c(wnorm, difference)
loops <- loops + 1
}
if (difference > prec){
warning(paste("Method did not converge. The scaled difference between norms of the vectors of case weights is ", round(difference, digits=4)))
}
w <- wlist$we
wt <- wlist$wte
if (yweights){
wy <- wlist$wye
} else {
wy <- rep(1,n)
}
# --- Final sparse PLS fit with the converged weights. ---
res.snipls <- snipls(data=dataw,eta,a,print=FALSE)
b <- coef(res.snipls)
P <- res.snipls$loadings
W <- res.snipls$W
R <- res.snipls$R
Tpls <- datamc[,-1]%*% R
if(print==TRUE){
for(i in 1:a){cat("Variables retained for ",i," latent variables: \n",res.snipls$Vars[[2*(i-1)+1]],"\n")}
}
# Back-transform coefficients to the original variable scales.
coef <- datas[1]/datas[2:q]*b
if (class=="lda"){
# LDA in the score space
ind1 <- which(data[,1]==1)
ind2 <- which(data[,1]==-1)
# Weighted group means of the scores.
mt1 <- apply(as.matrix(w[ind1]*Tpls[ind1,]),2,sum)/sum(w[ind1])
mt2 <- apply(as.matrix(w[ind2]*Tpls[ind2,]),2,sum)/sum(w[ind2])
# Weighted within-group scatter of the scores.
Si <- matrix(0, ncol=a, nrow=a)
for (i in 1:n){
if (i %in% ind1){
Si <- Si + w[i]*matrix(Tpls[i,]-mt1, ncol=1)%*% matrix(Tpls[i,]-mt1, nrow=1)
} else if (i %in% ind2){
Si <- Si + w[i]*matrix(Tpls[i,]-mt2, ncol=1)%*% matrix(Tpls[i,]-mt2, nrow=1)
}
}
covt <- 1/(sum(w)-2)*Si
ldafit <- apply(Tpls, 1, ldafitfun, covt, mt1, mt2, prior)
# Map which.max index (1 or 2) onto class labels (+1 or -1).
ldaclass <- (apply(ldafit, 2, which.max)-1.5)*(-2)
ldamod <- list(cov=covt, m1=mt1, m2=mt2)
} else {
ldafit <- NULL
ldaclass <- NULL
ldamod <- NULL
}
# --- Intercepts: robust (median) or classical (mean) residual location,
# both on the original scale (`intercept`) and the centered/scaled scale (`b0`). ---
if(center=="mean"){
intercept <- mean(data[,1] - as.matrix(data[,2:q])%*%coef)
} else {
intercept <- median(data[,1] - as.matrix(data[,2:q])%*%coef)
}
if(!scale=="no"){
if (center=="mean"){
b0 <- mean(data[,1]- as.matrix(datamc[,-1])%*%b) # ? y is not scaled here
} else {
b0 <- median(data[,1]- as.matrix(datamc[,-1])%*%b) # ?
}
} else {
if (center == "mean") {
b0 <- mean(data[,1] - as.matrix(data[,2:q]) %*% b)
} else {
b0 <- median(data[,1] - as.matrix(data[,2:q]) %*% b)
}
}
yfit <- as.vector(as.matrix(data[,2:q]) %*% coef + intercept)
resid <- as.vector(data[,1] - yfit)
# Record cutoff constants for reporting in the Call attributes.
constants <- paste("cutoff1 =",probp1)
cutoff <- probp1
if(fun == "Hampel"){
constants <- c(constants, paste("cutoff2 =",hampelp2), paste("cutoff3 =",hampelp3))
cutoff <- c(cutoff, hampelp2, hampelp3)
}
# --- Restore the original row names on all returned vectors/matrices. ---
names(ys) <- rnames
names(y0) <- rnames
names(wy) <- rnames
names(wt) <- rnames
names(w) <- rnames
dimnames(Tpls)[[1]] <- rnames
dimnames(Tpls)[[2]] <- paste0("Comp", 1:(dim(Tpls)[2]))
dimnames(W)[[2]] <- paste0("Comp", 1:(dim(W)[2]))
dimnames(P)[[2]] <- paste0("Comp", 1:(dim(P)[2]))
names(yfit) <- rnames
names(resid) <- rnames
inputs <- list(a=a,eta=eta,formula=formula, fun=fun,constants =cutoff,X0=data[,2:q], Xs=datamc[,-1], ys=ys, y0=y0, center=center,scale=scale, prior=prior)
attr(coef,"Call") <- c("Sparse PRM Regression", paste(a, "component(s)"), paste("eta = ",eta), fun, constants)
attr(b,"Call") <- c("Sparse PRM Regression", paste(a, "component(s)"), paste("eta = ",eta), fun, constants, paste(center,"centering"), paste(scale,"scaling"))
# --- Assemble the output object; its S3 class depends on the requested method. ---
output <- list(scores = Tpls, R=R,loadings = P,
wy = wy, wt = wt, w = w, used.vars=res.snipls$Vars, Yvar = as.vector(res.snipls$Yev), Xvar=as.vector(res.snipls$Xev),
ldamod=ldamod, ldafit=ldafit, ldaclass=ldaclass,
coefficients = coef, intercept = intercept, residuals = resid, fitted.values = yfit,
coefficients.scaled=b, intercept.scaled=b0,
YMeans = datac[1], XMeans = datac[2:q], Yscales = datas[1], Xscales = datas[2:q],
inputs=inputs)
if(class=="lda"){
class(output) <- "sprmda"
} else if (class=="regfit"){
class(output) <- "sprm"
}
return(output)
}
|
# Extracted example script from the prefeR::infer help page.
library(prefeR)
### Name: infer
### Title: A function that estimates the user's underlying utility
### function.
### Aliases: infer
### ** Examples
# Three alternatives over three attributes, with one prior per attribute.
p <- prefEl(data = data.frame(c(1,0,1), c(0,1,1), c(1,1,1)),
priors = c(Normal(0, 1), Exp(0.5), Flat()))
# prefeR overloads %>% as a preference operator: alternative 1 preferred to 2.
p$addPref(1 %>% 2)
# Estimate the utility weights from the stated preference.
infer(p, estimate = "recommended")
| /data/genthat_extracted_code/prefeR/examples/infer.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 332 | r | library(prefeR)
### Name: infer
### Title: A function that estimates the user's underlying utility
### function.
### Aliases: infer
### ** Examples
# Three alternatives over three attributes, with one prior per attribute.
p <- prefEl(data = data.frame(c(1,0,1), c(0,1,1), c(1,1,1)),
priors = c(Normal(0, 1), Exp(0.5), Flat()))
# prefeR overloads %>% as a preference operator: alternative 1 preferred to 2.
p$addPref(1 %>% 2)
# Estimate the utility weights from the stated preference.
infer(p, estimate = "recommended")
|
# --- Session setup: packages, fonts, and a global ggplot2 theme. ---
library(tidyverse)
library(zeallot)
library(lubridate)
# Register system fonts for Windows graphics devices (Karla is used below).
extrafont::loadfonts(device = 'win', quiet = TRUE)
# Global theme: minimal base, Karla typeface, ggtext markdown-aware titles,
# light gray major grid, white background.
theme_set(theme_minimal())
theme_update(
text = element_text(family = 'Karla'),
title = element_text('Karla', size = 14, color = 'gray20'),
plot.title = ggtext::element_markdown('Karla', face = 'bold', size = 18, color = 'gray20'),
plot.title.position = 'plot',
plot.subtitle = ggtext::element_markdown('Karla', face = 'bold', size = 16, color = 'gray50'),
axis.text = element_text('Karla', size = 14),
axis.title = element_text(size = 14, face = 'bold', hjust = 0.99),
axis.line = element_blank(),
panel.grid.major = element_line(color = 'gray80'),
panel.grid.minor = element_line(color = 'gray80'),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank(),
plot.margin = margin(10, 10, 10, 10),
plot.background = element_rect(fill = '#ffffff', color = NA),
plot.caption = ggtext::element_markdown('Karla', size = 12, color = 'gray20', hjust = 1),
# plot.caption = element_text('Karla', size = 12, color = 'gray20', hjust = 1),
plot.caption.position = 'plot',
plot.tag = ggtext::element_markdown('Karla', size = 12, color = 'gray20', hjust = 0),
plot.tag.position = c(.01, 0.015),
legend.text = element_text(color = 'gray20', size = 12),
# strip.text = element_text(color = 'gray20', size = 14),
# strip.background = element_blank(),
panel.background = element_rect(fill = '#ffffff', color = NA)
)
# Default font/size for geom_text layers.
update_geom_defaults('text', list(family = 'Karla', size = 4))
# Memoised accessor for this project's data directory name (a constant).
get_dir_proj <- memoise::memoise({
  function() {
    '33-2020_euros_ratings'
  }
})
# Read "<dir_proj>/<x>.csv" with readr::read_csv.
# Relies on the global `dir_proj` and on the tidyverse being attached.
import_csv <- function(x) {
  csv_name <- paste0(x, '.csv')
  read_csv(file.path(dir_proj, csv_name))
}
# made this
# made this
# League-id -> display-name/badge mapping. When a badge image exists
# (file_png not NA), `path` points into the earlier "25-202021_soccer_refs"
# project folder; otherwise `path` is NA.
import_league_mapping <- function() {
import_csv('league_mapping') %>%
mutate(path = ifelse(!is.na(file_png), file.path('25-202021_soccer_refs', sprintf('%s.png', file_png)), NA_character_)) %>%
select(-file_png)
}
# Load the mapping once at script start.
league_mapping <- import_league_mapping()
# Memoised data loader/assembler.
# Reads the project CSVs, harmonises league names, joins leagues/teams/players/
# ratings/xG, keeps domestic leagues with >=5 seasons since 2012 and outfield
# players aged 18-35 with enough minutes, computes per-minute and per-90 rates,
# and -- when adjust=TRUE -- shrinks the per-minute rate in `col` ("vaep" or
# "xg") toward the pool mean via a method-of-moments beta prior.
# Returns list(data, adjust, baseline, baseline_by_pos).
do_import <- memoise::memoise({
function(col = 'vaep', adjust = FALSE) {
# Symbol for the per-90 version of the requested metric ("vaep_p90"/"xg_p90").
col_sym <- sym(sprintf('%s_p90', col))
leagues_init <-
import_csv('leagues') %>%
rename(league_name = name) %>%
mutate(
# Some manual adjustments prior to using some "auto" name correction (by descending season)
across(
league_name,
~case_when(
country == 'USA' & league_name %>% str_detect('Major League Soccer') ~ 'Major League Soccer',
country == 'China' & league_name %>% str_detect('Super League') ~ 'Super League',
country == 'Spain' & league_name == 'Primera Division' ~ 'LaLiga',
TRUE ~ .x
)
)
)
# One canonical name per league id: strip "Grp." suffixes, then prefer the
# shortest name from the most recent season.
league_names <-
leagues_init %>%
mutate(
across(league_name, ~str_remove_all(.x, '\\s+[Gg]rp.*'))
) %>%
mutate(n_char = nchar(league_name)) %>%
arrange(league_id, n_char, desc(season)) %>%
group_by(league_id) %>%
filter(row_number() == 1L) %>%
ungroup() %>%
select(-season)
league_names
leagues <- leagues_init %>% select(-league_name) %>% inner_join(league_names)
leagues
teams <- import_csv('teams')
players <-
import_csv('players') %>%
mutate(across(dob, lubridate::date)) %>%
rename(player_id = id, player_name = name)
player_xg <- import_csv('player_xg_by_season') %>% rename(xg = xg_value)
# VAEP ratings: total = offensive + defensive value.
player_ratings <-
import_csv('player_ratings_by_season') %>%
rename(ovaep = offensive_value, dvaep = defensive_value) %>%
mutate(
vaep = ovaep + dvaep
)
# player_ratings %>%
# select(player_id, team_id, league_id, season, gp1 = games_played, mp1 = minutes) %>%
# full_join(
# player_xg %>% select(player_id, team_id, league_id, season, gp2 = games_played, mp2 = minutes)
# ) -> z
# z %>%
# filter(gp1 < gp2)
# Domestic leagues with at least 5 seasons of data since 2012.
leagues_n <-
leagues %>%
filter(!(country %in% c('International', 'Europe'))) %>%
filter(season >= 2012) %>%
distinct(country, league_id, league_name, season) %>%
# arrange(country, league_id, league_name, season) %>%
count(country, league_id, league_name, sort = TRUE) %>%
filter(n >= 5L)
leagues_n
# Age bins: [18,24), [24,27), [27,30), [30,36).
age_grps <-
tibble(
from = c(18L, 24L, 27L, 30L),
to = c(24L, 27L, 30L, 36L)
)
age_grps
# Collapse detailed positions into attack/midfield/defense groups (GK dropped below).
position_mapping <-
tibble(
position = c('AM', 'FW', 'M', 'DM', 'D'),
position_grp = c('A', 'A', 'M', 'M', 'D')
)
position_mapping
# Join everything, compute per-minute / per-90 rates, select the requested
# metric into `v`, and apply a minutes floor (softer for UEFA/international).
df_init <-
list(
leagues_n %>% select(-n),
teams %>% distinct(),
player_ratings,
player_xg %>% rename(games_played_xg = games_played, minutes_xg = minutes),
players %>%
drop_na(position, dob) %>%
select(player_id, player_name, position, dob) %>%
mutate(across(position, ~str_remove_all(.x, '\\(.*\\)') %>% str_remove_all('\\,.*$'))) %>%
filter(position != 'GK') %>%
inner_join(position_mapping)
) %>%
reduce(inner_join) %>%
ungroup() %>%
mutate(
xg_pm = xg / minutes,
xg_p90 = 90 * xg_pm,
vaep_pm = vaep / minutes,
vaep_p90 = 90 * vaep_pm,
v = !!col_sym,
minutes_cutoff =
case_when(
country %in% c('Europe', 'International') ~ 2 * 90,
TRUE ~ 10 * 90
)
) %>%
filter(minutes >= minutes_cutoff) %>%
# filter(position == 'A') %>%
mutate(age = lubridate::time_length(lubridate::ymd(sprintf('%s-08-01', season)) - dob, 'year') %>% floor() %>% as.integer()) %>%
filter(age >= 18 & age <= 35) %>%
distinct()
df_init
# Non-equi join (data.table) to attach each player's age group: from <= age < to.
df_init <-
data.table::as.data.table(df_init %>% mutate(age2 = age, age3 = age))[
data.table::as.data.table(age_grps),
on=.(age2 >= from, age3 < to)
] %>%
as_tibble() %>%
unite('age_grp', age2, age3, sep = '<=x<')
df_init
# Overall and position/age-group median baselines of the chosen rate.
baseline <-
df_init %>%
summarize(
across(c(v), list(baseline = median), .names = '{fn}')
)
baseline_by_pos <-
df_init %>%
group_by(position, age_grp) %>%
summarize(
across(c(v), list(baseline = median), .names = '{fn}')
) %>%
ungroup()
# Without shrinkage: return raw per-90 values unchanged.
if(!adjust) {
df <-
df_init %>%
mutate(idx = row_number(), v_orig = v) %>%
relocate(idx)
return(list(data = df, adjust = NULL, baseline = baseline, baseline_by_pos = baseline_by_pos))
}
# Per league-season 25th-percentile minutes cutoff; the prior is fit on
# regulars only.
minute_cutoffs <-
df_init %>%
group_by(season, league_id) %>%
summarize(across(minutes, list(cutoff = ~quantile(.x, 0.25)))) %>%
ungroup()
minute_cutoffs
df_filt <-
df_init %>%
# mutate(
# minutes_cutoff =
# case_when(
# country %in% c('Europe', 'International') ~ 6 * 90,
# TRUE ~ 20 * 90
# )
# ) %>%
# filter(minutes >= (20 * 90))
left_join(minute_cutoffs) %>%
filter(minutes >= minutes_cutoff)
# Method-of-moments beta fit: mean/variance -> (alpha, beta).
estimate_beta <- function(x) {
mu <- mean(x)
var <- var(x)
alpha <- ((1 - mu) / var - 1 / mu) * mu ^ 2
beta <- alpha * (1 / mu - 1)
list(alpha = alpha, beta = beta)
}
lst <- estimate_beta(df_filt[[sprintf('%s_pm', col)]])
lst
# Empirical-Bayes shrinkage of the per-minute rate, rescaled to per-90.
df <-
df_init %>%
rename(v_orig = v) %>%
mutate(
idx = row_number(),
v = 90 * (v_orig + lst$alpha) / (minutes + lst$alpha + lst$beta)
) %>%
relocate(idx, v)
df
list(data = df, adjust = lst, baseline = baseline, baseline_by_pos = baseline_by_pos)
}})
# Transform the per-player value column `v` into a standardized score `z`.
#
# When `direct = TRUE`, `z` is simply a copy of `v` (no transformation).
# Otherwise `v` is shifted positive, log-transformed, and z-scored within
# league x season x position-group x age-group cells.
#
# Returns a list: `data` (input with `z` added), `agg` (per-cell mean/sd used
# for standardization; NULL when direct), `v_min` (shift applied before the
# log; NULL when direct).
do_modify_v_col <- function(data, direct = FALSE) {
if(direct) {
return(
list(
data = data %>% mutate(z = v) %>% relocate(idx, z, v),
agg = NULL,
v_min = NULL
)
)
}
# Smallest finite, non-missing v; used below to shift values positive.
v_min <-
data %>%
drop_na(v) %>%
filter(!is.infinite(v)) %>%
summarize(`_` = min(v)) %>%
pull(`_`)
v_min
# Shift by abs(min) + 0.001 before log() so arguments stay strictly positive.
data <- data %>% mutate(across(v, ~log(.x + abs(!!v_min) + 0.001)))
# Per-cell moments; cells with a single observation are dropped (sd undefined).
agg <-
data %>%
group_by(league_id, league_name, country, season, position_grp, age_grp) %>%
summarize(
n = n(),
across(
v,
list(mean = mean, sd = sd), na.rm = TRUE, .names = '{.fn}'
)
) %>%
ungroup() %>%
filter(n > 1L)
agg
# inner_join (keys inferred from shared columns) also drops rows belonging to
# the single-observation cells filtered out above.
res <-
data %>%
inner_join(agg) %>%
mutate(z = (v - mean) / sd) %>%
relocate(idx, v_orig, v, z, mean, sd)
list(data = res, agg = agg, v_min = v_min)
}
# Diagnostic distribution plots of the (standardized or raw) value column `z`.
#
# @param data Data frame with columns `z`, `age_grp`, `position_grp`.
# @param col Metric the values came from ('vaep' or 'xg'); with `direct` it
#   selects axis limits and the histogram bin width.
# @param direct TRUE when `z` holds raw per-90 values, FALSE when z-scored.
# @return list(p1 = density by age group, p2 = density by position group,
#   p3 = histogram), all ggplot objects.
do_plots <- function(data, col = 'vaep', direct = FALSE) {
  # Scalar conditions, so use short-circuiting && rather than vectorized &.
  if (col == 'vaep' && direct) {
    lims <- c(0, 1.2)
    binwidth <- 0.01
  } else if (col == 'vaep' && !direct) {
    lims <- c(-3, 3)
    binwidth <- 0.1
  } else if (col == 'xg' && direct) {
    lims <- c(0, 1)
    binwidth <- 0.01
  } else {
    # Previously unhandled (e.g. col = 'xg', direct = FALSE): `lims` and
    # `binwidth` were left undefined and the ggplot calls below errored.
    # Fall back to the standardized-scale settings.
    lims <- c(-3, 3)
    binwidth <- 0.1
  }
  p1 <-
    data %>%
    mutate(across(age_grp, factor)) %>%
    ggplot() +
    aes(x = z, color = age_grp) +
    geom_density() +
    coord_cartesian(xlim = lims)
  p2 <-
    data %>%
    mutate(across(position_grp, factor)) %>%
    ggplot() +
    aes(x = z, color = position_grp) +
    geom_density() +
    coord_cartesian(xlim = lims)
  p3 <-
    data %>%
    ggplot() +
    aes(x = z) +
    geom_histogram(binwidth = binwidth) +
    coord_cartesian(xlim = lims)
  list(p1 = p1, p2 = p2, p3 = p3)
}
# Build pairwise (league_1, league_2) observations for players who appear in
# more than one league, with the z-score difference `z_diff = z_1 - z_2`.
#
# @param data Output of do_modify_v_col()$data (needs z, league/season cols).
# @param normalize If TRUE, additionally standardize z_diff within
#   position x age-group cells and return the per-cell moments as `agg`.
# @param strict Passed to .do_filter_season(): keep only same/consecutive
#   season pairs.
# @return list(data = pairwise rows, agg = per-cell moments or NULL).
do_get_data <- function(data, normalize = FALSE, strict = TRUE) {
ids <-
data %>%
distinct(player_id, league_id)
ids
# Players observed in more than one league.
ids_gt1 <-
ids %>%
count(player_id, sort = TRUE) %>%
filter(n > 1L) %>%
select(-n) %>%
inner_join(ids)
ids_gt1
# Columns that get a _1/_2 suffix when the two league observations are paired.
rgx_rename <- '^(season|idx|team_name|league|z)'
# Attach metadata and the display label from league_mapping (global).
ids_gt1_meta <-
ids_gt1 %>%
left_join(
data %>%
select(player_id, player_name, matches('position'), age_grp, country, matches(rgx_rename)) %>%
unite('league', country, league_name, sep = '_') %>%
mutate(across(league, ~str_replace_all(.x, '\\s|[.]', '_') %>% str_replace_all('[_]+', '_'))) %>%
left_join(league_mapping %>% select(-path)) %>%
select(-league) %>%
rename(league = league_lab)
)
# Position is always the same, so can join on it
# df_xg %>% distinct(player_id, player_name, position) %>% count(player_id, player_name, sort = TRUE)
# df_xg %>% distinct(player_id, player_name, season, age_grp) %>% count(player_id, player_name, season, sort = TRUE)
# df_xg %>% distinct(player_id, player_name, season, age_grp) %>% filter(player_name == 'Sergio Ramos')
# Suffix the per-observation columns so a self-join yields *_1 / *_2 pairs.
f_rename <- function(suffix) {
ids_gt1_meta %>%
mutate(dummy = 0) %>%
select(
player_id,
player_name,
position,
position_grp,
age_grp,
matches(rgx_rename),
dummy
) %>%
rename_with(
~ sprintf('%s_%s', .x, suffix),
c(matches(rgx_rename))
)
}
# Self-join on the unsuffixed key columns; keep only cross-league pairs.
res <-
full_join(
f_rename(1),
f_rename(2)
) %>%
select(-dummy) %>%
filter(league_1 != league_2) %>%
mutate(z_diff = z_1 - z_2)
if(!normalize) {
return(list(data = res, agg = NULL))
}
# Standardize z_diff within position x age-group cells.
agg <-
res %>%
group_by(position, age_grp) %>%
summarize(
n = n(),
across(
z_diff,
list(mean = mean, sd = sd, median = median), na.rm = TRUE, .names = '{.fn}'
)
) %>%
ungroup()
agg
df <-
res %>%
rename(z_diff_orig = z_diff) %>%
inner_join(agg) %>%
mutate(z_diff = (z_diff_orig - mean) / sd) %>%
relocate(z_diff_orig, z_diff)
df_filt <- .do_filter_season(df, strict = strict)
list(
data = df_filt,
agg = agg
)
}
# Pull a tidy (league, estimate) table out of a fitted lm, sorted by estimate.
extract_coefs <- function(fit) {
  coefs <- broom::tidy(fit)
  coefs <- select(coefs, league = term, estimate)
  # lm() backticks non-syntactic term names; strip the backticks back out.
  coefs <- mutate(coefs, league = str_remove_all(league, '`'))
  arrange(coefs, desc(estimate))
}
# Restrict paired observations to the same or the immediately following
# season. With strict = FALSE the data passes through untouched.
.do_filter_season <- function(data, strict = TRUE) {
  if (!strict) {
    return(data)
  }
  # Same season, or season_2 directly follows season_1.
  filter(data, season_2 == season_1 | season_2 == season_1 + 1)
}
# Null-coalescing operator: return `x` unless it is NULL, in which case `y`.
`%||%` <- function(x, y) {
  if (is.null(x)) y else x
}
# Fit a dummy-coded "league strength" regression on within-player z-score
# differences, then turn the coefficients into league-vs-league heat maps.
#
# Each input row is one player observed in two leagues; each league gets a
# -1/+1 indicator column (-1 for league_1, +1 for league_2), so the fitted
# lm coefficients order the leagues. Returns the design data, the lm fit,
# ranked coefficients, the pairwise value tables (`vps`, `vps_by_grp`) and
# the four heat-map plots (which plot_heatmap() also writes to disk).
do_fit_dummy <-
function(data,
agg,
baseline,
baseline_by_pos,
col = 'vaep',
suffix = str_remove(deparse(substitute(data)), '^df_')) {
rgx <- 'idx|season|player|team'
# Wide design matrix: one column per league, -1 for league_1, +1 for league_2,
# 0 elsewhere.
res <-
data %>%
# select(player_name, matches('idx'), matches('season'), matches('team'), matches('league'), z_diff) %>%
select(player_name, matches(rgx), matches('league_[12]$'), z_diff) %>%
pivot_longer(-c(matches(rgx), z_diff)) %>%
mutate(across(name, ~str_remove(.x, 'league_') %>% as.integer())) %>%
mutate(across(name, ~if_else(.x == 1L, -1L, 1L))) %>%
pivot_wider(names_from = value, values_from = name, values_fill = 0L) %>%
# Make this the NA coefficient
relocate(matches('Eredivisie'), .after = last_col())
# NOTE(review): matches('\\(') keeps only columns whose name contains '(' --
# presumably the league labels carry parentheses; confirm against the data.
fit <- lm(formula(z_diff ~ .), data = res %>% select(z_diff, matches('\\(')))
coefs <- fit %>% extract_coefs()
# Rank leagues by coefficient (rnk 1 = largest estimate).
rnks <-
coefs %>%
mutate(rnk = row_number(desc(estimate))) %>%
arrange(rnk)
rnks
# Suffix the ranked-coefficient columns for the pairwise cross join below.
f_select <- function(suffix, op = 1) {
rnks %>%
mutate(across(league, ~forcats::fct_reorder(.x, op * rnk))) %>%
rename_all(~sprintf('%s_%s', .x, suffix)) %>%
mutate(dummy = 0L)
}
# Observation-weighted pooled moments across all position/age cells.
agg_agg <-
agg %>%
mutate(total = sum(n), frac = n / total) %>%
summarize(sd = sum(sd * frac), mean = sum(mean * frac), median = sum(median * frac))
# All league pairs: full_join matches only on the constant `dummy` column,
# i.e. a cross join.
vps_init <-
full_join(
f_select(1, -1),
f_select(2, -1)
) %>%
select(-dummy)
vps_init
# vp: coefficient gap mapped back onto the value-per-90 scale;
# p: vp relative to the overall baseline.
vps <-
vps_init %>%
mutate(
vp = agg_agg$sd * ((estimate_1 - estimate_2) + agg_agg$mean),
p = vp / baseline$baseline
)
vps
# Same computation per position x age-group cell, using cell-specific
# moments and baselines.
vps_by_grp <-
vps_init %>%
mutate(dummy = 0) %>%
left_join(agg %>% mutate(dummy = 0)) %>%
select(-dummy) %>%
left_join(baseline_by_pos) %>%
mutate(
vp = sd * ((estimate_1 - estimate_2) + mean),
p = vp / baseline
)
vps_by_grp
# Keep each unordered pair once (rnk_1 <= rnk_2) and drop the intercept row.
.filter <- function(data) {
data %>%
filter(rnk_1 <= rnk_2) %>%
filter(league_2 != '(Intercept)')
}
vps_filt <- vps %>% .filter()
vps_filt
vps_by_grp_filt <- vps_by_grp %>% .filter()
vps_by_grp_filt
subtitle <- 'All Field Positions, Age 18-35'
viz_diff_v <-
vps_filt %>%
plot_heatmap(
which = 'vp',
suffix = suffix,
col = col,
subtitle = subtitle
)
viz_diff_v
viz_diff_rel <-
vps_filt %>%
plot_heatmap(
which = 'p',
suffix = suffix,
col = col,
subtitle = subtitle,
baseline = baseline$baseline
)
viz_diff_rel
# Young-forwards slice of the by-group table.
.filter_f <- function(data) {
data %>%
filter(position == 'FW', age_grp == '18<=x<24')
}
baseline_f <- baseline_by_pos %>% .filter_f() %>% pull(baseline)
subtitle_f <- 'Forwards, Age 18-24'
vps_filt_f <- vps_by_grp_filt %>% .filter_f()
viz_diff_v_f <-
vps_filt_f %>%
plot_heatmap(
which = 'vp',
suffix = sprintf('%s_fw_young', suffix),
col = col,
subtitle = subtitle_f
)
viz_diff_v_f
viz_diff_rel_f <-
vps_filt_f %>%
plot_heatmap(
which = 'p',
suffix = sprintf('%s_fw_young', suffix),
col = col,
subtitle = subtitle_f,
baseline = baseline_f
)
viz_diff_rel_f
list(
data = res,
fit = fit,
coefs = coefs,
vps = vps,
vps_by_grp = vps_by_grp,
viz_diff_v = viz_diff_v,
viz_diff_rel = viz_diff_rel,
viz_diff_v_f = viz_diff_v_f,
viz_diff_rel_f = viz_diff_rel_f
)
}
# Convert a size in points to millimetres (used for geom_text sizing).
pts <- function(x) {
  mm <- grid::convertUnit(grid::unit(x, 'pt'), 'mm')
  as.numeric(mm)
}
# Markdown caption/tag applied to every exported plot (rendered by ggtext).
lab_tag <- '**Viz** + **Model**: Tony ElHabr | **Data**: @canzhiye'
# Heat map of league-vs-league values and side effect of saving it to disk.
#
# @param vps_filt Data frame with `league_1`, `league_2` and a `vp`/`p` column.
# @param which 'vp' (expected change in value/90) or 'p' (relative increase
#   vs `baseline`); validated with match.arg().
# @param col Metric name ('vaep' or 'xg'), used only for axis/label text.
# @param baseline Baseline quoted in the subtitle when `which = 'p'`.
# @param suffix Optional filename suffix, joined to the stem with `sep`.
# @param subtitle Optional subtitle text (extended automatically for 'p').
# @return The ggplot object; as a side effect the plot is written as a PNG
#   under the global `dir_proj`.
plot_heatmap <- function(vps_filt, which = 'vp', col = 'vaep', baseline = 0.3, suffix = NULL, subtitle = NULL, sep = '_') {
  # Fail fast on a bad `which` instead of silently leaving locals undefined.
  which <- match.arg(which, c('vp', 'p'))
  lab <- sprintf('%s/90', ifelse(col == 'vaep', 'VAEP', 'xG'))
  if (which == 'vp') {
    f_scale <- scales::number
    .option <- 'D'
    file <- 'vp'
    .acc <- 0.01
    title <- sprintf('Expected change in %s when transitioning from league A to B', lab)
  } else {
    f_scale <- scales::percent
    .option <- 'H'
    file <- 'p'
    .acc <- 1
    title <- 'Relative increase in competition in league A compared to league B'
    subtitle <- sprintf('Using %s baseline of %0.2f%s', lab, baseline, ifelse(is.null(suffix), '', sprintf(', %s', subtitle)))
  }
  if (!is.null(suffix)) {
    suffix <- sprintf('%s%s', sep, suffix)
  } else {
    suffix <- ''
  }
  col_sym <- sym(which)
  p <-
    vps_filt %>%
    ggplot() +
    aes(x = league_2, y = league_1) +
    geom_tile(aes(fill = !!col_sym), alpha = 0.7, height = 0.95, width = 0.95, show.legend = FALSE) +
    geom_text(aes(label = f_scale(!!col_sym, accuracy = .acc)), size = pts(14), fontface = 'bold') +
    scale_fill_viridis_c(option = .option, begin = 0.1, end = 1) +
    scale_x_discrete(labels = function(x) str_wrap(x, width = 12)) +
    theme(
      plot.title = ggtext::element_markdown(size = 18),
      plot.subtitle = ggtext::element_markdown(size = 16),
      panel.grid.major.x = element_blank(),
      panel.grid.major.y = element_blank(),
      axis.text.x = element_text(size = 9),
      axis.title.y = element_text(size = 16)
    ) +
    labs(
      title = title,
      subtitle = subtitle,
      tag = lab_tag,
      y = 'League A',
      x = 'League B'
    )
  # Side effect: persist the plot before returning it.
  ggsave(
    plot = p,
    filename = file.path(dir_proj, sprintf('viz_relative_%s%s.png', file, suffix)),
    width = 16,
    height = 8,
    type = 'cairo'
  )
  p
}
| /33-2020_euros_ratings/helpers.R | no_license | tRackRR/sports_viz | R | false | false | 18,037 | r |
library(tidyverse)
library(zeallot)
library(lubridate)
extrafont::loadfonts(device = 'win', quiet = TRUE)
# Global ggplot theme: minimal base + Karla typography, markdown-aware titles
# and captions (ggtext), white background, no minor gridlines.
theme_set(theme_minimal())
theme_update(
text = element_text(family = 'Karla'),
title = element_text('Karla', size = 14, color = 'gray20'),
plot.title = ggtext::element_markdown('Karla', face = 'bold', size = 18, color = 'gray20'),
plot.title.position = 'plot',
plot.subtitle = ggtext::element_markdown('Karla', face = 'bold', size = 16, color = 'gray50'),
axis.text = element_text('Karla', size = 14),
axis.title = element_text(size = 14, face = 'bold', hjust = 0.99),
axis.line = element_blank(),
panel.grid.major = element_line(color = 'gray80'),
panel.grid.minor = element_line(color = 'gray80'),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank(),
plot.margin = margin(10, 10, 10, 10),
plot.background = element_rect(fill = '#ffffff', color = NA),
plot.caption = ggtext::element_markdown('Karla', size = 12, color = 'gray20', hjust = 1),
# plot.caption = element_text('Karla', size = 12, color = 'gray20', hjust = 1),
plot.caption.position = 'plot',
plot.tag = ggtext::element_markdown('Karla', size = 12, color = 'gray20', hjust = 0),
plot.tag.position = c(.01, 0.015),
legend.text = element_text(color = 'gray20', size = 12),
# strip.text = element_text(color = 'gray20', size = 14),
# strip.background = element_blank(),
panel.background = element_rect(fill = '#ffffff', color = NA)
)
# geom_text inherits the same font by default.
update_geom_defaults('text', list(family = 'Karla', size = 4))
# Memoised accessor for the project output directory (a fixed relative path).
get_dir_proj <- memoise::memoise(
  function() {
    '33-2020_euros_ratings'
  }
)
# Read `<dir_proj>/<x>.csv` with readr; `dir_proj` is a global set elsewhere.
import_csv <- function(x) {
  path <- file.path(dir_proj, sprintf('%s.csv', x))
  read_csv(path)
}
# Hand-authored ("made this") mapping from raw league names to display labels
# and logo files. Builds a logo `path` only when a PNG file name is present.
import_league_mapping <- function() {
import_csv('league_mapping') %>%
mutate(path = ifelse(!is.na(file_png), file.path('25-202021_soccer_refs', sprintf('%s.png', file_png)), NA_character_)) %>%
select(-file_png)
}
# Loaded once at source time; used by do_get_data() below.
league_mapping <- import_league_mapping()
# Memoised loader for the modeling data set.
#
# Reads the CSV extracts (leagues, teams, players, ratings, xG), harmonizes
# league names, joins everything into one player x team x league x season
# table, applies minutes/age filters, and bins ages into groups.
#
# @param col Metric exposed as the generic value column `v`
#   ('vaep' or 'xg'; taken from the corresponding <col>_p90 column).
# @param adjust If TRUE, shrink per-minute values with a beta-prior
#   adjustment estimated by estimate_beta(); otherwise `v` is used as-is.
# @return list(data, adjust, baseline, baseline_by_pos).
do_import <- memoise::memoise({
function(col = 'vaep', adjust = FALSE) {
col_sym <- sym(sprintf('%s_p90', col))
leagues_init <-
import_csv('leagues') %>%
rename(league_name = name) %>%
mutate(
# Some manual adjustments prior to using some "auto" name correction (by descending season)
across(
league_name,
~case_when(
country == 'USA' & league_name %>% str_detect('Major League Soccer') ~ 'Major League Soccer',
country == 'China' & league_name %>% str_detect('Super League') ~ 'Super League',
country == 'Spain' & league_name == 'Primera Division' ~ 'LaLiga',
TRUE ~ .x
)
)
)
# Pick one canonical name per league_id: shortest, then most recent season.
league_names <-
leagues_init %>%
mutate(
across(league_name, ~str_remove_all(.x, '\\s+[Gg]rp.*'))
) %>%
mutate(n_char = nchar(league_name)) %>%
arrange(league_id, n_char, desc(season)) %>%
group_by(league_id) %>%
filter(row_number() == 1L) %>%
ungroup() %>%
select(-season)
league_names
leagues <- leagues_init %>% select(-league_name) %>% inner_join(league_names)
leagues
teams <- import_csv('teams')
players <-
import_csv('players') %>%
mutate(across(dob, lubridate::date)) %>%
rename(player_id = id, player_name = name)
player_xg <- import_csv('player_xg_by_season') %>% rename(xg = xg_value)
player_ratings <-
import_csv('player_ratings_by_season') %>%
rename(ovaep = offensive_value, dvaep = defensive_value) %>%
mutate(
vaep = ovaep + dvaep
)
# player_ratings %>%
# select(player_id, team_id, league_id, season, gp1 = games_played, mp1 = minutes) %>%
# full_join(
# player_xg %>% select(player_id, team_id, league_id, season, gp2 = games_played, mp2 = minutes)
# ) -> z
# z %>%
# filter(gp1 < gp2)
# Domestic leagues with at least 5 seasons of data since 2012.
leagues_n <-
leagues %>%
filter(!(country %in% c('International', 'Europe'))) %>%
filter(season >= 2012) %>%
distinct(country, league_id, league_name, season) %>%
# arrange(country, league_id, league_name, season) %>%
count(country, league_id, league_name, sort = TRUE) %>%
filter(n >= 5L)
leagues_n
# Age bins: [18,24), [24,27), [27,30), [30,36).
age_grps <-
tibble(
from = c(18L, 24L, 27L, 30L),
to = c(24L, 27L, 30L, 36L)
)
age_grps
# Collapse detailed positions into attack/midfield/defense groups.
position_mapping <-
tibble(
position = c('AM', 'FW', 'M', 'DM', 'D'),
position_grp = c('A', 'A', 'M', 'M', 'D')
)
position_mapping
# Join everything (keys inferred), derive per-minute / per-90 rates, and
# filter by minutes played and age.
df_init <-
list(
leagues_n %>% select(-n),
teams %>% distinct(),
player_ratings,
player_xg %>% rename(games_played_xg = games_played, minutes_xg = minutes),
players %>%
drop_na(position, dob) %>%
select(player_id, player_name, position, dob) %>%
mutate(across(position, ~str_remove_all(.x, '\\(.*\\)') %>% str_remove_all('\\,.*$'))) %>%
filter(position != 'GK') %>%
inner_join(position_mapping)
) %>%
reduce(inner_join) %>%
ungroup() %>%
mutate(
xg_pm = xg / minutes,
xg_p90 = 90 * xg_pm,
vaep_pm = vaep / minutes,
vaep_p90 = 90 * vaep_pm,
v = !!col_sym,
# Continental/international competitions get a lower minutes bar.
minutes_cutoff =
case_when(
country %in% c('Europe', 'International') ~ 2 * 90,
TRUE ~ 10 * 90
)
) %>%
filter(minutes >= minutes_cutoff) %>%
# filter(position == 'A') %>%
mutate(age = lubridate::time_length(lubridate::ymd(sprintf('%s-08-01', season)) - dob, 'year') %>% floor() %>% as.integer()) %>%
filter(age >= 18 & age <= 35) %>%
distinct()
df_init
# Non-equi join assigns each age to its bin; age_grp becomes e.g. '18<=x<24'.
df_init <-
data.table::as.data.table(df_init %>% mutate(age2 = age, age3 = age))[
data.table::as.data.table(age_grps),
on=.(age2 >= from, age3 < to)
] %>%
as_tibble() %>%
unite('age_grp', age2, age3, sep = '<=x<')
df_init
# Overall and per position/age-group medians of v (baselines used downstream).
baseline <-
df_init %>%
summarize(
across(c(v), list(baseline = median), .names = '{fn}')
)
baseline_by_pos <-
df_init %>%
group_by(position, age_grp) %>%
summarize(
across(c(v), list(baseline = median), .names = '{fn}')
) %>%
ungroup()
if(!adjust) {
df <-
df_init %>%
mutate(idx = row_number(), v_orig = v) %>%
relocate(idx)
return(list(data = df, adjust = NULL, baseline = baseline, baseline_by_pos = baseline_by_pos))
}
# 25th-percentile minutes per season/league; the prior is fit only on
# players above this cutoff.
minute_cutoffs <-
df_init %>%
group_by(season, league_id) %>%
summarize(across(minutes, list(cutoff = ~quantile(.x, 0.25)))) %>%
ungroup()
minute_cutoffs
df_filt <-
df_init %>%
# mutate(
# minutes_cutoff =
# case_when(
# country %in% c('Europe', 'International') ~ 6 * 90,
# TRUE ~ 20 * 90
# )
# ) %>%
# filter(minutes >= (20 * 90))
left_join(minute_cutoffs) %>%
filter(minutes >= minutes_cutoff)
# Method-of-moments estimate of Beta(alpha, beta) parameters from x.
estimate_beta <- function(x) {
mu <- mean(x)
var <- var(x)
alpha <- ((1 - mu) / var - 1 / mu) * mu ^ 2
beta <- alpha * (1 / mu - 1)
list(alpha = alpha, beta = beta)
}
lst <- estimate_beta(df_filt[[sprintf('%s_pm', col)]])
lst
# Shrink the per-minute rate toward the prior and rescale to a per-90 value.
df <-
df_init %>%
rename(v_orig = v) %>%
mutate(
idx = row_number(),
v = 90 * (v_orig + lst$alpha) / (minutes + lst$alpha + lst$beta)
) %>%
relocate(idx, v)
df
list(data = df, adjust = lst, baseline = baseline, baseline_by_pos = baseline_by_pos)
}})
# Transform the per-player value column `v` into a standardized score `z`.
#
# When `direct = TRUE`, `z` is simply a copy of `v` (no transformation).
# Otherwise `v` is shifted positive, log-transformed, and z-scored within
# league x season x position-group x age-group cells.
#
# Returns a list: `data` (input with `z` added), `agg` (per-cell mean/sd used
# for standardization; NULL when direct), `v_min` (shift applied before the
# log; NULL when direct).
do_modify_v_col <- function(data, direct = FALSE) {
if(direct) {
return(
list(
data = data %>% mutate(z = v) %>% relocate(idx, z, v),
agg = NULL,
v_min = NULL
)
)
}
# Smallest finite, non-missing v; used below to shift values positive.
v_min <-
data %>%
drop_na(v) %>%
filter(!is.infinite(v)) %>%
summarize(`_` = min(v)) %>%
pull(`_`)
v_min
# Shift by abs(min) + 0.001 before log() so arguments stay strictly positive.
data <- data %>% mutate(across(v, ~log(.x + abs(!!v_min) + 0.001)))
# Per-cell moments; cells with a single observation are dropped (sd undefined).
agg <-
data %>%
group_by(league_id, league_name, country, season, position_grp, age_grp) %>%
summarize(
n = n(),
across(
v,
list(mean = mean, sd = sd), na.rm = TRUE, .names = '{.fn}'
)
) %>%
ungroup() %>%
filter(n > 1L)
agg
# inner_join (keys inferred from shared columns) also drops rows belonging to
# the single-observation cells filtered out above.
res <-
data %>%
inner_join(agg) %>%
mutate(z = (v - mean) / sd) %>%
relocate(idx, v_orig, v, z, mean, sd)
list(data = res, agg = agg, v_min = v_min)
}
# Diagnostic distribution plots of the (standardized or raw) value column `z`.
#
# @param data Data frame with columns `z`, `age_grp`, `position_grp`.
# @param col Metric the values came from ('vaep' or 'xg'); with `direct` it
#   selects axis limits and the histogram bin width.
# @param direct TRUE when `z` holds raw per-90 values, FALSE when z-scored.
# @return list(p1 = density by age group, p2 = density by position group,
#   p3 = histogram), all ggplot objects.
do_plots <- function(data, col = 'vaep', direct = FALSE) {
  # Scalar conditions, so use short-circuiting && rather than vectorized &.
  if (col == 'vaep' && direct) {
    lims <- c(0, 1.2)
    binwidth <- 0.01
  } else if (col == 'vaep' && !direct) {
    lims <- c(-3, 3)
    binwidth <- 0.1
  } else if (col == 'xg' && direct) {
    lims <- c(0, 1)
    binwidth <- 0.01
  } else {
    # Previously unhandled (e.g. col = 'xg', direct = FALSE): `lims` and
    # `binwidth` were left undefined and the ggplot calls below errored.
    # Fall back to the standardized-scale settings.
    lims <- c(-3, 3)
    binwidth <- 0.1
  }
  p1 <-
    data %>%
    mutate(across(age_grp, factor)) %>%
    ggplot() +
    aes(x = z, color = age_grp) +
    geom_density() +
    coord_cartesian(xlim = lims)
  p2 <-
    data %>%
    mutate(across(position_grp, factor)) %>%
    ggplot() +
    aes(x = z, color = position_grp) +
    geom_density() +
    coord_cartesian(xlim = lims)
  p3 <-
    data %>%
    ggplot() +
    aes(x = z) +
    geom_histogram(binwidth = binwidth) +
    coord_cartesian(xlim = lims)
  list(p1 = p1, p2 = p2, p3 = p3)
}
# Build pairwise (league_1, league_2) observations for players who appear in
# more than one league, with the z-score difference `z_diff = z_1 - z_2`.
#
# @param data Output of do_modify_v_col()$data (needs z, league/season cols).
# @param normalize If TRUE, additionally standardize z_diff within
#   position x age-group cells and return the per-cell moments as `agg`.
# @param strict Passed to .do_filter_season(): keep only same/consecutive
#   season pairs.
# @return list(data = pairwise rows, agg = per-cell moments or NULL).
do_get_data <- function(data, normalize = FALSE, strict = TRUE) {
ids <-
data %>%
distinct(player_id, league_id)
ids
# Players observed in more than one league.
ids_gt1 <-
ids %>%
count(player_id, sort = TRUE) %>%
filter(n > 1L) %>%
select(-n) %>%
inner_join(ids)
ids_gt1
# Columns that get a _1/_2 suffix when the two league observations are paired.
rgx_rename <- '^(season|idx|team_name|league|z)'
# Attach metadata and the display label from league_mapping (global).
ids_gt1_meta <-
ids_gt1 %>%
left_join(
data %>%
select(player_id, player_name, matches('position'), age_grp, country, matches(rgx_rename)) %>%
unite('league', country, league_name, sep = '_') %>%
mutate(across(league, ~str_replace_all(.x, '\\s|[.]', '_') %>% str_replace_all('[_]+', '_'))) %>%
left_join(league_mapping %>% select(-path)) %>%
select(-league) %>%
rename(league = league_lab)
)
# Position is always the same, so can join on it
# df_xg %>% distinct(player_id, player_name, position) %>% count(player_id, player_name, sort = TRUE)
# df_xg %>% distinct(player_id, player_name, season, age_grp) %>% count(player_id, player_name, season, sort = TRUE)
# df_xg %>% distinct(player_id, player_name, season, age_grp) %>% filter(player_name == 'Sergio Ramos')
# Suffix the per-observation columns so a self-join yields *_1 / *_2 pairs.
f_rename <- function(suffix) {
ids_gt1_meta %>%
mutate(dummy = 0) %>%
select(
player_id,
player_name,
position,
position_grp,
age_grp,
matches(rgx_rename),
dummy
) %>%
rename_with(
~ sprintf('%s_%s', .x, suffix),
c(matches(rgx_rename))
)
}
# Self-join on the unsuffixed key columns; keep only cross-league pairs.
res <-
full_join(
f_rename(1),
f_rename(2)
) %>%
select(-dummy) %>%
filter(league_1 != league_2) %>%
mutate(z_diff = z_1 - z_2)
if(!normalize) {
return(list(data = res, agg = NULL))
}
# Standardize z_diff within position x age-group cells.
agg <-
res %>%
group_by(position, age_grp) %>%
summarize(
n = n(),
across(
z_diff,
list(mean = mean, sd = sd, median = median), na.rm = TRUE, .names = '{.fn}'
)
) %>%
ungroup()
agg
df <-
res %>%
rename(z_diff_orig = z_diff) %>%
inner_join(agg) %>%
mutate(z_diff = (z_diff_orig - mean) / sd) %>%
relocate(z_diff_orig, z_diff)
df_filt <- .do_filter_season(df, strict = strict)
list(
data = df_filt,
agg = agg
)
}
# Pull a tidy (league, estimate) table out of a fitted lm, sorted by estimate.
extract_coefs <- function(fit) {
  coefs <- broom::tidy(fit)
  coefs <- select(coefs, league = term, estimate)
  # lm() backticks non-syntactic term names; strip the backticks back out.
  coefs <- mutate(coefs, league = str_remove_all(league, '`'))
  arrange(coefs, desc(estimate))
}
# Restrict paired observations to the same or the immediately following
# season. With strict = FALSE the data passes through untouched.
.do_filter_season <- function(data, strict = TRUE) {
  if (!strict) {
    return(data)
  }
  # Same season, or season_2 directly follows season_1.
  filter(data, season_2 == season_1 | season_2 == season_1 + 1)
}
# Null-coalescing operator: return `x` unless it is NULL, in which case `y`.
`%||%` <- function(x, y) {
  if (is.null(x)) y else x
}
# Fit a dummy-coded "league strength" regression on within-player z-score
# differences, then turn the coefficients into league-vs-league heat maps.
#
# Each input row is one player observed in two leagues; each league gets a
# -1/+1 indicator column (-1 for league_1, +1 for league_2), so the fitted
# lm coefficients order the leagues. Returns the design data, the lm fit,
# ranked coefficients, the pairwise value tables (`vps`, `vps_by_grp`) and
# the four heat-map plots (which plot_heatmap() also writes to disk).
do_fit_dummy <-
function(data,
agg,
baseline,
baseline_by_pos,
col = 'vaep',
suffix = str_remove(deparse(substitute(data)), '^df_')) {
rgx <- 'idx|season|player|team'
# Wide design matrix: one column per league, -1 for league_1, +1 for league_2,
# 0 elsewhere.
res <-
data %>%
# select(player_name, matches('idx'), matches('season'), matches('team'), matches('league'), z_diff) %>%
select(player_name, matches(rgx), matches('league_[12]$'), z_diff) %>%
pivot_longer(-c(matches(rgx), z_diff)) %>%
mutate(across(name, ~str_remove(.x, 'league_') %>% as.integer())) %>%
mutate(across(name, ~if_else(.x == 1L, -1L, 1L))) %>%
pivot_wider(names_from = value, values_from = name, values_fill = 0L) %>%
# Make this the NA coefficient
relocate(matches('Eredivisie'), .after = last_col())
# NOTE(review): matches('\\(') keeps only columns whose name contains '(' --
# presumably the league labels carry parentheses; confirm against the data.
fit <- lm(formula(z_diff ~ .), data = res %>% select(z_diff, matches('\\(')))
coefs <- fit %>% extract_coefs()
# Rank leagues by coefficient (rnk 1 = largest estimate).
rnks <-
coefs %>%
mutate(rnk = row_number(desc(estimate))) %>%
arrange(rnk)
rnks
# Suffix the ranked-coefficient columns for the pairwise cross join below.
f_select <- function(suffix, op = 1) {
rnks %>%
mutate(across(league, ~forcats::fct_reorder(.x, op * rnk))) %>%
rename_all(~sprintf('%s_%s', .x, suffix)) %>%
mutate(dummy = 0L)
}
# Observation-weighted pooled moments across all position/age cells.
agg_agg <-
agg %>%
mutate(total = sum(n), frac = n / total) %>%
summarize(sd = sum(sd * frac), mean = sum(mean * frac), median = sum(median * frac))
# All league pairs: full_join matches only on the constant `dummy` column,
# i.e. a cross join.
vps_init <-
full_join(
f_select(1, -1),
f_select(2, -1)
) %>%
select(-dummy)
vps_init
# vp: coefficient gap mapped back onto the value-per-90 scale;
# p: vp relative to the overall baseline.
vps <-
vps_init %>%
mutate(
vp = agg_agg$sd * ((estimate_1 - estimate_2) + agg_agg$mean),
p = vp / baseline$baseline
)
vps
# Same computation per position x age-group cell, using cell-specific
# moments and baselines.
vps_by_grp <-
vps_init %>%
mutate(dummy = 0) %>%
left_join(agg %>% mutate(dummy = 0)) %>%
select(-dummy) %>%
left_join(baseline_by_pos) %>%
mutate(
vp = sd * ((estimate_1 - estimate_2) + mean),
p = vp / baseline
)
vps_by_grp
# Keep each unordered pair once (rnk_1 <= rnk_2) and drop the intercept row.
.filter <- function(data) {
data %>%
filter(rnk_1 <= rnk_2) %>%
filter(league_2 != '(Intercept)')
}
vps_filt <- vps %>% .filter()
vps_filt
vps_by_grp_filt <- vps_by_grp %>% .filter()
vps_by_grp_filt
subtitle <- 'All Field Positions, Age 18-35'
viz_diff_v <-
vps_filt %>%
plot_heatmap(
which = 'vp',
suffix = suffix,
col = col,
subtitle = subtitle
)
viz_diff_v
viz_diff_rel <-
vps_filt %>%
plot_heatmap(
which = 'p',
suffix = suffix,
col = col,
subtitle = subtitle,
baseline = baseline$baseline
)
viz_diff_rel
# Young-forwards slice of the by-group table.
.filter_f <- function(data) {
data %>%
filter(position == 'FW', age_grp == '18<=x<24')
}
baseline_f <- baseline_by_pos %>% .filter_f() %>% pull(baseline)
subtitle_f <- 'Forwards, Age 18-24'
vps_filt_f <- vps_by_grp_filt %>% .filter_f()
viz_diff_v_f <-
vps_filt_f %>%
plot_heatmap(
which = 'vp',
suffix = sprintf('%s_fw_young', suffix),
col = col,
subtitle = subtitle_f
)
viz_diff_v_f
viz_diff_rel_f <-
vps_filt_f %>%
plot_heatmap(
which = 'p',
suffix = sprintf('%s_fw_young', suffix),
col = col,
subtitle = subtitle_f,
baseline = baseline_f
)
viz_diff_rel_f
list(
data = res,
fit = fit,
coefs = coefs,
vps = vps,
vps_by_grp = vps_by_grp,
viz_diff_v = viz_diff_v,
viz_diff_rel = viz_diff_rel,
viz_diff_v_f = viz_diff_v_f,
viz_diff_rel_f = viz_diff_rel_f
)
}
# Convert a size in points to millimetres (used for geom_text sizing).
pts <- function(x) {
  mm <- grid::convertUnit(grid::unit(x, 'pt'), 'mm')
  as.numeric(mm)
}
# Markdown caption/tag applied to every exported plot (rendered by ggtext).
lab_tag <- '**Viz** + **Model**: Tony ElHabr | **Data**: @canzhiye'
# Heat map of league-vs-league values and side effect of saving it to disk.
#
# @param vps_filt Data frame with `league_1`, `league_2` and a `vp`/`p` column.
# @param which 'vp' (expected change in value/90) or 'p' (relative increase
#   vs `baseline`); validated with match.arg().
# @param col Metric name ('vaep' or 'xg'), used only for axis/label text.
# @param baseline Baseline quoted in the subtitle when `which = 'p'`.
# @param suffix Optional filename suffix, joined to the stem with `sep`.
# @param subtitle Optional subtitle text (extended automatically for 'p').
# @return The ggplot object; as a side effect the plot is written as a PNG
#   under the global `dir_proj`.
plot_heatmap <- function(vps_filt, which = 'vp', col = 'vaep', baseline = 0.3, suffix = NULL, subtitle = NULL, sep = '_') {
  # Fail fast on a bad `which` instead of silently leaving locals undefined.
  which <- match.arg(which, c('vp', 'p'))
  lab <- sprintf('%s/90', ifelse(col == 'vaep', 'VAEP', 'xG'))
  if (which == 'vp') {
    f_scale <- scales::number
    .option <- 'D'
    file <- 'vp'
    .acc <- 0.01
    title <- sprintf('Expected change in %s when transitioning from league A to B', lab)
  } else {
    f_scale <- scales::percent
    .option <- 'H'
    file <- 'p'
    .acc <- 1
    title <- 'Relative increase in competition in league A compared to league B'
    subtitle <- sprintf('Using %s baseline of %0.2f%s', lab, baseline, ifelse(is.null(suffix), '', sprintf(', %s', subtitle)))
  }
  if (!is.null(suffix)) {
    suffix <- sprintf('%s%s', sep, suffix)
  } else {
    suffix <- ''
  }
  col_sym <- sym(which)
  p <-
    vps_filt %>%
    ggplot() +
    aes(x = league_2, y = league_1) +
    geom_tile(aes(fill = !!col_sym), alpha = 0.7, height = 0.95, width = 0.95, show.legend = FALSE) +
    geom_text(aes(label = f_scale(!!col_sym, accuracy = .acc)), size = pts(14), fontface = 'bold') +
    scale_fill_viridis_c(option = .option, begin = 0.1, end = 1) +
    scale_x_discrete(labels = function(x) str_wrap(x, width = 12)) +
    theme(
      plot.title = ggtext::element_markdown(size = 18),
      plot.subtitle = ggtext::element_markdown(size = 16),
      panel.grid.major.x = element_blank(),
      panel.grid.major.y = element_blank(),
      axis.text.x = element_text(size = 9),
      axis.title.y = element_text(size = 16)
    ) +
    labs(
      title = title,
      subtitle = subtitle,
      tag = lab_tag,
      y = 'League A',
      x = 'League B'
    )
  # Side effect: persist the plot before returning it.
  ggsave(
    plot = p,
    filename = file.path(dir_proj, sprintf('viz_relative_%s%s.png', file, suffix)),
    width = 16,
    height = 8,
    type = 'cairo'
  )
  p
}
|
## Wrap a matrix in a closure that can also cache its inverse.
## Returns accessors: set/get for the matrix, setinverse/getinverse for the
## cached inverse. Setting a different matrix clears the cache; setting an
## identical one keeps it.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(matrx) {
    # Only invalidate the cached inverse when the matrix actually changes.
    if (!identical(matrx, x)) {
      x <<- matrx
      inv <<- NULL
    }
  }
  get <- function() x
  setinverse <- function(inverted) {
    inv <<- inverted
  }
  getinverse <- function() inv
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## The function that resolves the caches value
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if (!is.null(inv)){
return (inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
} | /cachematrix.R | no_license | otkoth/ProgrammingAssignment2 | R | false | false | 795 | r | ## The function catches matric inverse value
## Wrap a matrix in a closure that can also cache its inverse.
## Returns accessors: set/get for the matrix, setinverse/getinverse for the
## cached inverse. Setting a different matrix clears the cache; setting an
## identical one keeps it.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(matrx) {
    # Only invalidate the cached inverse when the matrix actually changes.
    if (!identical(matrx, x)) {
      x <<- matrx
      inv <<- NULL
    }
  }
  get <- function() x
  setinverse <- function(inverted) {
    inv <<- inverted
  }
  getinverse <- function() inv
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Return the inverse of the matrix held in `x` (a makeCacheMatrix object),
## computing it with solve() only when no cached value exists.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/blacklist_id.R
\name{missing_genotypes}
\alias{missing_genotypes}
\title{Missing genotypes information and blacklist of individuals based
on a threshold of missing genotype.}
\usage{
missing_genotypes(haplotypes.file, whitelist.loci = NULL, pop.id.start,
pop.id.end, missing.geno.threshold)
}
\arguments{
\item{haplotypes.file}{The 'batch_x.haplotypes.tsv' created by STACKS.}
\item{whitelist.loci}{(optional) A whitelist of loci with a column header
'LOCUS'. If the whitelist is written in the directory
\code{whitelist.loci = "whitelist.txt"}. If the whitelist is in
the global environment \code{whitelist.loci = whitelist.1000loci}}
\item{pop.id.start}{The start of your population id
in the name of your individual sample.}
\item{pop.id.end}{The end of your population id
in the name of your individual sample.}
\item{missing.geno.threshold}{(integer) Percentage of missing genotype
allowed per individuals. e.g. for a maximum of 30% of missing genotype
per individuals \code{missing.geno.threshold = 30}.}
}
\value{
a list with 4 data frames: $missing.genotypes.ind,
$missing.genotypes.pop, $blacklisted.id, $plot.missing.
}
\description{
Missing genotypes information summary per individuals and
per population. Create a blacklist of individuals based
on a threshold of missing genotype.
This function accept a whitelist of loci to create a blacklist
of individuals before or after filtering of loci.
Paralogs are automatically removed from STACKS haplotype file.
}
\details{
For the plot, to see the information with the population in
different facet, use \code{+facet_wrap(~POP_ID, nrow=2,ncol=5)}
after the object of the plot, e.g. \code{fig <- missing$plot.missing}, to have
facet by pop \code{fig +facet_wrap(~POP_ID, nrow=2,ncol=5)}
where \code{nrow} and \code{ncol} in this example would spread
the 10 populations on 2 rows and 5 columns.
}
\author{
Thierry Gosselin \email{thierrygosselin@icloud.com}
}
| /man/missing_genotypes.Rd | no_license | anne-laureferchaud/stackr | R | false | false | 2,011 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/blacklist_id.R
\name{missing_genotypes}
\alias{missing_genotypes}
\title{Missing genotypes information and blacklist of individuals based
on a threshold of missing genotype.}
\usage{
missing_genotypes(haplotypes.file, whitelist.loci = NULL, pop.id.start,
pop.id.end, missing.geno.threshold)
}
\arguments{
\item{haplotypes.file}{The 'batch_x.haplotypes.tsv' created by STACKS.}
\item{whitelist.loci}{(optional) A whitelist of loci with a column header
'LOCUS'. If the whitelist is written in the directory
\code{whitelist.loci = "whitelist.txt"}. If the whitelist is in
the global environment \code{whitelist.loci = whitelist.1000loci}}
\item{pop.id.start}{The start of your population id
in the name of your individual sample.}
\item{pop.id.end}{The end of your population id
in the name of your individual sample.}
\item{missing.geno.threshold}{(integer) Percentage of missing genotype
allowed per individuals. e.g. for a maximum of 30% of missing genotype
per individuals \code{missing.geno.threshold = 30}.}
}
\value{
a list with 4 data frames: $missing.genotypes.ind,
$missing.genotypes.pop, $blacklisted.id, $plot.missing.
}
\description{
Missing genotypes information summary per individuals and
per population. Create a blacklist of individuals based
on a threshold of missing genotype.
This function accept a whitelist of loci to create a blacklist
of individuals before or after filtering of loci.
Paralogs are automatically removed from STACKS haplotype file.
}
\details{
For the plot, to see the information with the population in
different facet, use \code{+facet_wrap(~POP_ID, nrow=2,ncol=5)}
after the object of the plot, e.g. \code{fig <- missing$plot.missing}, to have
facet by pop \code{fig +facet_wrap(~POP_ID, nrow=2,ncol=5)}
where \code{nrow} and \code{ncol} in this example would spread
the 10 populations on 2 rows and 5 columns.
}
\author{
Thierry Gosselin \email{thierrygosselin@icloud.com}
}
|
# List the names of all functions exported to the search path by an attached
# package. The package can be given unquoted (`lsf(stats)`) or as a string
# (`lsf("stats")`).
#
# @param package_name Package name, as a bare symbol or a character scalar;
#   the package must already be attached via library().
# @return A plain character vector of function names.
lsf <- function(package_name) {
  package_name <- as.character(substitute(package_name))
  search_entry <- paste0('package', ':', package_name)
  fns <- lsf.str(search_entry)
  # Drop the "ls_str" class/attributes so callers get a plain character
  # vector (the original did this indirectly via head(x, n = length(x))).
  as.character(fns)
}
########### ALT CODE #############################
# lsfunc <- function(package_name) {
#
# function_list = paste0('package', ':', package_name)
# x = ls(getNamespace('package_name'))
# return(x)
# }
| /R/listFunctions.r | no_license | tpopenfoose/milk | R | false | false | 462 | r | lsf <- function(package_name) {
package_name = as.character(substitute(package_name))
function_list = paste0('package', ':', package_name)
x = lsf.str(function_list)
y = head(x, n=length(x))
return(y)
}
########### ALT CODE #############################
# lsfunc <- function(package_name) {
#
# function_list = paste0('package', ':', package_name)
# x = ls(getNamespace('package_name'))
# return(x)
# }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{plot.heatmap}
\alias{plot.heatmap}
\title{Plot a clustered heatmap of samples}
\usage{
\method{plot}{heatmap}(rld, colData, cols.for.grouping)
}
\arguments{
\item{rld}{DESeqTransform object, typically output from running rlog()}
\item{colData}{Dataframe of metadata, used for annotating heatmap}
\item{cols.for.grouping}{Columns in colData to annotate heatmap with}
}
\value{
matrix of sample distances
}
\description{
Plot a clustered heatmap of samples
}
| /lib/lcdbwf/man/plot.heatmap.Rd | no_license | fridells51/lcdb-wf | R | false | true | 553 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{plot.heatmap}
\alias{plot.heatmap}
\title{Plot a clustered heatmap of samples}
\usage{
\method{plot}{heatmap}(rld, colData, cols.for.grouping)
}
\arguments{
\item{rld}{DESeqTransform object, typically output from running rlog()}
\item{colData}{Dataframe of metadata, used for annotating heatmap}
\item{cols.for.grouping}{Columns in colData to annotate heatmap with}
}
\value{
matrix of sample distances
}
\description{
Plot a clustered heatmap of samples
}
|
# Read the GRI report counts: one row per country, one column per year.
data<-read.csv("map_data_GRI.csv", header = TRUE, sep = ";",comment.char= "")
# Column headers arrive as "X<year>" (read.csv's check.names prefixes numeric
# names); strip the leading X so they become plain year labels.
inputYears <- colnames(data)[2:length(data)]
colnames(data)[2:length(data)]<-sub(pattern = "X*", replacement = "", x = inputYears)
colnames(data)[1]<-"Country"
# Bounds for the year-range slider defined in the UI below.
minYear <- as.integer(colnames(data)[2])
maxYear <- as.integer(colnames(data)[length(colnames(data))])
shinyUI (pageWithSidebar (
headerPanel ("GRI reports prediction"),
sidebarPanel (
helpText(a("Global reporting initiative (GRI)",
href="https://www.globalreporting.org/Pages/default.aspx",target="_blank"),
"is an organization, which helps companies all over the world to unify
their non-financial reporting in terms of",
a("sustainable development", href = "http://en.wikipedia.org/wiki/Sustainable_development",target="_blank"),
". This app uses pre-processed data, obtained from appropriate",
a("database", href = "http://database.globalreporting.org/search",target="_blank"),
"about numbers of reports, published by companies of different countries in the world.",
br(),
br(),
p("Application shows prediction interval of possible report numbers at the year right
after the last one from the selected interval."),
p("'Prediction' is quite theoretical, just for completing the course project.
It is based on the simpliest linear regression model and, of course, can be improved:)
Enjoy!")),
br(),
selectInput("country",
label = "Please, choose a country",
choices = levels(data$Country),
selected = "United States"),
sliderInput('years', 'Years to show info about',
value = c(minYear, maxYear),
min = minYear, max = maxYear,
step = 1, format = "####")
),
mainPanel (
textOutput("selectedInfo"),
plotOutput("plot"),
strong((textOutput("predictionText")))
)
)
) | /ui.R | no_license | omaksymov/DataProducts_Coursera | R | false | false | 2,020 | r | data<-read.csv("map_data_GRI.csv", header = TRUE, sep = ";",comment.char= "")
# Shiny UI definition for the GRI reports prediction app.
# `data` is read from map_data_GRI.csv on the preceding line of this file.
# read.csv() prefixes numeric column names with "X" (e.g. "X1999"); the sub()
# call below strips it so the year columns can be parsed as integers.
inputYears <- colnames(data)[2:length(data)]
colnames(data)[2:length(data)]<-sub(pattern = "X*", replacement = "", x = inputYears)
colnames(data)[1]<-"Country"
# First and last year present in the data; used as slider bounds below.
minYear <- as.integer(colnames(data)[2])
maxYear <- as.integer(colnames(data)[length(colnames(data))])
shinyUI (pageWithSidebar (
headerPanel ("GRI reports prediction"),
sidebarPanel (
# Introductory help text with links to the GRI organisation and database.
helpText(a("Global reporting initiative (GRI)",
href="https://www.globalreporting.org/Pages/default.aspx",target="_blank"),
"is an organization, which helps companies all over the world to unify
their non-financial reporting in terms of",
a("sustainable development", href = "http://en.wikipedia.org/wiki/Sustainable_development",target="_blank"),
". This app uses pre-processed data, obtained from appropriate",
a("database", href = "http://database.globalreporting.org/search",target="_blank"),
"about numbers of reports, published by companies of different countries in the world.",
br(),
br(),
p("Application shows prediction interval of possible report numbers at the year right
after the last one from the selected interval."),
p("'Prediction' is quite theoretical, just for completing the course project.
It is based on the simpliest linear regression model and, of course, can be improved:)
Enjoy!")),
br(),
# NOTE(review): levels() returns NULL when Country is character (the
# default since R 4.0), which would leave this dropdown empty -- confirm
# the R/shiny version this app is run with.
selectInput("country",
label = "Please, choose a country",
choices = levels(data$Country),
selected = "United States"),
# Year-range slider bounded by the data; 'format' is a legacy shiny option.
sliderInput('years', 'Years to show info about',
value = c(minYear, maxYear),
min = minYear, max = maxYear,
step = 1, format = "####")
),
mainPanel (
textOutput("selectedInfo"),
plotOutput("plot"),
strong((textOutput("predictionText")))
)
)
)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/b_reaction_time.R
\name{b_reaction_time}
\alias{b_reaction_time}
\title{b_reaction_time}
\usage{
b_reaction_time(
t,
s,
priors = NULL,
warmup = 1000,
iter = 2000,
chains = 4,
seed = NULL,
refresh = NULL,
control = NULL,
suppress_warnings = TRUE
)
}
\arguments{
\item{t}{a vector containing reaction times for each measurement.}
\item{s}{a vector containing subject indexes. Starting index should be 1 and the largest subject index should equal the number of subjects.}
\item{priors}{List of parameters and their priors - b_prior objects. You can put a prior on the mu_m (mean), sigma_m (variance of mu_m), mu_s (variance), sigma_s (variance of mu_s), mu_l (mean of the exponent factor) and sigma_l (variance of mu_l) parameters (default = NULL).}
\item{warmup}{Integer specifying the number of warmup iterations per chain (default = 1000).}
\item{iter}{Integer specifying the number of iterations (including warmup, default = 2000).}
\item{chains}{Integer specifying the number of parallel chains (default = 4).}
\item{seed}{Random number generator seed (default = NULL).}
\item{refresh}{Frequency of output (default = NULL).}
\item{control}{A named list of parameters to control the sampler's behavior (default = NULL).}
\item{suppress_warnings}{Suppress warnings returned by Stan (default = TRUE).}
}
\value{
An object of class `reaction_time_class`
}
\description{
Bayesian model for comparing reaction times.
}
\examples{
\donttest{
# priors
mu_prior <- b_prior(family="normal", pars=c(0, 100))
sigma_prior <- b_prior(family="uniform", pars=c(0, 500))
lambda_prior <- b_prior(family="uniform", pars=c(0.05, 5))
# attach priors to relevant parameters
priors <- list(c("mu_m", mu_prior),
c("sigma_m", sigma_prior),
c("mu_s", sigma_prior),
c("sigma_s", sigma_prior),
c("mu_l", lambda_prior),
c("sigma_l", sigma_prior))
# generate data
s <- rep(1:5, 20)
rt <- emg::remg(100, mu=10, sigma=1, lambda=0.4)
# fit
fit <- b_reaction_time(t=rt, s=s, priors=priors, chains=1)
}
}
| /man/b_reaction_time.Rd | no_license | bstatcomp/bayes4psy | R | false | true | 2,153 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/b_reaction_time.R
\name{b_reaction_time}
\alias{b_reaction_time}
\title{b_reaction_time}
\usage{
b_reaction_time(
t,
s,
priors = NULL,
warmup = 1000,
iter = 2000,
chains = 4,
seed = NULL,
refresh = NULL,
control = NULL,
suppress_warnings = TRUE
)
}
\arguments{
\item{t}{a vector containing reaction times for each measurement.}
\item{s}{a vector containing subject indexes. Starting index should be 1 and the largest subject index should equal the number of subjects.}
\item{priors}{List of parameters and their priors - b_prior objects. You can put a prior on the mu_m (mean), sigma_m (variance of mu_m), mu_s (variance), sigma_s (variance of mu_s), mu_l (mean of the exponent factor) and sigma_l (variance of mu_l) parameters (default = NULL).}
\item{warmup}{Integer specifying the number of warmup iterations per chain (default = 1000).}
\item{iter}{Integer specifying the number of iterations (including warmup, default = 2000).}
\item{chains}{Integer specifying the number of parallel chains (default = 4).}
\item{seed}{Random number generator seed (default = NULL).}
\item{refresh}{Frequency of output (default = NULL).}
\item{control}{A named list of parameters to control the sampler's behavior (default = NULL).}
\item{suppress_warnings}{Suppress warnings returned by Stan (default = TRUE).}
}
\value{
An object of class `reaction_time_class`
}
\description{
Bayesian model for comparing reaction times.
}
\examples{
\donttest{
# priors
mu_prior <- b_prior(family="normal", pars=c(0, 100))
sigma_prior <- b_prior(family="uniform", pars=c(0, 500))
lambda_prior <- b_prior(family="uniform", pars=c(0.05, 5))
# attach priors to relevant parameters
priors <- list(c("mu_m", mu_prior),
c("sigma_m", sigma_prior),
c("mu_s", sigma_prior),
c("sigma_s", sigma_prior),
c("mu_l", lambda_prior),
c("sigma_l", sigma_prior))
# generate data
s <- rep(1:5, 20)
rt <- emg::remg(100, mu=10, sigma=1, lambda=0.4)
# fit
fit <- b_reaction_time(t=rt, s=s, priors=priors, chains=1)
}
}
|
#' @details Because of the Reporting API structure, this function first requests
#' the report, then checks the reporting queue to see if the report is completed,
#' and when the report returns as "done" pulls the report from the API. This checking process
#' will occur up to the specified number of times (default 120), with a delay between
#' status checks (default 5 seconds). If the report does not return as "done" after the
#' number of tries have completed, the function will return an error message.
#'
#' @description A QueueOvertime report is a report where the only granularity allowed is time. This report allows for a single report suite, time granularity,
#' multiple metrics, and a single segment. It is similar to the "Key Metrics" report or a Custom Event report
#' within the Adobe Reports & Analytics interface. To get a summary report with no time granularity (i.e. a single row),
#' pass an empty string to the date.granularity function parameter.
#'
#' @title Run an Overtime Report
#'
#' @param reportsuite.id Report suite id
#' @param date.from Start date for the report (YYYY-MM-DD)
#' @param date.to End date for the report (YYYY-MM-DD)
#' @param metrics List of metrics to include in the report
#' @param date.granularity Time granularity of the report (year/month/week/day/hour/''), default to 'day'
#' @param segment.id Id(s) of Adobe Analytics segment to retrieve the report for
#' @param segment.inline Inline segment definition
#' @param anomaly.detection Set to TRUE to include forecast data (only valid for day granularity with small date ranges)
#' @param data.current TRUE or FALSE - Whether to include current data for reports that include today's date
#' @param expedite Set to TRUE to expedite the processing of this report
#' @param interval.seconds How long to wait between attempts
#' @param max.attempts Number of API attempts before stopping
#' @param validate Whether to submit the report definition for validation before requesting the data.
#'
#' @importFrom jsonlite toJSON unbox
#'
#' @return Data frame
#'
#' @examples
#' \dontrun{
#'
#' overtime1 <- QueueOvertime("your_report_suite",
#' date.from = "2014-04-01",
#' date.to = "2014-04-20",
#' metrics = c("pageviews", "visits", "bounces"),
#' date.granularity = "day")
#'
#' overtime2 <- QueueOvertime("your_report_suite",
#' date.from = "2014-04-01",
#' date.to = "2014-04-20",
#' metrics = c("pageviews", "visits", "bounces"),
#' date.granularity = "day",
#' segment.id = "5433e4e6e4b02df70be4ac63",
#' anomaly.detection = TRUE,
#' interval.seconds = 10,
#' max.attempts = 20)
#'
#' overtime3 <- QueueOvertime("your_report_suite",
#' date.from = "2014-04-01",
#' date.to = "2014-04-20",
#' metrics = c("pageviews", "visits", "bounces"),
#' date.granularity = "")
#' }
#'
#' @export
# Queue an Overtime report with the Adobe Reporting API and block until the
# queued report completes (polling handled by SubmitJsonQueueReport()).
QueueOvertime <- function(reportsuite.id, date.from, date.to, metrics,
date.granularity='day', segment.id='', segment.inline='', anomaly.detection=FALSE,
data.current=FALSE, expedite=FALSE,interval.seconds=5,max.attempts=120,validate=TRUE) {
# build JSON description
# we have to use unbox to force jsonlist not put strings into single-element arrays
# (jsonlite serialises length-1 vectors as JSON arrays unless wrapped in unbox())
report.description <- c()
# Seed the nested structure; c() on a 0-column, 1-row data.frame effectively
# yields an empty named list to which the scalar fields below are attached.
report.description$reportDescription <- c(data.frame(matrix(ncol=0, nrow=1)))
report.description$reportDescription$dateFrom <- unbox(date.from)
report.description$reportDescription$dateTo <- unbox(date.to)
report.description$reportDescription$reportSuiteID <- unbox(reportsuite.id)
report.description$reportDescription$dateGranularity <- unbox(date.granularity)
report.description$reportDescription$anomalyDetection <- unbox(anomaly.detection)
report.description$reportDescription$currentData <- unbox(data.current)
report.description$reportDescription$expedite <- unbox(expedite)
#Hack in locale, every method calls ApiRequest so this hopefully works
#Set encoding to utf-8 as well; if someone wanted to do base64 they are out of luck
report.description$reportDescription$locale <- unbox(AdobeAnalytics$SC.Credentials$locale)
report.description$reportDescription$elementDataEncoding <- unbox("utf8")
# An inline segment definition, when supplied, is sent as a one-element list.
if(segment.inline!="") {
report.description$reportDescription$segments <- list(segment.inline)
}
# A data.frame with an `id` column serialises to an array of {"id": ...} objects.
report.description$reportDescription$metrics = data.frame(id = metrics)
#If segment is null, apply the standard segment unbox function
# (note: only the first element of segment.id is inspected here)
if(as.list(segment.id)[1]==''){
report.description$reportDescription$segment_id <- unbox(segment.id)
}
#If segment is not null, treat it like a list of metrics.
else{
report.description$reportDescription$segments <- data.frame( id = segment.id)
}
# Serialise and queue the report, polling until done or max.attempts is hit.
report.data <- SubmitJsonQueueReport(toJSON(report.description),interval.seconds=interval.seconds,max.attempts=max.attempts,validate=validate)
return(report.data)
}
| /RSiteCatalyst/R/QueueOvertime.R | no_license | ingted/R-Examples | R | false | false | 5,254 | r | #' @details Because of the Reporting API structure, this function first requests
#' the report, then checks the reporting queue to see if the report is completed,
#' and when the report returns as "done" pulls the report from the API. This checking process
#' will occur up to the specified number of times (default 120), with a delay between
#' status checks (default 5 seconds). If the report does not return as "done" after the
#' number of tries have completed, the function will return an error message.
#'
#' @description A QueueOvertime report is a report where the only granularity allowed is time. This report allows for a single report suite, time granularity,
#' multiple metrics, and a single segment. It is similar to the "Key Metrics" report or a Custom Event report
#' within the Adobe Reports & Analytics interface. To get a summary report with no time granularity (i.e. a single row),
#' pass an empty string to the date.granularity function parameter.
#'
#' @title Run an Overtime Report
#'
#' @param reportsuite.id Report suite id
#' @param date.from Start date for the report (YYYY-MM-DD)
#' @param date.to End date for the report (YYYY-MM-DD)
#' @param metrics List of metrics to include in the report
#' @param date.granularity Time granularity of the report (year/month/week/day/hour/''), default to 'day'
#' @param segment.id Id(s) of Adobe Analytics segment to retrieve the report for
#' @param segment.inline Inline segment definition
#' @param anomaly.detection Set to TRUE to include forecast data (only valid for day granularity with small date ranges)
#' @param data.current TRUE or FALSE - Whether to include current data for reports that include today's date
#' @param expedite Set to TRUE to expedite the processing of this report
#' @param interval.seconds How long to wait between attempts
#' @param max.attempts Number of API attempts before stopping
#' @param validate Whether to submit the report definition for validation before requesting the data.
#'
#' @importFrom jsonlite toJSON unbox
#'
#' @return Data frame
#'
#' @examples
#' \dontrun{
#'
#' overtime1 <- QueueOvertime("your_report_suite",
#' date.from = "2014-04-01",
#' date.to = "2014-04-20",
#' metrics = c("pageviews", "visits", "bounces"),
#' date.granularity = "day")
#'
#' overtime2 <- QueueOvertime("your_report_suite",
#' date.from = "2014-04-01",
#' date.to = "2014-04-20",
#' metrics = c("pageviews", "visits", "bounces"),
#' date.granularity = "day",
#' segment.id = "5433e4e6e4b02df70be4ac63",
#' anomaly.detection = TRUE,
#' interval.seconds = 10,
#' max.attempts = 20)
#'
#' overtime3 <- QueueOvertime("your_report_suite",
#' date.from = "2014-04-01",
#' date.to = "2014-04-20",
#' metrics = c("pageviews", "visits", "bounces"),
#' date.granularity = "")
#' }
#'
#' @export
# Queue an Overtime report with the Adobe Reporting API and wait for the
# queued report to finish (polling is done by SubmitJsonQueueReport()).
QueueOvertime <- function(reportsuite.id, date.from, date.to, metrics,
date.granularity='day', segment.id='', segment.inline='', anomaly.detection=FALSE,
data.current=FALSE, expedite=FALSE,interval.seconds=5,max.attempts=120,validate=TRUE) {
  # Assemble the nested report description. unbox() marks scalars so that
  # jsonlite does not serialise them as one-element JSON arrays.
  desc <- c()
  desc$reportDescription <- c(data.frame(matrix(ncol=0, nrow=1)))
  desc$reportDescription$dateFrom <- unbox(date.from)
  desc$reportDescription$dateTo <- unbox(date.to)
  desc$reportDescription$reportSuiteID <- unbox(reportsuite.id)
  desc$reportDescription$dateGranularity <- unbox(date.granularity)
  desc$reportDescription$anomalyDetection <- unbox(anomaly.detection)
  desc$reportDescription$currentData <- unbox(data.current)
  desc$reportDescription$expedite <- unbox(expedite)
  # Locale comes from the stored credentials; the encoding is fixed to utf-8
  # (base64 is not supported by this wrapper).
  desc$reportDescription$locale <- unbox(AdobeAnalytics$SC.Credentials$locale)
  desc$reportDescription$elementDataEncoding <- unbox("utf8")
  # An inline segment definition, when given, is sent as a one-element list.
  if (segment.inline != "") {
    desc$reportDescription$segments <- list(segment.inline)
  }
  # A data.frame with an `id` column becomes an array of {"id": ...} objects.
  desc$reportDescription$metrics <- data.frame(id = metrics)
  # Stored segment ids are sent like metrics; an empty id is passed through
  # as an unboxed scalar (only the first element decides the branch).
  if (segment.id[1] != '') {
    desc$reportDescription$segments <- data.frame(id = segment.id)
  } else {
    desc$reportDescription$segment_id <- unbox(segment.id)
  }
  # Serialise, queue, and poll until the report is done (or attempts run out).
  queue.result <- SubmitJsonQueueReport(toJSON(desc),interval.seconds=interval.seconds,max.attempts=max.attempts,validate=validate)
  queue.result
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/evaluate-estimated-potential.R
\name{eval_estimated_potential}
\alias{eval_estimated_potential}
\title{Evaluate Estimated Potential}
\usage{
eval_estimated_potential(at, fit, k, i = 1, j = 1)
}
\arguments{
\item{fit}{a group penalised fit object}
\item{k}{which of the estimates to use}
\item{i}{species 1}
\item{j}{species 2 (can be i)}
\item{at}{values at which to evaluate the potential}
}
\description{
Evaluate Estimated Potential
}
| /man/eval_estimated_potential.Rd | no_license | antiphon/PenGE | R | false | true | 520 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/evaluate-estimated-potential.R
\name{eval_estimated_potential}
\alias{eval_estimated_potential}
\title{Evaluate Estimated Potential}
\usage{
eval_estimated_potential(at, fit, k, i = 1, j = 1)
}
\arguments{
\item{fit}{a group penalised fit object}
\item{k}{which of the estimates to use}
\item{i}{species 1}
\item{j}{species 2 (can be i)}
\item{at}{values at which to evaluate the potential}
}
\description{
Evaluate Estimated Potential
}
|
# Supplementary figure: compare the base model with the "DDM" variant and
# plot the difference in % infested / % vacant over time.
library(deSolve)
# IMPORTANT: UPDATE THE WORKING DIRECTORY BELOW
# (hard-coded setwd() placeholder that the user must edit before running)
setwd("UPDATE_PATH/bedbugdisclosure")
# Helper definitions -- presumably SetODEs, SetODEsDDM, GetBeta, GetInit
# used below; confirm against code/functions*.R.
source("code/functions.R")
source("code/functions_extra.R")
# 1. Set baseline prevalence (p), renter selectivity (s), # of disclosed compartments (Dnum), and years of simulation
#p <- 0.05
# (the commented-out value above is the alternative 5% baseline scenario)
p <- 0.01
s <- 0.5
nyears <- 20
Dnum <- 100
# 2. Set parameter values manually
# Return the model's fixed parameter set as a named numeric vector.
# Rates are written as reciprocals of durations in days (e.g. 1/(6*30) --
# presumably six 30-day months; confirm against the model description).
SetParametersManual <- function(){
  c(gamma = 1/(6*30),
    k     = 0.3,
    b     = 1.3,
    m     = 1/(2*365),
    n     = 1/(3*30),
    N     = 1000,
    D     = 365)
}
# Run both model variants and plot the difference in % infested / % vacant.
preparam <- SetParametersManual()
# 3. Solve for beta that will give the desired baseline prevalence
beta <- GetBeta(preparam, p)
# 4. Get initial conditions "at equilibrium"...
#...for base model
param <- c(preparam, beta, base.prev=p)
init <- GetInit(param)
# Base-model state: init (4 values) plus Sv2 and three cumulative counters.
y0 <- c(init, 0, 0, 0, 0)
names(y0)[5:8] <- c("Sv2","C_treat","C_turnover","C_vacancy")
#...for DDM
# DDM state: init plus Dnum "Sv2" compartments and the same three counters.
y0_DDM <- c(init, rep(0,Dnum), 0, 0,0)
names(y0_DDM)[5:(5+Dnum-1)] <- rep('Sv2',Dnum)
names(y0_DDM)[(5+Dnum):(5+Dnum+2)] <- c("C_treat","C_turnover","C_vacancy")
# 5. Format parameters and time to input into ode function
# preparam is indexed by position: gamma, k, b, m, n, N, D (see SetParametersManual).
pp <- list(beta = beta, gamma = preparam[1], k = preparam[2], b = preparam[3],
m = preparam[4], n = preparam[5], N = preparam[6], D = preparam[7],
d = s)
pp_DDM <- list(beta = beta, gamma = preparam[1], k = preparam[2], b = preparam[3],
m = preparam[4], n = preparam[5], N = preparam[6], D = preparam[7],
d = s, Dnum=Dnum)
# Daily time grid over the simulated horizon.
t <- seq(from=0, to=365*nyears+1, by=1)
# 6. Run the ode solver function for base and DDM models
out <- ode(y=y0, times = t, func=SetODEs, parms=pp)
outDDM <- ode(y=y0_DDM, times = t, func=SetODEsDDM, parms=pp_DDM)
# 7. Plot the difference in percent infested and percent vacant output by the
# two models
# Proportion infested and vacant according to base model
# NOTE(review): the column indices below assume the state ordering produced
# by SetODEs/SetODEsDDM (column 1 = time) -- confirm against functions.R.
Iprop <- (out[,3] + out[,5]) / pp$N
Vprop <- (out[,4] + out[,5] + out[,6]) / pp$N
# Proportion of infested and vacant according to DDM
Sv2tot_DDM <- rowSums(outDDM[, 6 : (6 + Dnum - 1)])
Iprop_DDM <- (outDDM[,3] + outDDM[,5]) / pp_DDM$N
Vprop_DDM <- (outDDM[,4] + outDDM[,5] + Sv2tot_DDM) / pp_DDM$N
# Calculate the difference in percentages between the two models
I_diff <- (Iprop_DDM - Iprop)*100
V_diff <- (Vprop_DDM - Vprop)*100
# Shared y range so both curves fit on the same panel.
ymin <- min(c(I_diff, V_diff))
ymax <- max(c(I_diff, V_diff))
#pdf("figures_supplement/Routput/sfig_ddm_props_prev5.pdf", width = 3.5, height = 4)
# (commented-out path above is the 5% baseline-prevalence variant)
pdf("figures_supplement/Routput/sfig_ddm_props_prev1.pdf", width = 3.5, height = 4)
plot(t/365, I_diff, xlab = "Year", ylab = "Difference in %", type = "l",
col = "red", ylim = c(ymin, ymax), lwd = 2)
lines(t/365, V_diff, col = "blue", lty = 2, lwd = 2)
# NOTE(review): plot.new() starts a blank page, so the legend is drawn on
# page 2 of the PDF rather than on the plot itself -- confirm intended.
plot.new()
legend("right", legend=c("Infested","Vacant"), bty="n",
col=c("red","blue"), lty=c(1,2),lwd=c(2, 2), cex=0.8)
dev.off()
| /code/sfigure_ddm_props.R | no_license | sherriexie/bedbugdisclosure | R | false | false | 2,880 | r | library(deSolve)
# Supplementary figure script (deSolve is loaded on the preceding line):
# compares the base model with the "DDM" variant.
# IMPORTANT: UPDATE THE WORKING DIRECTORY BELOW
# (hard-coded setwd() placeholder that the user must edit before running)
setwd("UPDATE_PATH/bedbugdisclosure")
# Helper definitions used below (model functions, GetBeta, GetInit).
source("code/functions.R")
source("code/functions_extra.R")
# 1. Set baseline prevalence (p), renter selectivity (s), # of disclosed compartments (Dnum), and years of simulation
#p <- 0.05
# (the commented-out value above is the alternative 5% baseline scenario)
p <- 0.01
s <- 0.5
nyears <- 20
Dnum <- 100
# 2. Set parameter values manually
# Fixed simulation parameters as a named numeric vector. Rates appear as
# reciprocals of durations in days (e.g. 1/(2*365) -- presumably a two-year
# mean duration; confirm with the accompanying model description).
SetParametersManual <- function(){
  vals <- c(1/(6*30), 0.3, 1.3, 1/(2*365), 1/(3*30), 1000, 365)
  names(vals) <- c("gamma", "k", "b", "m", "n", "N", "D")
  vals
}
# Run both model variants, then plot the difference in % infested / % vacant.
preparam <- SetParametersManual()
# 3. Solve for beta that will give the desired baseline prevalence
beta <- GetBeta(preparam, p)
# 4. Get initial conditions "at equilibrium"...
#...for base model
param <- c(preparam, beta, base.prev=p)
init <- GetInit(param)
y0 <- c(init, 0, 0, 0, 0)
names(y0)[5:8] <- c("Sv2","C_treat","C_turnover","C_vacancy")
#...for DDM
# DDM adds Dnum "Sv2" compartments plus the same three counters.
y0_DDM <- c(init, rep(0,Dnum), 0, 0,0)
names(y0_DDM)[5:(5+Dnum-1)] <- rep('Sv2',Dnum)
names(y0_DDM)[(5+Dnum):(5+Dnum+2)] <- c("C_treat","C_turnover","C_vacancy")
# 5. Format parameters and time to input into ode function
# preparam is indexed positionally: gamma, k, b, m, n, N, D.
pp <- list(beta = beta, gamma = preparam[1], k = preparam[2], b = preparam[3],
m = preparam[4], n = preparam[5], N = preparam[6], D = preparam[7],
d = s)
pp_DDM <- list(beta = beta, gamma = preparam[1], k = preparam[2], b = preparam[3],
m = preparam[4], n = preparam[5], N = preparam[6], D = preparam[7],
d = s, Dnum=Dnum)
t <- seq(from=0, to=365*nyears+1, by=1)
# 6. Run the ode solver function for base and DDM models
out <- ode(y=y0, times = t, func=SetODEs, parms=pp)
outDDM <- ode(y=y0_DDM, times = t, func=SetODEsDDM, parms=pp_DDM)
# 7. Plot the difference in percent infested and percent vacant output by the
# two models
# Proportion infested and vacant according to base model
# NOTE(review): column indices assume the state ordering from SetODEs /
# SetODEsDDM (column 1 = time) -- confirm against functions.R.
Iprop <- (out[,3] + out[,5]) / pp$N
Vprop <- (out[,4] + out[,5] + out[,6]) / pp$N
# Proportion of infested and vacant according to DDM
Sv2tot_DDM <- rowSums(outDDM[, 6 : (6 + Dnum - 1)])
Iprop_DDM <- (outDDM[,3] + outDDM[,5]) / pp_DDM$N
Vprop_DDM <- (outDDM[,4] + outDDM[,5] + Sv2tot_DDM) / pp_DDM$N
# Calculate the difference in percentages between the two models
I_diff <- (Iprop_DDM - Iprop)*100
V_diff <- (Vprop_DDM - Vprop)*100
ymin <- min(c(I_diff, V_diff))
ymax <- max(c(I_diff, V_diff))
#pdf("figures_supplement/Routput/sfig_ddm_props_prev5.pdf", width = 3.5, height = 4)
# (commented-out path above is the 5% baseline-prevalence variant)
pdf("figures_supplement/Routput/sfig_ddm_props_prev1.pdf", width = 3.5, height = 4)
plot(t/365, I_diff, xlab = "Year", ylab = "Difference in %", type = "l",
col = "red", ylim = c(ymin, ymax), lwd = 2)
lines(t/365, V_diff, col = "blue", lty = 2, lwd = 2)
# NOTE(review): plot.new() opens a blank page, so the legend lands on page 2
# of the PDF rather than on the plot -- confirm this is intended.
plot.new()
legend("right", legend=c("Infested","Vacant"), bty="n",
col=c("red","blue"), lty=c(1,2),lwd=c(2, 2), cex=0.8)
dev.off()
|
## Getting full dataset
## (T/F spelled out as TRUE/FALSE: T and F are ordinary variables in R and
## can be reassigned, so the full names are safer)
data_full <- read.csv("household_power_consumption.txt", header=TRUE, sep=';', na.strings="?",
nrows=2075259, check.names=FALSE, stringsAsFactors=FALSE, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data to the two days of interest, then freeing the full set
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates: combine Date and Time into a single POSIXct timestamp
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 4: 2x2 panel of the four time series
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage (volt)", xlab="")
## FIX: this panel plots the sub-metering series, so label it accordingly
## (the original ylab was copy-pasted from the first panel)
plot(Sub_metering_1~Datetime, type="l",
ylab="Energy sub metering", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), y.intersp=0.4)
## FIX: typo in the y label ("Rective" -> "Reactive")
plot(Global_reactive_power~Datetime, type="l",
ylab="Global Reactive Power (kilowatts)",xlab="")
})
## Saving to file: copy the on-screen device to a 480x480 PNG
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
| /plot4.R | no_license | vikrant-sahu/ExData_Plotting1 | R | false | false | 1,326 | r | ## Getting full dataset
# Read the full household power dataset ("?" marks missing values) and parse
# the Date column; the "## Getting full dataset" header precedes this line.
data_full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data
# Keep only the two days of interest, then free the full dataset.
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates
# Combine Date and Time into a single POSIXct timestamp for the x axis.
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 4
# 2x2 panel layout for the four time series.
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage (volt)", xlab="")
# NOTE(review): this panel plots the sub-metering series but reuses the
# first panel's ylab -- looks like a copy-paste label; confirm and fix.
plot(Sub_metering_1~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), y.intersp=0.4)
# NOTE(review): "Rective" in the ylab below is a typo for "Reactive".
plot(Global_reactive_power~Datetime, type="l",
ylab="Global Rective Power (kilowatts)",xlab="")
})
## Saving to file
# Copy the on-screen device to a 480x480 PNG, then close it.
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.