content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
## cacheSolve() calculates the inverse of the input matrix. If the inverse already exists
## then it is retrieved from the cache.
##makeCacheMatrix creates a vector of functions for an input matrix which
## cacheSolve() uses to retrieve an already calculated inverse
## Creates vector to store retrieve and get input matrices and their inverses
## Construct a cache-aware matrix wrapper.
##
## Returns a list of four closures that all share this call's environment:
##   set(y)      -- replace the stored matrix and drop any cached inverse
##   get()       -- return the stored matrix
##   setinv(m)   -- store a computed inverse
##   getinv()    -- return the cached inverse, or NULL if none is stored
makeCacheMatrix <- function(x = matrix(, ncol = 2, nrow = 2)) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      # a new matrix invalidates any previously cached inverse
      cached_inverse <<- NULL
    },
    get = function() {
      x
    },
    setinv = function(inv_mat) {
      cached_inverse <<- inv_mat
    },
    getinv = function() {
      cached_inverse
    }
  )
}
## Returns the inverse of input matrix. If the inverse already exists in cache then
## that value is returned.
## Return the inverse of the special "matrix" x created by makeCacheMatrix().
##
## If the inverse has already been computed it is returned from the cache
## (with a message); otherwise it is computed with solve(), stored in the
## cache via x$setinv(), and returned. Arguments in ... are forwarded to
## solve().
cacheSolve <- function(x, ...) {
  m <- x$getinv()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # use `<-` for assignment (was `out_inv=solve(data,...)`)
  out_inv <- solve(data, ...)
  x$setinv(out_inv)
  out_inv
}
|
/cachematrix.R
|
no_license
|
aneeshsathe/ProgrammingAssignment2
|
R
| false
| false
| 1,215
|
r
|
## cacheSolve() calculates the inverse of the input matrix. If the inverse already exists
## then it is retrieved from the cache.
##makeCacheMatrix creates a vector of functions for an input matrix which
## cacheSolve() uses to retrieve an already calculated inverse
## Creates vector to store retrieve and get input matrices and their inverses
## Construct a cache-aware matrix wrapper.
##
## Returns a list of four closures that all share this call's environment:
##   set(y)      -- replace the stored matrix and drop any cached inverse
##   get()       -- return the stored matrix
##   setinv(m)   -- store a computed inverse
##   getinv()    -- return the cached inverse, or NULL if none is stored
makeCacheMatrix <- function(x = matrix(, ncol = 2, nrow = 2)) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      # a new matrix invalidates any previously cached inverse
      cached_inverse <<- NULL
    },
    get = function() {
      x
    },
    setinv = function(inv_mat) {
      cached_inverse <<- inv_mat
    },
    getinv = function() {
      cached_inverse
    }
  )
}
## Returns the inverse of input matrix. If the inverse already exists in cache then
## that value is returned.
## Return the inverse of the special "matrix" x created by makeCacheMatrix().
##
## If the inverse has already been computed it is returned from the cache
## (with a message); otherwise it is computed with solve(), stored in the
## cache via x$setinv(), and returned. Arguments in ... are forwarded to
## solve().
cacheSolve <- function(x, ...) {
  m <- x$getinv()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # use `<-` for assignment (was `out_inv=solve(data,...)`)
  out_inv <- solve(data, ...)
  x$setinv(out_inv)
  out_inv
}
|
## Put comments here that give an overall description of what your
## functions do
## Functions that cache the inverse of a matrix
+##
+## Usage example:
+##
+## > source('cachematrix.R')
+## > m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), c(2, 2)))
+## > cacheSolve(m)
+## [,1] [,2]
+## [1,] 0.5 0.0
+## [2,] 0.0 0.5
+## Create a special "matrix", which is a list containing
+## a function to
+## - set the value of the matrix
+## - get the value of the matrix
+## - set the value of the inverse matrix
+## - get the value of the inverse matrix
makeCacheMatrix <- function(x = matrix()) {
-
+ i <- NULL
+ set <- function(y) {
+ x <<- y
+ i <<- NULL
+ }
+ get <- function() x
+ setinverse <- function(inv) i <<- inv
+ getinverse <- function() i
+ list(
+ set = set,
+ get = get,
+ setinverse = setinverse,
+ getinverse = getinverse
+ )
}
+## Calculate the inverse of the special "matrix" created with the above
+## function, reusing cached result if it is available
cacheSolve <- function(x, ...) {
- ## Return a matrix that is the inverse of 'x'
-}
+ i <- x$getinverse()
+ if(!is.null(i)) {
+ message("getting cached data")
+ return(i)
+ }
+ m <- x$get()
+ i <- solve(m, ...)
+ x$setinverse(i)
+ i
+}
|
/cachematrix.R
|
no_license
|
sandyjera/ProgrammingAssignment2
|
R
| false
| false
| 1,418
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Functions that cache the inverse of a matrix
+##
+## Usage example:
+##
+## > source('cachematrix.R')
+## > m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), c(2, 2)))
+## > cacheSolve(m)
+## [,1] [,2]
+## [1,] 0.5 0.0
+## [2,] 0.0 0.5
+## Create a special "matrix", which is a list containing
+## a function to
+## - set the value of the matrix
+## - get the value of the matrix
+## - set the value of the inverse matrix
+## - get the value of the inverse matrix
makeCacheMatrix <- function(x = matrix()) {
-
+ i <- NULL
+ set <- function(y) {
+ x <<- y
+ i <<- NULL
+ }
+ get <- function() x
+ setinverse <- function(inv) i <<- inv
+ getinverse <- function() i
+ list(
+ set = set,
+ get = get,
+ setinverse = setinverse,
+ getinverse = getinverse
+ )
}
+## Calculate the inverse of the special "matrix" created with the above
+## function, reusing cached result if it is available
cacheSolve <- function(x, ...) {
- ## Return a matrix that is the inverse of 'x'
-}
+ i <- x$getinverse()
+ if(!is.null(i)) {
+ message("getting cached data")
+ return(i)
+ }
+ m <- x$get()
+ i <- solve(m, ...)
+ x$setinverse(i)
+ i
+}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lexmodelsv2_operations.R
\name{lexmodelsv2_list_bot_aliases}
\alias{lexmodelsv2_list_bot_aliases}
\title{Gets a list of aliases for the specified bot}
\usage{
lexmodelsv2_list_bot_aliases(botId, maxResults = NULL, nextToken = NULL)
}
\arguments{
\item{botId}{[required] The identifier of the bot to list aliases for.}
\item{maxResults}{The maximum number of aliases to return in each page of results. If
there are fewer results than the max page size, only the actual number
of results are returned.}
\item{nextToken}{If the response from the
\code{\link[=lexmodelsv2_list_bot_aliases]{list_bot_aliases}} operation contains
more results than specified in the \code{maxResults} parameter, a token is
returned in the response. Use that token in the \code{nextToken} parameter to
return the next page of results.}
}
\description{
Gets a list of aliases for the specified bot.
See \url{https://www.paws-r-sdk.com/docs/lexmodelsv2_list_bot_aliases/} for full documentation.
}
\keyword{internal}
|
/cran/paws.machine.learning/man/lexmodelsv2_list_bot_aliases.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 1,071
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lexmodelsv2_operations.R
\name{lexmodelsv2_list_bot_aliases}
\alias{lexmodelsv2_list_bot_aliases}
\title{Gets a list of aliases for the specified bot}
\usage{
lexmodelsv2_list_bot_aliases(botId, maxResults = NULL, nextToken = NULL)
}
\arguments{
\item{botId}{[required] The identifier of the bot to list aliases for.}
\item{maxResults}{The maximum number of aliases to return in each page of results. If
there are fewer results than the max page size, only the actual number
of results are returned.}
\item{nextToken}{If the response from the
\code{\link[=lexmodelsv2_list_bot_aliases]{list_bot_aliases}} operation contains
more results than specified in the \code{maxResults} parameter, a token is
returned in the response. Use that token in the \code{nextToken} parameter to
return the next page of results.}
}
\description{
Gets a list of aliases for the specified bot.
See \url{https://www.paws-r-sdk.com/docs/lexmodelsv2_list_bot_aliases/} for full documentation.
}
\keyword{internal}
|
# FUNCTIONS #
filename_to_metadata <- function(file_path){
  # Read the Hondo data file names in `file_path` and pull out the metadata
  # (stand, month, year) encoded in each name, e.g. "Hondo189_JUN_... .txt".
  # Returns a data frame with columns month (1-12, NA if unrecognized),
  # year ("19xx"/"20xx" as character), and stand (single character).
  # Relies on dplyr, tidyr, and stringr being attached.
  myfiles <- as.data.frame(list.files(file_path,
                                      full.names = FALSE,
                                      pattern = "*.txt"))
  colnames(myfiles) <- c("file_name")
  myfiles <- myfiles %>%
    # split the name into columns named hondo and month (aka Hondo189 and JUN)
    separate(file_name, c("hondo", "month", NA)) %>%
    # make sure the month is capitalized;
    # year = last 2 characters of hondo, stand = 3rd character from the end
    mutate("month" = str_to_upper(month),
           "year" = as.numeric(str_sub(hondo, start = -2)),
           "stand" = str_sub(hondo, start = -3, end = -3)) %>%
    # two-digit years below 50 are 20xx, the rest are 19xx
    mutate("year" = as.character(if_else(year < 50, year + 2000, year + 1900))) %>%
    # change the 3-letter month abbreviation to its number; matching against
    # the upper-cased built-in month.abb replaces the former 12-deep nested
    # ifelse() chain and still yields NA for unrecognized months
    mutate("month" = as.numeric(match(month, str_to_upper(month.abb)))) %>%
    # remove the hondo column (all data has been extracted from it)
    select(-hondo)
  return(myfiles)
}
read_in_txt_file <- function(file_path){
  ### READ IN FILE ###
  # Read every line of a text file into a list, one element per line.
  # readLines() on the path opens and closes the connection itself and is
  # vectorized, replacing the previous while loop that grew a list one
  # element at a time; warn = FALSE matches the old behavior of silently
  # accepting a file with no final newline.
  as.list(readLines(file_path, warn = FALSE))
}
## Convert the list of raw file lines produced by read_in_txt_file() into a
## tidy long data frame with columns Species, Quad, and Cover.
## Relies on stringr, dplyr, tidyr, and janitor being attached.
txt_file_to_df <- function(results_list){
### TURN INTO DATAFRAME ###
# remove remaining white spaces and make everything uppercase
results_list <- lapply(results_list, str_trim, side = "both") %>%
lapply(., str_squish) %>%
lapply(., str_to_upper)
# get the rows after the metadata and unlist them so each value gets read
# separately; otherwise, each row is one big long character string
split_list <- lapply(results_list[1:length(results_list)],
str_split,
pattern = " ")
## find first "quad" row and cut out the rest (remove metadata at top of file)
# empty vector of the row indices whose first token is 'QUAD'
quad_rows <- vector()
for (i in 1:length(split_list)){
if (split_list[[i]][[1]][1] == 'QUAD') {
# add each row/list num that starts with 'QUAD' to the vector
quad_rows <- c(quad_rows, i)
}
}
# select only the rows/lists from the first 'QUAD' row through the end
# this removes all of the lines of metadata from the top of the file
split_list <- split_list[min(quad_rows):length(split_list)]
## in order to bind the list together as rows, they need to be the same length
for (i in 1:length(split_list)){
# get length of row first row (a 'QUAD' row)
# NOTE(review): max_length is hard-coded to 36 columns; confirm this holds
# for every input file rather than deriving it from the 'QUAD' header row
max_length <- 36
row_length <- length(split_list[[i]][[1]])
## make each type of row the correct length
if (row_length > 1){
# this code adds NAs to the row to match max_length
if (row_length < max_length) {
# if the length of the row is less than the max length, make a vector
# of NAs needed to match the max length
add_NAs <- vector(mode = "character", length = (max_length - row_length)) %>%
na_if("")
# append that vector of NAs to the row
split_list[[i]][[1]] <- c(split_list[[i]][[1]], add_NAs)
}
# for lists that are empty, make a vector of NAs as long as max_length
} else if (row_length <= 1) {
split_list[[i]][[1]][1:max_length] <- NA
}
}
# stitch lists together to act as rows in a dataframe
cover_df <- data.frame(matrix(unlist(split_list),
nrow = length(split_list),
byrow = T)) %>%
# remove the empty rows
janitor::remove_empty("rows") %>%
# make the first row ("QUAD") into the column names
janitor::row_to_names(., row_number = 1) %>%
# remove any remaining "QUAD" rows (filter throws an error, for some reason)
.[.$QUAD != 'QUAD',]
# make sure all columns have unique names
colnames(cover_df) <- make.unique(colnames(cover_df))
# if any columns are all NA, remove them
not_any_na <- function(x) all(!is.na(x))
cover_df <- cover_df %>% select(where(not_any_na))
# pivot wide quad columns into one Quad/Cover pair per row (tidy long form)
cover_df_long <- rename(cover_df, "Species" = "QUAD") %>%
pivot_longer(2:ncol(cover_df), names_to = "Quad") %>%
rename("Cover" = "value")
return(cover_df_long)
}
|
/HONDO/VascularCover/scripts/01_functions.R
|
no_license
|
avhesketh/LDP_SEADYN
|
R
| false
| false
| 5,910
|
r
|
# FUNCTIONS #
filename_to_metadata <- function(file_path){
  # Read the Hondo data file names in `file_path` and pull out the metadata
  # (stand, month, year) encoded in each name, e.g. "Hondo189_JUN_... .txt".
  # Returns a data frame with columns month (1-12, NA if unrecognized),
  # year ("19xx"/"20xx" as character), and stand (single character).
  # Relies on dplyr, tidyr, and stringr being attached.
  myfiles <- as.data.frame(list.files(file_path,
                                      full.names = FALSE,
                                      pattern = "*.txt"))
  colnames(myfiles) <- c("file_name")
  myfiles <- myfiles %>%
    # split the name into columns named hondo and month (aka Hondo189 and JUN)
    separate(file_name, c("hondo", "month", NA)) %>%
    # make sure the month is capitalized;
    # year = last 2 characters of hondo, stand = 3rd character from the end
    mutate("month" = str_to_upper(month),
           "year" = as.numeric(str_sub(hondo, start = -2)),
           "stand" = str_sub(hondo, start = -3, end = -3)) %>%
    # two-digit years below 50 are 20xx, the rest are 19xx
    mutate("year" = as.character(if_else(year < 50, year + 2000, year + 1900))) %>%
    # change the 3-letter month abbreviation to its number; matching against
    # the upper-cased built-in month.abb replaces the former 12-deep nested
    # ifelse() chain and still yields NA for unrecognized months
    mutate("month" = as.numeric(match(month, str_to_upper(month.abb)))) %>%
    # remove the hondo column (all data has been extracted from it)
    select(-hondo)
  return(myfiles)
}
read_in_txt_file <- function(file_path){
  ### READ IN FILE ###
  # Read every line of a text file into a list, one element per line.
  # readLines() on the path opens and closes the connection itself and is
  # vectorized, replacing the previous while loop that grew a list one
  # element at a time; warn = FALSE matches the old behavior of silently
  # accepting a file with no final newline.
  as.list(readLines(file_path, warn = FALSE))
}
## Convert the list of raw file lines produced by read_in_txt_file() into a
## tidy long data frame with columns Species, Quad, and Cover.
## Relies on stringr, dplyr, tidyr, and janitor being attached.
txt_file_to_df <- function(results_list){
### TURN INTO DATAFRAME ###
# remove remaining white spaces and make everything uppercase
results_list <- lapply(results_list, str_trim, side = "both") %>%
lapply(., str_squish) %>%
lapply(., str_to_upper)
# get the rows after the metadata and unlist them so each value gets read
# separately; otherwise, each row is one big long character string
split_list <- lapply(results_list[1:length(results_list)],
str_split,
pattern = " ")
## find first "quad" row and cut out the rest (remove metadata at top of file)
# empty vector of the row indices whose first token is 'QUAD'
quad_rows <- vector()
for (i in 1:length(split_list)){
if (split_list[[i]][[1]][1] == 'QUAD') {
# add each row/list num that starts with 'QUAD' to the vector
quad_rows <- c(quad_rows, i)
}
}
# select only the rows/lists from the first 'QUAD' row through the end
# this removes all of the lines of metadata from the top of the file
split_list <- split_list[min(quad_rows):length(split_list)]
## in order to bind the list together as rows, they need to be the same length
for (i in 1:length(split_list)){
# get length of row first row (a 'QUAD' row)
# NOTE(review): max_length is hard-coded to 36 columns; confirm this holds
# for every input file rather than deriving it from the 'QUAD' header row
max_length <- 36
row_length <- length(split_list[[i]][[1]])
## make each type of row the correct length
if (row_length > 1){
# this code adds NAs to the row to match max_length
if (row_length < max_length) {
# if the length of the row is less than the max length, make a vector
# of NAs needed to match the max length
add_NAs <- vector(mode = "character", length = (max_length - row_length)) %>%
na_if("")
# append that vector of NAs to the row
split_list[[i]][[1]] <- c(split_list[[i]][[1]], add_NAs)
}
# for lists that are empty, make a vector of NAs as long as max_length
} else if (row_length <= 1) {
split_list[[i]][[1]][1:max_length] <- NA
}
}
# stitch lists together to act as rows in a dataframe
cover_df <- data.frame(matrix(unlist(split_list),
nrow = length(split_list),
byrow = T)) %>%
# remove the empty rows
janitor::remove_empty("rows") %>%
# make the first row ("QUAD") into the column names
janitor::row_to_names(., row_number = 1) %>%
# remove any remaining "QUAD" rows (filter throws an error, for some reason)
.[.$QUAD != 'QUAD',]
# make sure all columns have unique names
colnames(cover_df) <- make.unique(colnames(cover_df))
# if any columns are all NA, remove them
not_any_na <- function(x) all(!is.na(x))
cover_df <- cover_df %>% select(where(not_any_na))
# pivot wide quad columns into one Quad/Cover pair per row (tidy long form)
cover_df_long <- rename(cover_df, "Species" = "QUAD") %>%
pivot_longer(2:ncol(cover_df), names_to = "Quad") %>%
rename("Cover" = "value")
return(cover_df_long)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inputs.R
\name{phoneInput}
\alias{myInput}
\alias{phoneInput}
\alias{zipInput}
\title{Create a telephone number input control}
\usage{
phoneInput(inputId, label, value = "", width = NULL, placeholder = NULL,
...)
zipInput(inputId, label, value = "", width = NULL, placeholder = NULL,
...)
myInput(type, inputId, label, value = "", width = NULL,
placeholder = NULL, class = "", ...)
}
\arguments{
\item{inputId}{The input slot that will be used to access the value.}
\item{label}{Display the label for the control, or NULL for no label.}
\item{value}{Initial Value.}
\item{width}{The width of the input, e.g. '400px', or '100%'; see \link[shiny]{validateCssUnit}}
\item{placeholder}{A character string giving the user a hint as to what can be entered into the control. Internet Explorer 8 and 9 do not support this option.}
}
\value{
A phone input control that can be added to a UI definition.
}
\description{
Creates an input for telephone numbers, which is validated using the formance JavaScript Library
}
\seealso{
\link[shiny]{textInput}
}
|
/man/phoneInput.Rd
|
no_license
|
carlganz/formancer
|
R
| false
| true
| 1,133
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inputs.R
\name{phoneInput}
\alias{myInput}
\alias{phoneInput}
\alias{zipInput}
\title{Create a telephone number input control}
\usage{
phoneInput(inputId, label, value = "", width = NULL, placeholder = NULL,
...)
zipInput(inputId, label, value = "", width = NULL, placeholder = NULL,
...)
myInput(type, inputId, label, value = "", width = NULL,
placeholder = NULL, class = "", ...)
}
\arguments{
\item{inputId}{The input slot that will be used to access the value.}
\item{label}{Display the label for the control, or NULL for no label.}
\item{value}{Initial Value.}
\item{width}{The width of the input, e.g. '400px', or '100%'; see \link[shiny]{validateCssUnit}}
\item{placeholder}{A character string giving the user a hint as to what can be entered into the control. Internet Explorer 8 and 9 do not support this option.}
}
\value{
A phone input control that can be added to a UI definition.
}
\description{
Creates an input for telephone numbers, which is validated using the formance JavaScript Library
}
\seealso{
\link[shiny]{textInput}
}
|
source("helpers.R")
source("libraries.R")
## Shiny server: builds value boxes, a leaflet pickup map, a tip-vs-distance
## scatter chart, and three per-day bar charts for five dashboard tabs
## (Group Ride, Negotiated Fare, Nassau, Newark, JFK). Each tab is driven by
## a pre-loaded trip data frame (df5, df4, df3, df2, df respectively) and a
## matching per-day summary table (dategr, datenf, datena, date2ne, dfdate)
## -- presumably created by helpers.R; verify there.
server <- function(input, output, session) {
#Group Ride Tab (df5)
#Value Boxes
#Median Distance
output$box1gr <- renderValueBox({
valueBox(
value = prettyNum(round(median(df5$Trip_distance),2), big.mark = ",")
,subtitle = "Median Distance"
,color = "green"
,icon = icon("arrows-h")
)})
#Average Fare Amount
output$box2gr <- renderValueBox({
valueBox(
value = prettyNum(round(mean(df5$Fare_amount),2), big.mark = ",")
,subtitle = "Average Fare Amount"
,color = "green"
,icon = icon("dollar")
)})
#Total Trips
output$box3gr <- renderValueBox({
valueBox(
value = prettyNum(length(df5$RateCodeID), big.mark = ",")
,subtitle = "Total Trips"
,color = "green"
,icon = icon("car")
)})
#Fare Amount per Mile
# NOTE(review): Fare_amount/Trip_distance is a per-row vector, not a single
# ratio -- was round(sum(fare)/sum(distance), 2) intended? confirm
output$box4gr <- renderValueBox({
valueBox(
value = prettyNum(round((df5$Fare_amount/df5$Trip_distance),2), big.mark = ",")
,subtitle = "Fare Amount per Mile"
,color = "black"
,icon = icon("car")
)})
#Total Money Made
# NOTE(review): sum(round(x), 1) folds the literal 1 into the sum (off by
# one); round(sum(x), 1) was probably intended. Also icon("black") is not a
# Font Awesome icon name -- confirm which icon was meant.
output$box5gr <- renderValueBox({
valueBox(
value = prettyNum(sum(round(df5$Fare_amount),1), big.mark = ",")
,subtitle = "Total Money Made"
,color = "black"
,icon = icon("black")
)})
#Average Tip Amount
# NOTE(review): mean(round(x), 2) passes 2 as mean()'s `trim` argument
# (capped at 0.5, i.e. a trimmed/median value), not as rounding digits;
# round(mean(x), 2) was probably intended.
output$box6gr <- renderValueBox({
valueBox(
value = prettyNum(mean(round(df5$Tip_amount),2), big.mark = ",")
,subtitle = "Average Tip Amount"
,color = "black"
,icon = icon("dollar")
)})
#Leaflet Map of pickup locations
output$mymap3gr <- renderLeaflet({
# define the leaflet map object
leaflet() %>%
addTiles() %>%
#setView(0,0,2) %>%
setView(-73.9465, 40.8116, zoom = 14) %>%
addProviderTiles(providers$CartoDB.Positron) %>%
addCircleMarkers(lng = df5$Pickup_longitude, lat = df5$Pickup_latitude
,radius = 6
,color = "black"
,stroke = FALSE
,fillOpacity = 0.5
,popup = df5$Trip_distance)
})
#Distance Vs Tip scatter chart (x = distance, y = tip)
output$mainChart3gr <- renderHighchart({
hc <- highchart()
hc <- hc %>%
hc_add_series_scatter(df5$Trip_distance, df5$Tip_amount ,showInLegend = FALSE) %>%
hc_colors(color='black') %>%
hc_yAxis(title=list(text='Tip Amount')) %>%
hc_xAxis(title=list(text='Trip Distance'))%>%
hc_tooltip(headerFormat = "", pointFormat = "Tip: {point.y} <br> Distance: {point.x}")
})
#Barchart for Day Vs Count
output$barChart1gr <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = dategr$date) %>%
hc_add_series(name = "Total Number of Trips", data = dategr$count, type = "column" ,color = "black")
hc
})
#Barchart for Day Vs Average Distance
output$barChart2gr <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = dategr$date) %>%
hc_add_series(name = "Average Distance", data = dategr$mean, color = "black")
hc
})
#Barchart for Day Vs Total Distance
output$barChart3gr <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = dategr$date) %>%
hc_add_series(name = "Total Distance", data = dategr$sum, color = "black")
hc
})
#NEGOTIATED FARE Tab (df4)
#Value Boxes
#Median Distance
output$box1nf <- renderValueBox({
valueBox(
value = prettyNum(round(median(df4$Trip_distance),2), big.mark = ",")
,subtitle = "Median Distance"
,color = "green"
,icon = icon("arrows-h")
)})
#Average Fare Amount
output$box2nf <- renderValueBox({
valueBox(
value = prettyNum(round(mean(df4$Fare_amount),2), big.mark = ",")
,subtitle = "Average Fare Amount"
,color = "green"
,icon = icon("dollar")
)})
#Total Trips
output$box3nf <- renderValueBox({
valueBox(
value = prettyNum(length(df4$RateCodeID), big.mark = ",")
,subtitle = "Total Trips"
,color = "green"
,icon = icon("car")
)})
#Fare Amount per Mile
# NOTE(review): mixes datasets -- df4$Fare_amount is divided by
# df2$Trip_distance; df4$Trip_distance was probably intended.
output$box4nf <- renderValueBox({
valueBox(
value = prettyNum(round((df4$Fare_amount/df2$Trip_distance),2), big.mark = ",")
,subtitle = "Fare Amount per Mile"
,color = "black"
,icon = icon("car")
)})
#Total Money Made (see NOTE on box5gr: sum(round(x), 1) adds 1; icon name)
output$box5nf <- renderValueBox({
valueBox(
value = prettyNum(sum(round(df4$Fare_amount),1), big.mark = ",")
,subtitle = "Total Money Made"
,color = "black"
,icon = icon("black")
)})
#Average Tip Amount (see NOTE on box6gr: 2 is mean()'s trim argument)
output$box6nf <- renderValueBox({
valueBox(
value = prettyNum(mean(round(df4$Tip_amount),2), big.mark = ",")
,subtitle = "Average Tip Amount"
,color = "black"
,icon = icon("dollar")
)})
#Leaflet Map of pickup locations
output$mymap3nf <- renderLeaflet({
# define the leaflet map object
leaflet() %>%
addTiles() %>%
#setView(0,0,2) %>%
setView(-73.9465, 40.8116, zoom = 14) %>%
addProviderTiles(providers$CartoDB.Positron) %>%
addCircleMarkers(lng = df4$Pickup_longitude, lat = df4$Pickup_latitude
,radius = 6
,color = "black"
,stroke = FALSE
,fillOpacity = 0.5
,popup = df4$Trip_distance)
})
#Distance Vs Tip scatter chart
# NOTE(review): here x = Tip_amount and y = Trip_distance, but the axis
# titles still label x as 'Trip Distance' and y as 'Tip Amount' (the Group
# Ride chart passes the arguments the other way round) -- confirm which
# orientation is intended. The same applies to the na/ne/JFK charts below.
output$mainChart3nf <- renderHighchart({
hc <- highchart()
hc <- hc %>%
hc_add_series_scatter(df4$Tip_amount, df4$Trip_distance, showInLegend = FALSE) %>%
hc_colors(color='black') %>%
hc_yAxis(title=list(text='Tip Amount')) %>%
hc_xAxis(title=list(text='Trip Distance'))%>%
hc_tooltip(headerFormat = "", pointFormat = "Tip: {point.x} <br> Distance: {point.y}")
})
#Barchart for Day Vs Count
output$barChart1nf <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = datenf$date) %>%
hc_add_series(name = "Total Number of Trips", data = datenf$count, type = "column" ,color = "black")
hc
})
#Barchart for Day Vs Average Distance
output$barChart2nf <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = datenf$date) %>%
hc_add_series(name = "Average Distance", data = datenf$mean, color = "black")
hc
})
#Barchart for Day Vs Total Distance
output$barChart3nf <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = datenf$date) %>%
hc_add_series(name = "Total Distance", data = datenf$sum, color = "black")
hc
})
#NASSAU Tab (df3)
#Value Boxes
#Median Distance
output$box1na <- renderValueBox({
valueBox(
value = prettyNum(round(median(df3$Trip_distance),2), big.mark = ",")
,subtitle = "Median Distance"
,color = "green"
,icon = icon("arrows-h")
)})
#Average Fare Amount
output$box2na <- renderValueBox({
valueBox(
value = prettyNum(round(mean(df3$Fare_amount),2), big.mark = ",")
,subtitle = "Average Fare Amount"
,color = "green"
,icon = icon("dollar")
)})
#Total Trips
output$box3na <- renderValueBox({
valueBox(
value = prettyNum(length(df3$RateCodeID), big.mark = ",")
,subtitle = "Total Trips"
,color = "green"
,icon = icon("car")
)})
#Fare Amount per Mile
# NOTE(review): mixes datasets -- df3$Fare_amount is divided by
# df2$Trip_distance; df3$Trip_distance was probably intended.
output$box4na <- renderValueBox({
valueBox(
value = prettyNum(round((df3$Fare_amount/df2$Trip_distance),2), big.mark = ",")
,subtitle = "Fare Amount per Mile"
,color = "black"
,icon = icon("car")
)})
#Total Money Made (see NOTE on box5gr)
output$box5na <- renderValueBox({
valueBox(
value = prettyNum(sum(round(df3$Fare_amount),1), big.mark = ",")
,subtitle = "Total Money Made"
,color = "black"
,icon = icon("black")
)})
#Average Tip Amount (see NOTE on box6gr)
output$box6na <- renderValueBox({
valueBox(
value = prettyNum(mean(round(df3$Tip_amount),2), big.mark = ",")
,subtitle = "Average Tip Amount"
,color = "black"
,icon = icon("dollar")
)})
#Leaflet Map of pickup locations
output$mymap3na <- renderLeaflet({
# define the leaflet map object
leaflet() %>%
addTiles() %>%
#setView(0,0,2) %>%
setView(-73.9465, 40.8116, zoom = 14) %>%
addProviderTiles(providers$CartoDB.Positron) %>%
addCircleMarkers(lng = df3$Pickup_longitude, lat = df3$Pickup_latitude
,radius = 6
,color = "black"
,stroke = FALSE
,fillOpacity = 0.5
,popup = df3$Trip_distance)
})
#Distance Vs Tip scatter chart (axis-title NOTE on mainChart3nf applies)
output$mainChart3na <- renderHighchart({
hc <- highchart()
hc <- hc %>%
hc_add_series_scatter(df3$Tip_amount, df3$Trip_distance, showInLegend = FALSE) %>%
hc_colors(color='black') %>%
hc_yAxis(title=list(text='Tip Amount')) %>%
hc_xAxis(title=list(text='Trip Distance'))%>%
hc_tooltip(headerFormat = "", pointFormat = "Tip: {point.x} <br> Distance: {point.y}")
})
#Barchart for Day Vs Count
output$barChart1na <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = datena$date) %>%
hc_add_series(name = "Total Number of Trips", data = datena$count, type = "column" ,color = "black")
hc
})
#Barchart for Day Vs Average Distance
output$barChart2na <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = datena$date) %>%
hc_add_series(name = "Average Distance", data = datena$mean, color = "black")
hc
})
#Barchart for Day Vs Total Distance
output$barChart3na <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = datena$date) %>%
hc_add_series(name = "Total Distance", data = datena$sum, color = "black")
hc
})
#NEWARK Tab (df2)
#Value Boxes
#Median Distance (unlike the other tabs, not rounded to 2 digits)
output$box1ne <- renderValueBox({
valueBox(
value = prettyNum(median(df2$Trip_distance), big.mark = ",")
,subtitle = "Median Distance"
,color = "green"
,icon = icon("arrows-h")
)})
#Average Fare Amount
output$box2ne <- renderValueBox({
valueBox(
value = prettyNum(round(mean(df2$Fare_amount),2), big.mark = ",")
,subtitle = "Average Fare Amount"
,color = "green"
,icon = icon("dollar")
)})
#Total Trips
output$box3ne <- renderValueBox({
valueBox(
value = prettyNum(length(df2$RateCodeID), big.mark = ",")
,subtitle = "Total Trips"
,color = "green"
,icon = icon("car")
)})
#Fare Amount per Mile
output$box4ne <- renderValueBox({
valueBox(
value = prettyNum(round((df2$Fare_amount/df2$Trip_distance),2), big.mark = ",")
,subtitle = "Fare Amount per Mile"
,color = "black"
,icon = icon("car")
)})
#Total Money Made (see NOTE on box5gr)
output$box5ne <- renderValueBox({
valueBox(
value = prettyNum(sum(round(df2$Fare_amount),1), big.mark = ",")
,subtitle = "Total Money Made"
,color = "black"
,icon = icon("black")
)})
#Average Tip Amount (see NOTE on box6gr)
# NOTE(review): uses df$Tip_amount (the JFK data) on the Newark tab;
# df2$Tip_amount was probably intended.
output$box6ne <- renderValueBox({
valueBox(
value = prettyNum(mean(round(df$Tip_amount),2), big.mark = ",")
,subtitle = "Average Tip Amount"
,color = "black"
,icon = icon("dollar")
)})
#Leaflet Map for newark data
output$mymap2ne <- renderLeaflet({
# define the leaflet map object
leaflet() %>%
addTiles() %>%
#setView(0,0,2) %>%
setView(-73.9465, 40.8116, zoom = 14) %>%
addProviderTiles(providers$CartoDB.Positron) %>%
addCircleMarkers(lng = df2$Pickup_longitude, lat = df2$Pickup_latitude
,radius = 6
,color = "black"
,stroke = FALSE
,fillOpacity = 0.5
,popup = df2$Trip_distance)
})
#Distance Vs Tip scatter chart (axis-title NOTE on mainChart3nf applies)
output$mainChart2ne <- renderHighchart({
hc <- highchart()
hc <- hc %>%
hc_add_series_scatter(df2$Tip_amount, df2$Trip_distance, showInLegend = FALSE) %>%
hc_colors(color='black') %>%
hc_yAxis(title=list(text='Tip Amount')) %>%
hc_xAxis(title=list(text='Trip Distance'))%>%
hc_tooltip(headerFormat = "", pointFormat = "Tip: {point.x} <br> Distance: {point.y}")
})
#Barchart for Day Vs Count
output$barChart1ne <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = date2ne$date) %>%
hc_add_series(name = "Total Number of Trips", data = date2ne$count, type = "column" ,color = "black")
hc
})
#Barchart for Day Vs Average Distance
output$barChart2ne <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = date2ne$date) %>%
hc_add_series(name = "Average Distance", data = date2ne$mean, color = "black")
hc
})
#Barchart for Day Vs Total Distance
output$barChart3ne <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = date2ne$date) %>%
hc_add_series(name = "Total Distance", data = date2ne$sum, color = "black")
hc
})
#JFK Tab (df)
#Value Boxes
#Median Distance (not rounded, unlike the gr/nf/na tabs)
output$box1 <- renderValueBox({
valueBox(
value = prettyNum(median(df$Trip_distance), big.mark = ",")
,subtitle = "Median Distance"
,color = "green"
,icon = icon("arrows-h")
)})
#Average Fare Amount (not rounded, unlike the other tabs)
output$box2 <- renderValueBox({
valueBox(
value = prettyNum(mean(df$Fare_amount), big.mark = ",")
,subtitle = "Average Fare Amount"
,color = "green"
,icon = icon("dollar")
)})
#Total Trips
output$box3 <- renderValueBox({
valueBox(
value = prettyNum(length(df$RateCodeID), big.mark = ",")
,subtitle = "Total Trips"
,color = "green"
,icon = icon("car")
)})
#Fare Amount per Mile
output$box4 <- renderValueBox({
valueBox(
value = prettyNum(round((df$Fare_amount/df$Trip_distance),2), big.mark = ",")
,subtitle = "Fare Amount per Mile"
,color = "black"
,icon = icon("car")
)})
#Total Money Made (see NOTE on box5gr)
output$box5 <- renderValueBox({
valueBox(
value = prettyNum(sum(round(df$Fare_amount),1), big.mark = ",")
,subtitle = "Total Money Made"
,color = "black"
,icon = icon("black")
)})
#Average Tip Amount (see NOTE on box6gr)
output$box6 <- renderValueBox({
valueBox(
value = prettyNum(mean(round(df$Tip_amount),2), big.mark = ",")
,subtitle = "Average Tip Amount"
,color = "black"
,icon = icon("dollar")
)})
#Leaflet Map of pickup locations
output$mymap <- renderLeaflet({
# define the leaflet map object
leaflet() %>%
addTiles() %>%
#setView(0,0,2) %>%
setView(-73.9465, 40.8116, zoom = 14) %>%
addProviderTiles(providers$CartoDB.Positron) %>%
addCircleMarkers(lng = df$Pickup_longitude, lat = df$Pickup_latitude
,radius = 6
,color = "black"
,stroke = FALSE
,fillOpacity = 0.5
,popup = df$Trip_distance)
})
#Distance Vs Tip scatter chart (axis-title NOTE on mainChart3nf applies)
output$mainChart <- renderHighchart({
hc <- highchart()
hc <- hc %>%
hc_add_series_scatter(df$Tip_amount, df$Trip_distance, showInLegend = FALSE) %>%
hc_colors(color='black') %>%
hc_yAxis(title=list(text='Tip Amount')) %>%
hc_xAxis(title=list(text='Trip Distance'))%>%
hc_tooltip(headerFormat = "", pointFormat = "Tip: {point.x} <br> Distance: {point.y}")
})
#Barchart for Day Vs Count
output$barChart1 <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = dfdate$date) %>%
hc_add_series(name = "Total Number of Trips", data = dfdate$count, type = "column" ,color = "black")
hc
})
#Barchart for Day Vs Average Distance
output$barChart2 <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = dfdate$date) %>%
hc_add_series(name = "Average Distance", data = dfdate$mean, color = "black")
hc
})
#Barchart for Day Vs Total Distance
output$barChart3 <- renderHighchart ({
hc <- highchart() %>%
hc_xAxis(categories = dfdate$date) %>%
hc_add_series(name = "Total Distance", data = dfdate$sum, color = "black")
hc
})
}
|
/server.R
|
no_license
|
NahoiLartem/NYT2
|
R
| false
| false
| 16,337
|
r
|
source("helpers.R")
source("libraries.R")
server <- function(input, output, session) {

  # ---- Helpers ---------------------------------------------------------
  # Every tab of the dashboard shows the same set of outputs (six value
  # boxes, a pickup map, a distance-vs-tip scatter, three per-day bar
  # charts) for a different subset of the trip data, so the render logic
  # is defined once and applied per tab at the bottom.

  # Register the six summary value boxes for one tab.
  #   suffix: output-id suffix ("gr", "nf", "na", "ne", or "" for JFK).
  #   data:   data frame of trips for the tab (columns Trip_distance,
  #           Fare_amount, Tip_amount, RateCodeID).
  register_boxes <- function(suffix, data) {
    output[[paste0("box1", suffix)]] <- renderValueBox({
      valueBox(
        value = prettyNum(round(median(data$Trip_distance), 2), big.mark = ","),
        subtitle = "Median Distance",
        color = "green",
        icon = icon("arrows-h")
      )
    })
    output[[paste0("box2", suffix)]] <- renderValueBox({
      valueBox(
        value = prettyNum(round(mean(data$Fare_amount), 2), big.mark = ","),
        subtitle = "Average Fare Amount",
        color = "green",
        icon = icon("dollar")
      )
    })
    output[[paste0("box3", suffix)]] <- renderValueBox({
      valueBox(
        value = prettyNum(length(data$RateCodeID), big.mark = ","),
        subtitle = "Total Trips",
        color = "green",
        icon = icon("car")
      )
    })
    # Fare per mile as a ratio of totals, yielding one number.  The
    # original divided the two columns element-wise (a whole vector) and
    # on the "nf"/"na" tabs divided by the wrong tab's data frame (df2).
    output[[paste0("box4", suffix)]] <- renderValueBox({
      valueBox(
        value = prettyNum(round(sum(data$Fare_amount) / sum(data$Trip_distance), 2),
                          big.mark = ","),
        subtitle = "Fare Amount per Mile",
        color = "black",
        icon = icon("car")
      )
    })
    # Total revenue.  The original's sum(round(x), 1) summed a stray 1
    # into the total; round(sum(x), 1) is what was intended.
    output[[paste0("box5", suffix)]] <- renderValueBox({
      valueBox(
        value = prettyNum(round(sum(data$Fare_amount), 1), big.mark = ","),
        subtitle = "Total Money Made",
        color = "black",
        icon = icon("dollar")  # icon("black") is not a valid icon name
      )
    })
    # Average tip.  The original's mean(round(x), 2) passed 2 as the
    # `trim` argument of mean(); round(mean(x), 2) is what was intended.
    output[[paste0("box6", suffix)]] <- renderValueBox({
      valueBox(
        value = prettyNum(round(mean(data$Tip_amount), 2), big.mark = ","),
        subtitle = "Average Tip Amount",
        color = "black",
        icon = icon("dollar")
      )
    })
  }

  # Register the pickup-location leaflet map for one tab.
  register_map <- function(map_id, data) {
    output[[map_id]] <- renderLeaflet({
      leaflet() %>%
        addTiles() %>%
        setView(-73.9465, 40.8116, zoom = 14) %>%
        addProviderTiles(providers$CartoDB.Positron) %>%
        addCircleMarkers(
          lng = data$Pickup_longitude, lat = data$Pickup_latitude,
          radius = 6,
          color = "black",
          stroke = FALSE,
          fillOpacity = 0.5,
          popup = data$Trip_distance
        )
    })
  }

  # Register the distance-vs-tip scatter chart for one tab.  Distance is
  # plotted on x and tip on y, matching the axis titles and tooltip
  # (most tabs previously passed the series in the opposite order).
  register_scatter <- function(chart_id, data) {
    output[[chart_id]] <- renderHighchart({
      highchart() %>%
        hc_add_series_scatter(data$Trip_distance, data$Tip_amount,
                              showInLegend = FALSE) %>%
        hc_colors(color = "black") %>%
        hc_yAxis(title = list(text = "Tip Amount")) %>%
        hc_xAxis(title = list(text = "Trip Distance")) %>%
        hc_tooltip(headerFormat = "",
                   pointFormat = "Tip: {point.y} <br> Distance: {point.x}")
    })
  }

  # Register the three per-day bar charts for one tab.
  #   date_data: data frame with columns date, count, mean, sum.
  register_barcharts <- function(suffix, date_data) {
    output[[paste0("barChart1", suffix)]] <- renderHighchart({
      highchart() %>%
        hc_xAxis(categories = date_data$date) %>%
        hc_add_series(name = "Total Number of Trips", data = date_data$count,
                      type = "column", color = "black")
    })
    output[[paste0("barChart2", suffix)]] <- renderHighchart({
      highchart() %>%
        hc_xAxis(categories = date_data$date) %>%
        hc_add_series(name = "Average Distance", data = date_data$mean,
                      color = "black")
    })
    output[[paste0("barChart3", suffix)]] <- renderHighchart({
      highchart() %>%
        hc_xAxis(categories = date_data$date) %>%
        hc_add_series(name = "Total Distance", data = date_data$sum,
                      color = "black")
    })
  }

  # Register every output for one tab.
  register_tab <- function(suffix, data, date_data, map_id, chart_id) {
    register_boxes(suffix, data)
    register_map(map_id, data)
    register_scatter(chart_id, data)
    register_barcharts(suffix, date_data)
  }

  # ---- Tabs ------------------------------------------------------------
  # Output ids (map/chart naming is historical and kept for UI
  # compatibility).
  register_tab("gr", df5, dategr,  "mymap3gr", "mainChart3gr")  # Group Ride
  register_tab("nf", df4, datenf,  "mymap3nf", "mainChart3nf")  # Negotiated Fare
  register_tab("na", df3, datena,  "mymap3na", "mainChart3na")  # Nassau
  register_tab("ne", df2, date2ne, "mymap2ne", "mainChart2ne")  # Newark
  register_tab("",   df,  dfdate,  "mymap",    "mainChart")     # JFK
}
|
#' Obtain data and feature geometry for the decennial Census
#'
#' @param geography The geography of your data.
#' @param variables Character string or vector of character strings of variable
#' IDs.
#' @param table The Census table for which you would like to request all variables. Uses
#' lookup tables to identify the variables; performs faster when variable
#' table already exists through \code{load_variables(cache = TRUE)}.
#' @param cache_table Whether or not to cache table names for faster future access.
#' Defaults to FALSE; if TRUE, only needs to be called once per
#' dataset. If variables dataset is already cached via the
#' \code{load_variables} function, this can be bypassed.
#' @param year The year for which you are requesting data. 1990, 2000, and 2010 are available.
#' @param sumfile The Census summary file. Defaults to sf1; the function will look in sf3 if it
#' cannot find a variable in sf1.
#' @param state The state for which you are requesting data. State
#' names, postal codes, and FIPS codes are accepted.
#' Defaults to NULL.
#' @param county The county for which you are requesting data. County names and
#' FIPS codes are accepted. Must be combined with a value supplied
#' to `state`. Defaults to NULL.
#' @param geometry if FALSE (the default), return a regular tibble of ACS data.
#' if TRUE, uses the tigris package to return an sf tibble
#' with simple feature geometry in the `geometry` column. state, county, tract, and block group are
#' supported for 1990 through 2010; block and ZCTA geometry are supported for 2000 and 2010.
#' @param output One of "tidy" (the default) in which each row represents an
#' enumeration unit-variable combination, or "wide" in which each
#' row represents an enumeration unit and the variables are in the
#' columns.
#' @param keep_geo_vars if TRUE, keeps all the variables from the Census
#' shapefile obtained by tigris. Defaults to FALSE.
#' @param shift_geo if TRUE, returns geometry with Alaska and Hawaii shifted for thematic mapping of the entire US.
#' Geometry was originally obtained from the albersusa R package.
#' @param summary_var Character string of a "summary variable" from the decennial Census
#' to be included in your output. Usually a variable (e.g. total population)
#' that you'll want to use as a denominator or comparison.
#' @param key Your Census API key.
#' Obtain one at \url{http://api.census.gov/data/key_signup.html}
#' @param ... Other keyword arguments
#'
#' @return a tibble or sf tibble of decennial Census data
#' @examples \dontrun{
#' # Plot of race/ethnicity by county in Illinois for 2010
#' library(tidycensus)
#' library(tidyverse)
#' library(viridis)
#' census_api_key("YOUR KEY GOES HERE")
#' vars10 <- c("P0050003", "P0050004", "P0050006", "P0040003")
#'
#' il <- get_decennial(geography = "county", variables = vars10, year = 2010,
#' summary_var = "P0010001", state = "IL", geometry = TRUE) %>%
#' mutate(pct = 100 * (value / summary_value))
#'
#' ggplot(il, aes(fill = pct, color = pct)) +
#' geom_sf() +
#' facet_wrap(~variable)
#'
#'
#' }
#' @export
get_decennial <- function(geography, variables = NULL, table = NULL, cache_table = FALSE, year = 2010,
                          sumfile = "sf1", state = NULL, county = NULL, geometry = FALSE, output = "tidy",
                          keep_geo_vars = FALSE, shift_geo = FALSE, summary_var = NULL, key = NULL, ...) {
  message(sprintf("Getting data from the %s decennial Census", year))

  # The CENSUS_API_KEY environment variable takes precedence over `key`.
  if (Sys.getenv('CENSUS_API_KEY') != '') {
    key <- Sys.getenv('CENSUS_API_KEY')
  } else if (is.null(key)) {
    stop('A Census API key is required. Obtain one at http://api.census.gov/data/key_signup.html, and then supply the key to the `census_api_key` function to use it throughout your tidycensus session.')
  }

  # `variables` and `table` are mutually exclusive, and one is required.
  if (is.null(variables) && is.null(table)) {
    stop("Either a vector of variables or a table must be specified.", call. = FALSE)
  }
  if (!is.null(variables) && !is.null(table)) {
    stop("Specify variables or a table to retrieve; they cannot be combined.",
         call. = FALSE)
  }

  # if (geography == "block" && year != 2010) {
  #   stop("At the moment, block data is only available for 2010. I recommend using NHGIS (http://www.nhgis.org) and the ipumsr package for block data for other years.", call. = FALSE)
  # }

  if (geography %in% c("tract", "block group") && year == 1990 && is.null(county)) {
    stop("At the moment, tracts and block groups for 1990 require specifying a county.",
         call. = FALSE)
  }

  if (geography == "zcta") geography <- "zip code tabulation area"

  if (geography == "zip code tabulation area" && is.null(state)) {
    stop("ZCTA data for the decennial Census is only available by state from tidycensus.",
         call. = FALSE)
  }

  if (geography == "zip code tabulation area" && geometry) {
    stop("Linked ZCTA geometry and attributes for `get_decennial` are not currently available in tidycensus.",
         call. = FALSE)
  }

  if (shift_geo && !geometry) {
    stop("`shift_geo` is only available when requesting feature geometry with `geometry = TRUE`",
         call. = FALSE)
  }

  cache <- getOption("tigris_use_cache", FALSE)

  if (geometry) {
    if (shift_geo) {
      if (year != 2010) {
        stop("`shift_geo` is currently only available for 2010 data in `get_decennial()` due to county boundary changes.",
             call. = FALSE)
      }
      message("Using feature geometry obtained from the albersusa package")
    } else if (!shift_geo && !cache) {
      message("Downloading feature geometry from the Census website. To cache shapefiles for use in future sessions, set `options(tigris_use_cache = TRUE)`.")
    }
  }

  # Allow users to get all block groups in a state by expanding `county`
  # to every county FIPS code in that state.
  if (geography == "block group" && is.null(county)) {
    st <- suppressMessages(validate_state(state))
    county <- fips_codes[fips_codes$state_code == st, ]$county_code
  }

  # If more than one state is specified for tracts - or more than one
  # county for block groups / blocks / tracts - take care of this under
  # the hood by having the function call itself once per state or county
  # and combining the results.
  if (geography == "tract" && length(state) > 1) {
    mc <- match.call(expand.dots = TRUE)
    if (geometry) {
      result <- map(state, function(x) {
        mc[["state"]] <- x
        eval(mc)
      }) %>%
        reduce(rbind)
      geoms <- unique(st_geometry_type(result))
      if (length(geoms) > 1) {
        result <- st_cast(result, "MULTIPOLYGON")
      }
      result <- result %>%
        as_tibble() %>%
        st_as_sf()
    } else {
      result <- map_df(state, function(x) {
        mc[["state"]] <- x
        eval(mc)
      })
    }
    return(result)
  }

  if ((geography %in% c("block group", "block") && length(county) > 1) || (geography == "tract" && length(county) > 1)) {
    mc <- match.call(expand.dots = TRUE)
    if (geometry) {
      result <- map(county, function(x) {
        mc[["county"]] <- x
        eval(mc)
      }) %>%
        reduce(rbind)
      geoms <- unique(st_geometry_type(result))
      if (length(geoms) > 1) {
        # BUGFIX: the cast result was previously discarded; assign it so
        # mixed POLYGON/MULTIPOLYGON output is actually homogenized,
        # matching the multi-state branch above.
        result <- st_cast(result, "MULTIPOLYGON")
      }
      result <- result %>%
        as_tibble() %>%
        st_as_sf()
    } else {
      result <- map_df(county, function(x) {
        mc[["county"]] <- x
        eval(mc)
      })
    }
    return(result)
  }

  # Resolve a whole table to its constituent variable IDs if needed.
  if (!is.null(table)) {
    variables <- variables_from_table_decennial(table, year, sumfile, cache_table)
  }

  # The API limits variables per request; chunk large requests into
  # groups of 48 and bind the resulting columns back together.
  if (length(variables) > 48) {
    l <- split(variables, ceiling(seq_along(variables) / 48))
    dat <- map(l, function(x) {
      d <- try(load_data_decennial(geography, x, key, year, sumfile, state, county),
               silent = TRUE)
      # If sf1 fails, try to get it from sf3
      if (inherits(d, "try-error")) {
        d <- try(suppressMessages(load_data_decennial(geography, x, key, year, sumfile = "sf3", state, county)))
      }
      d
    }) %>%
      bind_cols()
  } else {
    dat <- try(load_data_decennial(geography, variables, key, year, sumfile, state, county),
               silent = TRUE)
    # If sf1 fails, try to get it from sf3
    if (inherits(dat, "try-error")) {
      dat <- try(suppressMessages(load_data_decennial(geography, variables, key, year, sumfile = "sf3", state, county)))
    }
  }

  if (output == "tidy") {
    sub <- dat[c("GEOID", "NAME", variables)]
    dat2 <- sub %>%
      gather(key = variable, value = value, -GEOID, -NAME)
    # Substitute user-supplied names for variable IDs if given.
    if (!is.null(names(variables))) {
      for (i in 1:length(variables)) {
        dat2[dat2 == variables[i]] <- names(variables)[i]
      }
    }
  } else if (output == "wide") {
    # Drop duplicated columns that chunked binds can introduce.
    dat <- dat[!duplicated(names(dat), fromLast = TRUE)]
    dat2 <- dat
    if (!is.null(names(variables))) {
      for (i in 1:length(variables)) {
        names(dat2) <- str_replace(names(dat2), variables[i], names(variables)[i])
      }
    }
    dat2 <- dat2 %>%
      select(GEOID, NAME, everything())
  }

  # Attach the requested summary variable, again falling back to sf3.
  if (!is.null(summary_var)) {
    sumdat <- suppressMessages(try(load_data_decennial(geography, summary_var, key, year,
                                                       sumfile, state, county)))
    if (inherits(sumdat, "try-error")) {
      sumdat <- suppressMessages(try(load_data_decennial(geography, summary_var, key, year,
                                                         sumfile = "sf3", state, county)))
    }
    dat2 <- dat2 %>%
      inner_join(sumdat, by = "GEOID") %>%
      rename("summary_value" = !! summary_var,
             NAME = "NAME.x") %>%
      select(-NAME.y)
  }

  if (geometry) {
    if (shift_geo) {
      if (!is.null(state)) {
        stop("`shift_geo` is only available when requesting geometry for the entire US", call. = FALSE)
      }
      message("Please note: Alaska and Hawaii are being shifted and are not to scale.")
      if (geography == "state") {
        geom <- tidycensus::state_laea
      } else if (geography == "county") {
        geom <- tidycensus::county_laea
      } else {
        stop("`shift_geo` is only available for states and counties", call. = FALSE)
      }
    } else {
      geom <- suppressMessages(use_tigris(geography = geography, year = year,
                                          state = state, county = county, ...))
    }
    if (!keep_geo_vars) {
      geom <- select(geom, GEOID, geometry)
    }
    # Merge and return the output
    out <- right_join(geom, dat2, by = "GEOID") %>%
      as_tibble() %>%
      st_as_sf()
    return(out)
  } else {
    return(dat2)
  }
}
|
/R/census.R
|
no_license
|
stmacdonell/tidycensus
|
R
| false
| false
| 10,954
|
r
|
#' Obtain data and feature geometry for the decennial Census
#'
#' @param geography The geography of your data.
#' @param variables Character string or vector of character strings of variable
#' IDs.
#' @param table The Census table for which you would like to request all variables. Uses
#' lookup tables to identify the variables; performs faster when variable
#' table already exists through \code{load_variables(cache = TRUE)}.
#' @param cache_table Whether or not to cache table names for faster future access.
#' Defaults to FALSE; if TRUE, only needs to be called once per
#' dataset. If variables dataset is already cached via the
#' \code{load_variables} function, this can be bypassed.
#' @param year The year for which you are requesting data. 1990, 2000, and 2010 are available.
#' @param sumfile The Census summary file. Defaults to sf1; the function will look in sf3 if it
#' cannot find a variable in sf1.
#' @param state The state for which you are requesting data. State
#' names, postal codes, and FIPS codes are accepted.
#' Defaults to NULL.
#' @param county The county for which you are requesting data. County names and
#' FIPS codes are accepted. Must be combined with a value supplied
#' to `state`. Defaults to NULL.
#' @param geometry if FALSE (the default), return a regular tibble of ACS data.
#' if TRUE, uses the tigris package to return an sf tibble
#' with simple feature geometry in the `geometry` column. state, county, tract, and block group are
#' supported for 1990 through 2010; block and ZCTA geometry are supported for 2000 and 2010.
#' @param output One of "tidy" (the default) in which each row represents an
#' enumeration unit-variable combination, or "wide" in which each
#' row represents an enumeration unit and the variables are in the
#' columns.
#' @param keep_geo_vars if TRUE, keeps all the variables from the Census
#' shapefile obtained by tigris. Defaults to FALSE.
#' @param shift_geo if TRUE, returns geometry with Alaska and Hawaii shifted for thematic mapping of the entire US.
#' Geometry was originally obtained from the albersusa R package.
#' @param summary_var Character string of a "summary variable" from the decennial Census
#' to be included in your output. Usually a variable (e.g. total population)
#' that you'll want to use as a denominator or comparison.
#' @param key Your Census API key.
#' Obtain one at \url{http://api.census.gov/data/key_signup.html}
#' @param ... Other keyword arguments
#'
#' @return a tibble or sf tibble of decennial Census data
#' @examples \dontrun{
#' # Plot of race/ethnicity by county in Illinois for 2010
#' library(tidycensus)
#' library(tidyverse)
#' library(viridis)
#' census_api_key("YOUR KEY GOES HERE")
#' vars10 <- c("P0050003", "P0050004", "P0050006", "P0040003")
#'
#' il <- get_decennial(geography = "county", variables = vars10, year = 2010,
#' summary_var = "P0010001", state = "IL", geometry = TRUE) %>%
#' mutate(pct = 100 * (value / summary_value))
#'
#' ggplot(il, aes(fill = pct, color = pct)) +
#' geom_sf() +
#' facet_wrap(~variable)
#'
#'
#' }
#' @export
get_decennial <- function(geography, variables = NULL, table = NULL, cache_table = FALSE, year = 2010,
                          sumfile = "sf1", state = NULL, county = NULL, geometry = FALSE, output = "tidy",
                          keep_geo_vars = FALSE, shift_geo = FALSE, summary_var = NULL, key = NULL, ...) {
  message(sprintf("Getting data from the %s decennial Census", year))

  # The CENSUS_API_KEY environment variable takes precedence over `key`.
  if (Sys.getenv('CENSUS_API_KEY') != '') {
    key <- Sys.getenv('CENSUS_API_KEY')
  } else if (is.null(key)) {
    stop('A Census API key is required. Obtain one at http://api.census.gov/data/key_signup.html, and then supply the key to the `census_api_key` function to use it throughout your tidycensus session.')
  }

  # `variables` and `table` are mutually exclusive, and one is required.
  if (is.null(variables) && is.null(table)) {
    stop("Either a vector of variables or a table must be specified.", call. = FALSE)
  }
  if (!is.null(variables) && !is.null(table)) {
    stop("Specify variables or a table to retrieve; they cannot be combined.",
         call. = FALSE)
  }

  # if (geography == "block" && year != 2010) {
  #   stop("At the moment, block data is only available for 2010. I recommend using NHGIS (http://www.nhgis.org) and the ipumsr package for block data for other years.", call. = FALSE)
  # }

  if (geography %in% c("tract", "block group") && year == 1990 && is.null(county)) {
    stop("At the moment, tracts and block groups for 1990 require specifying a county.",
         call. = FALSE)
  }

  if (geography == "zcta") geography <- "zip code tabulation area"

  if (geography == "zip code tabulation area" && is.null(state)) {
    stop("ZCTA data for the decennial Census is only available by state from tidycensus.",
         call. = FALSE)
  }

  if (geography == "zip code tabulation area" && geometry) {
    stop("Linked ZCTA geometry and attributes for `get_decennial` are not currently available in tidycensus.",
         call. = FALSE)
  }

  if (shift_geo && !geometry) {
    stop("`shift_geo` is only available when requesting feature geometry with `geometry = TRUE`",
         call. = FALSE)
  }

  cache <- getOption("tigris_use_cache", FALSE)

  if (geometry) {
    if (shift_geo) {
      if (year != 2010) {
        stop("`shift_geo` is currently only available for 2010 data in `get_decennial()` due to county boundary changes.",
             call. = FALSE)
      }
      message("Using feature geometry obtained from the albersusa package")
    } else if (!shift_geo && !cache) {
      message("Downloading feature geometry from the Census website. To cache shapefiles for use in future sessions, set `options(tigris_use_cache = TRUE)`.")
    }
  }

  # Allow users to get all block groups in a state by expanding `county`
  # to every county FIPS code in that state.
  if (geography == "block group" && is.null(county)) {
    st <- suppressMessages(validate_state(state))
    county <- fips_codes[fips_codes$state_code == st, ]$county_code
  }

  # If more than one state is specified for tracts - or more than one
  # county for block groups / blocks / tracts - take care of this under
  # the hood by having the function call itself once per state or county
  # and combining the results.
  if (geography == "tract" && length(state) > 1) {
    mc <- match.call(expand.dots = TRUE)
    if (geometry) {
      result <- map(state, function(x) {
        mc[["state"]] <- x
        eval(mc)
      }) %>%
        reduce(rbind)
      geoms <- unique(st_geometry_type(result))
      if (length(geoms) > 1) {
        result <- st_cast(result, "MULTIPOLYGON")
      }
      result <- result %>%
        as_tibble() %>%
        st_as_sf()
    } else {
      result <- map_df(state, function(x) {
        mc[["state"]] <- x
        eval(mc)
      })
    }
    return(result)
  }

  if ((geography %in% c("block group", "block") && length(county) > 1) || (geography == "tract" && length(county) > 1)) {
    mc <- match.call(expand.dots = TRUE)
    if (geometry) {
      result <- map(county, function(x) {
        mc[["county"]] <- x
        eval(mc)
      }) %>%
        reduce(rbind)
      geoms <- unique(st_geometry_type(result))
      if (length(geoms) > 1) {
        # BUGFIX: the cast result was previously discarded; assign it so
        # mixed POLYGON/MULTIPOLYGON output is actually homogenized,
        # matching the multi-state branch above.
        result <- st_cast(result, "MULTIPOLYGON")
      }
      result <- result %>%
        as_tibble() %>%
        st_as_sf()
    } else {
      result <- map_df(county, function(x) {
        mc[["county"]] <- x
        eval(mc)
      })
    }
    return(result)
  }

  # Resolve a whole table to its constituent variable IDs if needed.
  if (!is.null(table)) {
    variables <- variables_from_table_decennial(table, year, sumfile, cache_table)
  }

  # The API limits variables per request; chunk large requests into
  # groups of 48 and bind the resulting columns back together.
  if (length(variables) > 48) {
    l <- split(variables, ceiling(seq_along(variables) / 48))
    dat <- map(l, function(x) {
      d <- try(load_data_decennial(geography, x, key, year, sumfile, state, county),
               silent = TRUE)
      # If sf1 fails, try to get it from sf3
      if (inherits(d, "try-error")) {
        d <- try(suppressMessages(load_data_decennial(geography, x, key, year, sumfile = "sf3", state, county)))
      }
      d
    }) %>%
      bind_cols()
  } else {
    dat <- try(load_data_decennial(geography, variables, key, year, sumfile, state, county),
               silent = TRUE)
    # If sf1 fails, try to get it from sf3
    if (inherits(dat, "try-error")) {
      dat <- try(suppressMessages(load_data_decennial(geography, variables, key, year, sumfile = "sf3", state, county)))
    }
  }

  if (output == "tidy") {
    sub <- dat[c("GEOID", "NAME", variables)]
    dat2 <- sub %>%
      gather(key = variable, value = value, -GEOID, -NAME)
    # Substitute user-supplied names for variable IDs if given.
    if (!is.null(names(variables))) {
      for (i in 1:length(variables)) {
        dat2[dat2 == variables[i]] <- names(variables)[i]
      }
    }
  } else if (output == "wide") {
    # Drop duplicated columns that chunked binds can introduce.
    dat <- dat[!duplicated(names(dat), fromLast = TRUE)]
    dat2 <- dat
    if (!is.null(names(variables))) {
      for (i in 1:length(variables)) {
        names(dat2) <- str_replace(names(dat2), variables[i], names(variables)[i])
      }
    }
    dat2 <- dat2 %>%
      select(GEOID, NAME, everything())
  }

  # Attach the requested summary variable, again falling back to sf3.
  if (!is.null(summary_var)) {
    sumdat <- suppressMessages(try(load_data_decennial(geography, summary_var, key, year,
                                                       sumfile, state, county)))
    if (inherits(sumdat, "try-error")) {
      sumdat <- suppressMessages(try(load_data_decennial(geography, summary_var, key, year,
                                                         sumfile = "sf3", state, county)))
    }
    dat2 <- dat2 %>%
      inner_join(sumdat, by = "GEOID") %>%
      rename("summary_value" = !! summary_var,
             NAME = "NAME.x") %>%
      select(-NAME.y)
  }

  if (geometry) {
    if (shift_geo) {
      if (!is.null(state)) {
        stop("`shift_geo` is only available when requesting geometry for the entire US", call. = FALSE)
      }
      message("Please note: Alaska and Hawaii are being shifted and are not to scale.")
      if (geography == "state") {
        geom <- tidycensus::state_laea
      } else if (geography == "county") {
        geom <- tidycensus::county_laea
      } else {
        stop("`shift_geo` is only available for states and counties", call. = FALSE)
      }
    } else {
      geom <- suppressMessages(use_tigris(geography = geography, year = year,
                                          state = state, county = county, ...))
    }
    if (!keep_geo_vars) {
      geom <- select(geom, GEOID, geometry)
    }
    # Merge and return the output
    out <- right_join(geom, dat2, by = "GEOID") %>%
      as_tibble() %>%
      st_as_sf()
    return(out)
  } else {
    return(dat2)
  }
}
|
# Fuzz fixture for multivariance:::match_rows: a 5x7 matrix seeded with a
# few extreme doubles (subnormal-range through huge magnitudes) plus a
# degenerate 1x1 zero matrix.
fixture <- list(
  A = structure(c(2.32784507357645e-308, 9.53818252170339e+295, 1.22810536108213e+146,
                  5.71368621380148e-88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                .Dim = c(5L, 7L)),
  B = structure(0, .Dim = c(1L, 1L))
)
outcome <- do.call(multivariance:::match_rows, fixture)
str(outcome)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613104868-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 343
|
r
|
# Fuzz fixture for multivariance:::match_rows: a 5x7 matrix seeded with a
# few extreme doubles (subnormal-range through huge magnitudes) plus a
# degenerate 1x1 zero matrix.
fixture <- list(
  A = structure(c(2.32784507357645e-308, 9.53818252170339e+295, 1.22810536108213e+146,
                  5.71368621380148e-88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
                .Dim = c(5L, 7L)),
  B = structure(0, .Dim = c(1L, 1L))
)
outcome <- do.call(multivariance:::match_rows, fixture)
str(outcome)
|
library(glmnet)

# Tissue-specific training set (average-rank features): column 1 holds the
# response, predictor columns start at 4.
mydata <- read.table("../../../../TrainingSet/FullSet/AvgRank/central_nervous_system.csv",
                     header = TRUE, sep = ",")
features <- as.matrix(mydata[, 4:ncol(mydata)])
outcome <- as.matrix(mydata[, 1])

# Fixed seed so the cross-validation folds are reproducible.
set.seed(123)

# 10-fold cross-validated elastic net (alpha = 0.55) with MAE loss on a
# Gaussian response; predictors are standardized internally.
glm <- cv.glmnet(features, outcome, nfolds = 10, type.measure = "mae",
                 alpha = 0.55, family = "gaussian", standardize = TRUE)

# Append the fitted coefficient path summary to the result file.
sink('./central_nervous_system_063.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/AvgRank/central_nervous_system/central_nervous_system_063.R
|
no_license
|
esbgkannan/QSMART
|
R
| false
| false
| 378
|
r
|
library(glmnet)

# Tissue-specific training set (average-rank features): column 1 holds the
# response, predictor columns start at 4.
mydata <- read.table("../../../../TrainingSet/FullSet/AvgRank/central_nervous_system.csv",
                     header = TRUE, sep = ",")
features <- as.matrix(mydata[, 4:ncol(mydata)])
outcome <- as.matrix(mydata[, 1])

# Fixed seed so the cross-validation folds are reproducible.
set.seed(123)

# 10-fold cross-validated elastic net (alpha = 0.55) with MAE loss on a
# Gaussian response; predictors are standardized internally.
glm <- cv.glmnet(features, outcome, nfolds = 10, type.measure = "mae",
                 alpha = 0.55, family = "gaussian", standardize = TRUE)

# Append the fitted coefficient path summary to the result file.
sink('./central_nervous_system_063.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
|
library("ggplot2")
# normalizes all values given in vector 'x' to be between 0,1
# Linearly rescale the values in `x` so that the minimum maps to 0 and the
# maximum maps to 1. (NaN results if all values are equal, as in the
# original; NA in `x` propagates through min/max.)
normalize <- function(x) {
  lo <- min(x)
  hi <- max(x)
  (x - lo) / (hi - lo)
}
# Regress `y` on `x` after rescaling both to the unit interval via
# normalize(). NOTE: this definition shadows stats::nlm in the search
# path; kept under the original name for compatibility with callers.
nlm <- function(x, y) {
  lm(normalize(y) ~ normalize(x))
}
# Load the tab-separated corpus table; each row is a word form with
# information-theoretic measures (word_info, prefix_info, suffix_info).
d<-read.csv("up.3.saved.txt",sep = "\t")
# Character lengths of the phone string and of its prefix/suffix parts.
d$w.len <- nchar(as.character(d$phones))
d$pre.len <-nchar(as.character(d$prefix))
d$suf.len <-nchar(as.character(d$suffix))
# Min-max normalize the three surprisal columns to [0, 1] for comparability.
d$word_given_context = normalize(d$word_info)
d$prefix_given_suffix = normalize(d$prefix_info)
d$suffix_given_prefix = normalize(d$suffix_info)
# because it's based off of index in Python, 0 is first character, etc
d$u.point = d$u.point + 1
# Alternative log1p scaling of the surprisal columns, kept for reference.
#d$word_given_context = log1p(d$word_info)
#d$prefix_given_suffix = log1p(d$prefix_info)
#d$suffix_given_prefix = log1p(d$suffix_info)
# Restrict the analysis to monomorphemic words.
mono <- subset(d, morpheme_count == 0)
# Density plots of the three normalized measures for the monomorphemic subset.
plot(density(mono$word_given_context))
plot(density(mono$suffix_given_prefix))
plot(density(mono$prefix_given_suffix))
# MORE surprising the word, LESS surprising the prefix
summary(lm(mono$word_given_context ~ mono$prefix_given_suffix))
# MORE surprising the word, MORE surprising the suffix
# NOT signif
summary(lm(mono$word_given_context ~ mono$suffix_given_prefix))
# Scatter plots with fitted regression lines for the two relations above.
ggplot(mono, aes(prefix_given_suffix, word_given_context)) +
geom_point() + geom_smooth(method="lm")
ggplot(mono, aes(suffix_given_prefix, word_given_context)) +
geom_point() + geom_smooth(method="lm")
# Relative uniqueness point (u.point / word length) against word surprisal.
ggplot(mono, aes(x= word_given_context, y = normalize(u.point/w.len))) +
geom_smooth(method="lm")
summary(lm(normalize(mono$u.point_mass/mono$u.point) ~ mono$word_given_context))
ggplot(mono, aes(suffix_given_prefix, word_given_context)) +
geom_smooth(method="lm")
qplot(mono$word_given_context, mono$prefix_given_suffix)
# Word length vs suffix informativity.
ggplot(mono, aes(x = normalize(w.len), y = normalize(suffix_info))) +
geom_point()
summary(lm(mono$suffix_info ~ mono$w.len))
|
/a.r
|
no_license
|
AdamKing11/Ngram_Informativity
|
R
| false
| false
| 1,878
|
r
|
# Exploratory analysis relating word informativity to prefix/suffix
# informativity for monomorphemic words.
library("ggplot2")
# normalizes all values given in vector 'x' to be between 0,1
normalize <- function(x){
  xmin = min(x)
  xmax = max(x)
  y = (x-xmin)/(xmax-xmin)
  return(y)
}
# Linear model between normalized y and x.
# NOTE(review): this definition masks stats::nlm for the rest of the script.
nlm <- function(x, y){
  return(lm(normalize(y)~normalize(x)))
}
# Load the tab-separated dataset of per-word information measures.
d<-read.csv("up.3.saved.txt",sep = "\t")
# Character lengths of the whole phone string and its prefix/suffix parts.
d$w.len <- nchar(as.character(d$phones))
d$pre.len <-nchar(as.character(d$prefix))
d$suf.len <-nchar(as.character(d$suffix))
# Min-max-normalized information scores (0 = least, 1 = most surprising).
d$word_given_context = normalize(d$word_info)
d$prefix_given_suffix = normalize(d$prefix_info)
d$suffix_given_prefix = normalize(d$suffix_info)
# because it's based off of index in Python, 0 is first character, etc
d$u.point = d$u.point + 1
#d$word_given_context = log1p(d$word_info)
#d$prefix_given_suffix = log1p(d$prefix_info)
#d$suffix_given_prefix = log1p(d$suffix_info)
# Restrict to monomorphemic words only.
mono <- subset(d, morpheme_count == 0)
# Density plots of the three normalized information measures.
plot(density(mono$word_given_context))
plot(density(mono$suffix_given_prefix))
plot(density(mono$prefix_given_suffix))
# MORE surprising the word, LESS surprising the prefix
summary(lm(mono$word_given_context ~ mono$prefix_given_suffix))
# MORE surprising the word, MORE surprising the suffix
# NOT signif
summary(lm(mono$word_given_context ~ mono$suffix_given_prefix))
# Scatter plots with linear fits of the same two relationships.
ggplot(mono, aes(prefix_given_suffix, word_given_context)) +
  geom_point() + geom_smooth(method="lm")
ggplot(mono, aes(suffix_given_prefix, word_given_context)) +
  geom_point() + geom_smooth(method="lm")
# Relative uniqueness point vs word informativity (fit line only).
ggplot(mono, aes(x= word_given_context, y = normalize(u.point/w.len))) +
  geom_smooth(method="lm")
summary(lm(normalize(mono$u.point_mass/mono$u.point) ~ mono$word_given_context))
ggplot(mono, aes(suffix_given_prefix, word_given_context)) +
  geom_smooth(method="lm")
qplot(mono$word_given_context, mono$prefix_given_suffix)
# Word length vs raw suffix information, both normalized.
ggplot(mono, aes(x = normalize(w.len), y = normalize(suffix_info))) +
  geom_point()
summary(lm(mono$suffix_info ~ mono$w.len))
|
# Build plot4.png: four time-series panels of household power consumption
# for 2007-02-01 and 2007-02-02.
# Unzip the raw data on first run only.
if (!file.exists(file.path(getwd(),"Project 1"))){
  unzip("exdata-data-household_power_consumption.zip", exdir="./Project 1")
}
# read the data into R
# "?" marks missing readings in this dataset.
data = read.table(file.path(getwd(),"Project 1","household_power_consumption.txt"),
                  header=TRUE,stringsAsFactors=FALSE,sep=";",na.strings="?")
# create another column which contains both date and time info
data$DateAndTime <- paste(data$Date,data$Time)
# convert date-time info from character to date and time class
data$Date <- as.Date(data$Date,format="%d/%m/%Y")
data$DateAndTime <- strptime(data$DateAndTime,format="%d/%m/%Y %H:%M:%S")
#subset the data based on dates
data <- subset(data,Date>=as.Date("01/02/2007",format="%d/%m/%Y") &
                 Date<=as.Date("02/02/2007",format="%d/%m/%Y"))
# create 4 plots
png(file="plot4.png",height=480,width=480)
par(mfrow=c(2,2))
with(data,{
  # first plot
  plot(type="l",x=DateAndTime,y=Global_active_power,xlab="",ylab="Global Active Power")
  # second plot
  plot(type="l",x=DateAndTime,y=Voltage,xlab="datetime",ylab="Voltage")
  # third plot: three sub-metering series overlaid on one panel
  plot(type="l",x=DateAndTime,y=Sub_metering_1,xlab="",ylab="Energy sub metering")
  lines(type="l",x=DateAndTime,y=Sub_metering_2,xlab="",col="red")
  lines(type="l",x=DateAndTime,y=Sub_metering_3,col="blue")
  # add in the legend for the third plot
  legend(x="topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
         col=c("black","red","blue"),lty=c(1,1,1),bty="n")
  # fourth plot
  plot(type="l",x=DateAndTime,y=Global_reactive_power,xlab="datetime",
       ylab="Global_reactive_power")
})
# Close the PNG device to flush the file.
dev.off()
|
/Exploratory Data Analysis/Project 1/plot4.R
|
no_license
|
JLKW/DataScienceCoursera
|
R
| false
| false
| 1,618
|
r
|
# Build plot4.png: four time-series panels of household power consumption
# for 2007-02-01 and 2007-02-02.

# Unzip the raw data on first run only.
if (!file.exists(file.path(getwd(), "Project 1"))) {
  unzip("exdata-data-household_power_consumption.zip", exdir = "./Project 1")
}

# Load the raw measurements; "?" marks missing readings.
power <- read.table(
  file.path(getwd(), "Project 1", "household_power_consumption.txt"),
  header = TRUE, stringsAsFactors = FALSE, sep = ";", na.strings = "?"
)

# Combine date and time strings into a single timestamp column, then
# convert both columns to proper date/time classes.
power$DateAndTime <- paste(power$Date, power$Time)
power$DateAndTime <- strptime(power$DateAndTime, format = "%d/%m/%Y %H:%M:%S")
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")

# Keep only the first two days of February 2007.
first.day <- as.Date("01/02/2007", format = "%d/%m/%Y")
last.day <- as.Date("02/02/2007", format = "%d/%m/%Y")
power <- subset(power, Date >= first.day & Date <= last.day)

# Render the 2x2 panel figure straight to PNG.
png(file = "plot4.png", height = 480, width = 480)
par(mfrow = c(2, 2))
with(power, {
  # Panel 1: global active power over time.
  plot(type = "l", x = DateAndTime, y = Global_active_power,
       xlab = "", ylab = "Global Active Power")
  # Panel 2: voltage over time.
  plot(type = "l", x = DateAndTime, y = Voltage,
       xlab = "datetime", ylab = "Voltage")
  # Panel 3: three sub-metering series overlaid, with legend.
  plot(type = "l", x = DateAndTime, y = Sub_metering_1,
       xlab = "", ylab = "Energy sub metering")
  lines(type = "l", x = DateAndTime, y = Sub_metering_2, xlab = "", col = "red")
  lines(type = "l", x = DateAndTime, y = Sub_metering_3, col = "blue")
  legend(x = "topright",
         c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         col = c("black", "red", "blue"), lty = c(1, 1, 1), bty = "n")
  # Panel 4: global reactive power over time.
  plot(type = "l", x = DateAndTime, y = Global_reactive_power,
       xlab = "datetime", ylab = "Global_reactive_power")
})
dev.off()
|
# Generate a synthetic record-linkage dataset for a logistic outcome model.
#
# Args:
#   n.observation  number of records to simulate
#   beta           coefficient vector (intercept first); length - 1 predictors
#   errorRate      probability that a link is a false match
#   reviewFraction probability that a record is clerically reviewed
#
# Returns a numeric matrix with columns ID, true.match, IDstar, y.true,
# y.observed, x0..x<p>, review, match (NA when not reviewed) and prY1.
# NOTE: the order of random draws matches the original implementation.
make.synthetic.data <- function(
    n.observation,
    beta,
    errorRate,
    reviewFraction
    ) {

    # One predictor per coefficient beyond the intercept.
    npredictors <- length(beta) - 1;

    # Design matrix: iid Bernoulli(0.5) predictors plus an intercept column,
    # labelled x0 (intercept), x1, ..., x<npredictors>.
    design <- matrix(
        data  = rbinom(n = n.observation * npredictors, size = 1, prob = 0.5),
        nrow  = n.observation,
        byrow = TRUE
        );
    design <- cbind(rep(1, times = n.observation), design);
    colnames(design) <- paste0("x", seq(0, npredictors));

    # Logistic-model success probabilities and the true binary responses.
    odds <- exp(design %*% beta);
    prY1 <- odds / (1 + odds);
    y.true <- rbinom(n = n.observation, size = 1, prob = prY1);

    # TRUE = the linked record is the correct one (false with prob errorRate).
    is.match <- sample(
        x       = c(TRUE, FALSE),
        size    = n.observation,
        replace = TRUE,
        prob    = c(1 - errorRate, errorRate)
        );

    # TRUE = record selected for clerical review.
    is.reviewed <- sample(
        x       = c(TRUE, FALSE),
        size    = n.observation,
        replace = TRUE,
        prob    = c(reviewFraction, 1 - reviewFraction)
        );

    record.id <- seq(1, n.observation);

    DF.output <- cbind(
        record.id, is.match, record.id, y.true, y.true,
        design, is.reviewed, is.match, prY1
        );
    colnames(DF.output) <- c(
        "ID", "true.match", "IDstar", "y.true", "y.observed",
        colnames(design), "review", "match", "prY1"
        );

    # For false links, cyclically shift the linked ID and observed response
    # among the non-matches so each one is paired with a different record.
    DF.matches    <- DF.output[DF.output[, "match"] == TRUE, ];
    DF.nonmatches <- DF.output[DF.output[, "match"] == FALSE, ];
    if (nrow(DF.nonmatches) > 1) {
        DF.nonmatches[, c("IDstar", "y.observed")] <-
            DF.nonmatches[c(2:nrow(DF.nonmatches), 1), c("IDstar", "y.observed")];
        }

    DF.output <- rbind(DF.matches, DF.nonmatches);
    DF.output <- DF.output[order(DF.output[, "ID"]), ];

    # Link status is only observable for reviewed records.
    DF.output[DF.output[, "review"] == FALSE, "match"] <- NA;

    return(DF.output);
    }
|
/projects/StatCan/recordLinkage/errorAdjustment/chipperfield/001-StatCan-linkAdjust/StatCan.linkAdjust/R/make-synthetic-data.R
|
no_license
|
paradisepilot/statistics
|
R
| false
| false
| 1,987
|
r
|
# Generate a synthetic record-linkage dataset for a logistic outcome model.
#
# Args:
#   n.observation  number of records to simulate
#   beta           coefficient vector (intercept first); length - 1 predictors
#   errorRate      probability that a link is a false match
#   reviewFraction probability that a record is clerically reviewed
#
# Returns a numeric matrix with columns ID, true.match, IDstar, y.true,
# y.observed, x0..x<p>, review, match (NA when not reviewed) and prY1.
make.synthetic.data <- function(
    n.observation,
    beta,
    errorRate,
    reviewFraction
    ) {
    npredictors <- length(beta) - 1;
    # Design matrix: iid Bernoulli(0.5) predictors, intercept prepended,
    # columns labelled x0 (intercept), x1, ..., x<npredictors>.
    X <- rbinom(
        n = n.observation * npredictors,
        size = 1,
        prob = 0.5
        );
    X <- matrix(data = X, nrow = n.observation, byrow = TRUE);
    X <- cbind(rep(1,times=n.observation),X);
    colnames(X) <- paste0("x",seq(0,npredictors));
    # Logistic-model success probabilities and true binary responses.
    prY1.numerator <- exp(X %*% beta);
    prY1 <- prY1.numerator / (1 + prY1.numerator);
    response.vector <- rbinom(n = n.observation, size = 1, prob = prY1);
    # match.vector <- rbinom(n = n.observation, size = 1, prob = 1 - errorRate);
    # TRUE = the linked record is the correct one (false with prob errorRate).
    match.vector <- sample(
        x = c(TRUE,FALSE),
        size = n.observation,
        replace = TRUE,
        prob = c(1-errorRate,errorRate)
        );
    # review.vector <- rbinom(n = n.observation, size = 1, prob = reviewFraction);
    # TRUE = record selected for clerical review.
    review.vector <- sample(
        x = c(TRUE,FALSE),
        size = n.observation,
        replace = TRUE,
        prob = c(reviewFraction,1-reviewFraction)
        );
    tempID <- seq(1,n.observation);
    # cbind coerces the logical columns to 0/1 in the numeric output matrix.
    DF.output <- cbind(
        tempID,
        match.vector,
        tempID,
        response.vector,
        response.vector,
        X,
        review.vector,
        match.vector,
        prY1
        );
    colnames(DF.output) <- c(
        "ID",
        "true.match",
        "IDstar",
        "y.true",
        "y.observed",
        colnames(X),
        "review",
        "match",
        "prY1"
        );
    # For false links, cyclically shift IDstar/y.observed among non-matches so
    # each false link is paired with a different record's outcome.
    # NOTE(review): if exactly one non-match row exists, the single-row subset
    # drops to a vector and nrow() returns NULL — TODO confirm intended.
    DF.matches <- DF.output[DF.output[,"match"] == TRUE,];
    DF.nonmatches <- DF.output[DF.output[,"match"] == FALSE,];
    # nrow.nonmatches <- nrow(DF.nonmatches);
    # cyclically.permuted.indices <- c( seq(from = 2, to = nrow.nonmatches), 1 );
    # permuted.indices <- sample(
    #     x = 1:nrow.nonmatches,
    #     size = nrow.nonmatches,
    #     replace = FALSE
    #     );
    if (nrow(DF.nonmatches) > 1) {
        DF.nonmatches[,c("IDstar","y.observed")] <- DF.nonmatches[ c(2:nrow(DF.nonmatches),1), c("IDstar","y.observed") ];
    }
    # Reassemble in original ID order.
    DF.output <- rbind(DF.matches,DF.nonmatches);
    DF.output <- DF.output[order(DF.output[,"ID"]),];
    # Link status is only observable for reviewed records.
    DF.output[DF.output[,"review"] == FALSE,"match"] <- NA;
    return(DF.output);
    }
|
## Nicolas Servant
## HiTC BioConductor package
##**********************************************************************************************************##
##
## HIC Normalization procedure from Lieberman-Aiden et al. 2009
##
##**********************************************************************************************************##
## Normalized per expected number of count
## Divides the observed contact map by the expected counts at each genomic
## distance; if a standard-deviation estimate is available (loess method with
## stdev=TRUE), a z-score-like (obs - exp) / sd transform is applied instead.
setMethod("normPerExpected", signature=c("HTCexp"), definition=function(x, ...){
    ## Expected counts are estimated on the symmetrized map; extra arguments
    ## are forwarded to getExpectedCounts (e.g. method=, stdev=).
    expCounts <- getExpectedCounts(forceSymmetric(x), asList=TRUE, ...)
    if (! is.null(expCounts$stdev.estimate)){
        x@intdata <- (x@intdata-expCounts$exp.interaction)/expCounts$stdev.estimate
    }else{
        x@intdata <- x@intdata/(expCounts$exp.interaction)
    }
    ## Remove NaN or Inf values for further analyses
    #x@intdata[which(is.na(x@intdata) | is.infinite(x@intdata))]<-NA
    ## Inf (division by zero expected count) is reset to 0.
    x@intdata[Matrix::which(is.infinite(x@intdata))]<-0
    x
})
## Normalized per expected number of counts across all cis maps
## Builds a single genome-wide expected-counts profile (mean per distance,
## averaged over all intrachromosomal maps) and divides each cis map by it.
## Trans (interchromosomal) maps are returned unchanged.
setMethod("normPerExpected", signature=c("HTClist"), definition=function(x, ...){
    xintra <- x[isIntraChrom(x)]
    ## estimated expected counts for all cis maps
    exp <- lapply(xintra, function(xx){
        r <- getExpectedCounts(forceSymmetric(xx), method="mean", asList=TRUE, ...)
        r$exp.interaction
    })
    ## combined all cis expected counts: matrices are top-left aligned and
    ## averaged element-wise over the maps that are large enough to cover
    ## each cell. vapply is used for a type-stable size computation.
    N <- max(vapply(exp, function(m) max(dim(m)), numeric(1)))
    counts <- matrix(0, ncol=N, nrow=N)  ## number of maps covering each cell
    ss <- matrix(0, ncol=N, nrow=N)      ## running sum of expected values
    for (i in seq_along(exp)){
        n <- dim(exp[[i]])[1]
        counts[1:n, 1:n] <- counts[1:n, 1:n]+1
        ss[1:n, 1:n] <- ss[1:n, 1:n] + as.matrix(exp[[i]])
    }
    ## Mean over all expected matrices
    ss <- ss / counts
    ## Divide each cis map by the shared expected profile; NaN/Inf cells
    ## (e.g. zero expected counts) are reset to 0 for downstream analyses.
    xintranorm <- lapply(xintra, function(xx){
        n <- dim(xx@intdata)[1]
        xx@intdata <- xx@intdata/ss[1:n,1:n]
        xx@intdata[which(is.na(xx@intdata) | is.infinite(xx@intdata))]<-0
        xx
    })
    x[isIntraChrom(x)] <- xintranorm
    x
})
###################################
## getExpectedCountsMean
##
## This way of calculate expected counts was used in Naumova et al.
## The idea is just to look at all diagonals and to calculate their mean
##
## x = a HTCexp object
##
##
## NOTES
## Migth be interesting to add an isotonic regression on the mean to force the expected value to decrease with the distance
###################################
## Dispatch the expected-count estimation for a contact map.
##
## x      HTCexp object
## method "mean" (per-distance mean) or "loess" (lowess fit); partial
##        matching via match.arg
## asList if TRUE, return list(exp.interaction, stdev.estimate); otherwise
##        return x with its interaction data replaced by the expected counts
## ...    forwarded to the chosen estimator
getExpectedCounts <- function(x, method=c("mean","loess"), asList=FALSE, ...){
    met <- match.arg(method)
    ## Scalar condition: use && (short-circuit, scalar) rather than the
    ## elementwise & so the loess-size warning check is well-formed.
    if (dim(intdata(x))[1]>500 && met=="loess"){
        warning("Contact map looks big. Use mean method instead or be sure that the loess fit gives good results.")
    }
    if (met=="mean"){
        ret <- getExpectedCountsMean(x, ...)
    }else if (met=="loess"){
        ret <- getExpectedCountsLoess(x, ...)
    }else{
        ## Unreachable after match.arg; kept as a defensive guard.
        stop("Unknown method")
    }
    if (asList){
        return(ret)
    }else{
        intdata(x) <- ret$exp.interaction
        return(x)
    }
}
## Build integer breakpoints spanning [from, to], either log-spaced with
## multiplicative step `step`, or with a fixed count N of log-spaced points.
## Endpoints are always included; duplicates from rounding are dropped.
logbins <- function(from, to, step=1.05, N=NULL) {
    breaks <- if (is.null(N)) {
        seq(log(from), log(to), by=log(step))
    } else {
        seq(log(from), log(to), length.out=N)
    }
    unique(round(c(from, exp(breaks), to)))
}
## Estimate expected interaction counts as the mean observed count per
## genomic-distance diagonal (optionally with log-spaced distance bins).
##
## x          HTCexp object
## logbin     if TRUE, pool diagonals into log-spaced bins (step `step`)
## step       multiplicative bin width for the log binning
## filter.low rows/columns whose marginal sum falls below this quantile of
##            the non-zero marginals are masked with NA
##
## Returns list(exp.interaction = Matrix of expected counts,
##              stdev.estimate = NULL)  -- no sd estimate for this method.
getExpectedCountsMean <- function(x, logbin=TRUE, step=1.05, filter.low=0.05){
    xdata <- intdata(x)
    N <- dim(xdata)[1]
    if (logbin){
        ## Map each of the N distances onto its log-spaced bin label; the Rle
        ## expansion repeats each bin value until the next breakpoint.
        bins <- logbins(from=1,to=N, step=step)
        bins <- as.vector(Rle(values=bins, lengths=c(diff(bins),1)))
        stopifnot(length(bins)==N)
    }else{
        bins <- 1:N
    }
    message("Estimate expected using mean contact frequency per genomic distance ...")
    xdata <- as.matrix(xdata)
    ## Mask low-coverage columns/rows (marginal sum below the filter.low
    ## quantile of non-zero marginals) so they do not bias the means.
    rc <- colSums(xdata, na.rm=TRUE)
    ##rc <- which(rc==0)
    rc <- which(rc < ceiling(quantile(rc[which(rc>0)], probs=filter.low)))
    rr <- rowSums(xdata, na.rm=TRUE)
    ##rr <- which(rr==0)
    rr <- which(rr < ceiling(quantile(rr[which(rr>0)], probs=filter.low)))
    ## rm line with only zeros
    xdata[rr,] <- NA
    xdata[,rc] <- NA
    ## create an indicator for all diagonals in the matrix
    rows <- matrix(rep.int(bins, N), nrow=N)
    ##d <- rows - t(rows)
    ## d labels each cell with its (binned) diagonal; the sign distinguishes
    ## the lower triangle for non-symmetric input.
    d <- matrix(bins[1+abs(col(rows) - row(rows))],nrow=N) - 1
    d[lower.tri(d)] <- -d[lower.tri(d)]
    if (isSymmetric(xdata)){
        ## remove half of the matrix
        d[lower.tri(d)] <- NA
    }
    ## use split to group on these values
    mi <- split(xdata, d)
    milen <- lapply(mi, length)
    mimean <- lapply(mi, mean, na.rm=TRUE)
    ## Broadcast each per-diagonal mean back over its member cells, then
    ## unsplit to restore the original matrix layout.
    miexp <- lapply(1:length(milen), function(i){rep(mimean[[i]], milen[[i]])})
    names(miexp) <- names(mi)
    expmat <- as(matrix(unsplit(miexp, d), nrow=nrow(xdata), ncol=ncol(xdata)), "Matrix")
    if (isSymmetric(xdata)){
        expmat <- forceSymmetric(expmat, uplo="U")
    }
    colnames(expmat) <- colnames(xdata)
    rownames(expmat) <- rownames(xdata)
    ## Put NA at rc and cc
    expmat[rr,] <- NA
    expmat[,rc] <- NA
    return(list(exp.interaction=expmat, stdev.estimate=NULL))
}
###################################
## getExpectedCounts
##
## Estimate the expected interaction counts from a HTCexp object based on the interaction distances
##
## x = a HTCexp object
## span=fraction of the data used for smoothing at each x point. The larger the f value, the smoother the fit.
## bin=interval size (in units corresponding to x). If lowess estimates at two x values within delta of one another, it fits any points between them by linear interpolation. The default is 1% of the range of x. If delta=0 all but identical x values are estimated independently.
## stdev = logical,the standard deviation is estimated for each interpolation point
## plot = logical, display lowess and variance smoothing
##
## bin is used to speed up computation: instead of computing the local polynomial fit at each data point it is not computed for points within delta of the last computed point, and linear interpolation is used to fill in the fitted values for the skipped points.
## This function may be slow for large numbers of points. Increasing bin should speed things up, as will decreasing span.
## Lowess uses robust locally linear fits. A window, dependent on f, is placed about each x value; points that are inside the window are weighted so that nearby points get the most weight.
##
## NOTES
## All variances are calculated (even identical values) because of the parallel implementation.
## Cannot use rle object because the neighboring have to be calculated on the wall dataset. Or have to find a way to convert rle index to real index ...
## Easy to do with a 'for' loop but the parallel advantages are much bigger
###################################
## Estimate expected interaction counts with a lowess fit of counts vs
## genomic distance (Lieberman-Aiden-style normalization input).
##
## x     HTCexp object
## span  lowess smoothing fraction (f)
## bin   fraction of the distance range used as the lowess delta (speed-up)
## stdev if TRUE, also estimate a per-distance standard deviation by
##       tricube-weighted residuals at interpolation points
## plot  if TRUE, draw the fit (and interpolation points when stdev=TRUE)
##
## Returns list(exp.interaction = fitted Matrix,
##              stdev.estimate = matrix or NULL).
getExpectedCountsLoess<- function(x, span=0.01, bin=0.005, stdev=FALSE, plot=FALSE){
    stopifnot(inherits(x,"HTCexp"))
    xdata <- as.matrix(intdata(x))
    rc <- which(colSums(xdata, na.rm=TRUE)==0)
    rr <- which(rowSums(xdata, na.rm=TRUE)==0)
    ## rm line with only zeros
    xdata[rr,] <- NA
    xdata[,rc] <- NA
    ## Flatten counts and pair each with its genomic distance, sorted by
    ## distance (lowess requires ordered x); `o` is kept to undo the sort.
    ydata <- as.vector(xdata)
    ydata[which(is.na(ydata))] <- 0
    xdata.dist <- as.vector(intervalsDist(x))
    o<- order(xdata.dist)
    xdata.dist <- xdata.dist[o]
    ydata <- ydata[o]
    delta <- bin*diff(range(xdata.dist))
    ######################
    ## Lowess Fit
    ######################
    message("Lowess fit ...")
    #lowess.fit <- .C("lowess", x = as.double(xdata.dist), as.double(ydata),
    #                 length(ydata), as.double(span), as.integer(3), as.double(delta),
    #                 y = double(length(ydata)), double(length(ydata)), double(length(ydata)), PACKAGE = "stats")$y
    lowess.fit <-lowess(x=xdata.dist, y=ydata, f=span, delta=delta)$y
    ## y1 = 99th percentile of counts > 1, used only as the plot y-limit.
    y1 <- sort(ydata)
    y1 <- quantile(y1[which(y1>1)], probs=0.99)
    if (plot){
        par(font.lab=2, mar=c(4,4,1,1))
        ##plotIntraDist(ydata, xdata.dist, xlab="Genomic Distance (bp)", ylim=c(0,y1), ylab="Counts", main="", cex=0.5, cex.lab=0.7, pch=20, cex.axis=0.7, col="gray", frame=FALSE)
        plot(x=xdata.dist, y=ydata, xlab="Genomic Distance (bp)", ylim=c(0,y1), ylab="Counts", main="", cex=0.5, cex.lab=0.7, pch=20, cex.axis=0.7, col="gray", frame=FALSE)
        points(x=xdata.dist[order(lowess.fit)], y=sort(lowess.fit), type="l", col="red")
    }
    ## order(o) inverts the distance sort so the fitted values line up with
    ## the original matrix layout.
    lowess.mat <- Matrix(lowess.fit[order(o)], nrow=length(y_intervals(x)), byrow=FALSE)
    rownames(lowess.mat) <- id(y_intervals(x))
    colnames(lowess.mat) <- id(x_intervals(x))
    ######################
    ## Variance estimation
    ######################
    stdev.mat <- NULL
    if (stdev){
        message("Standard deviation calculation ...")
        ##interpolation
        ind <- getDeltaRange(delta, xdata.dist)
        lx <- length(xdata.dist)
        Q <- floor(lx*span)
        ## At each interpolation point, compute a tricube-weighted sd of the
        ## residuals from the Q nearest (by distance) neighbours.
        stdev.delta <- unlist(mclapply(1:length(ind), function(k){
            i <- ind[k]
            x1 <- xdata.dist[i]
            ## Neighbors selection 2*Q
            ll <- i-Q-1
            lr <- i+Q-1
            if (ll<0) ll=0
            if (lr>lx) lr=lx
            xdata.dist.sub <- xdata.dist[ll:lr]
            ydata.sub <- ydata[ll:lr]
            ## Select the Q closest distances
            d <- abs(x1-xdata.dist.sub)
            o2 <- order(d)[1:Q]
            x2 <- xdata.dist.sub[o2]
            y2 <- ydata.sub[o2]
            ## Distance between x and other points
            dref <- d[o2]
            drefs <- dref/max(abs(dref-x1)) ##max(dref) - NS
            ## Tricube weigths and stdev calculation
            ## NB: local `sqrt` (squared residuals) shadows base::sqrt only
            ## as a variable name; the function call still resolves normally.
            w <- tricube(drefs)
            sqrt <- w*(y2-lowess.fit[i])^2
            stdev <- sqrt(sum(sqrt)/
                          (((length(sqrt)-1) * sum(w))/length(sqrt)))
        }))
        if (plot){
            points(x=xdata.dist[ind], y=lowess.fit[ind], col="black", cex=.8, pch="+")
            legend(x="topright", lty=c(1,NA), pch=c(NA,"+"), col=c("red","black"),legend=c("Lowess fit","Interpolation points"), cex=.8, bty="n")
        }
        ## Approximation according to delta
        stdev.estimate <- approx(x=xdata.dist[ind], y=stdev.delta, method="linear", xout=xdata.dist)$y
        stdev.mat <- matrix(stdev.estimate[order(o)], nrow=length(y_intervals(x)), byrow=FALSE)
        rownames(stdev.mat) <- id(y_intervals(x))
        colnames(stdev.mat) <- id(x_intervals(x))
    }
    ## Put NA at rc and cc
    lowess.mat[rr,] <- NA
    lowess.mat[,rc] <- NA
    return(list(exp.interaction=lowess.mat,stdev.estimate=stdev.mat))
}
###################################
## getDeltaRange
## INTERNAL FUNCTION
## Calculate the interpolation points from the delta value
##
## delta = lowess delta parameter
## xdata = Intervals distances matrix
###################################
## Compute the interpolation-point indices for the lowess variance estimate.
##
## delta lowess delta parameter; 0 means "every point"
## xdata sorted vector of genomic distances
##
## Returns the indices of xdata at which the standard deviation is computed;
## points thin out with distance because the bound doubles after each hit.
getDeltaRange <- function(delta, xdata){
    message("Delta=",delta)
    if (delta>0){
        ind <- 1
        ## Record a point each time the next distance crosses the current
        ## delta bound, then double the bound.
        ## BUGFIX: the original loop header `1:length(xdata)-1` parses as
        ## (1:length(xdata))-1 = 0:(n-1), which could append index 0;
        ## seq_len(length(xdata)-1) is the intended 1..(n-1) range.
        for (i in seq_len(length(xdata)-1)){
            if (xdata[i+1]>=delta){
                ind <- c(ind,i)
                delta=delta+delta
            }
        }
        ## Always keep the last point so interpolation covers the full range.
        if (max(ind)<length(xdata)){
            ind <- c(ind, length(xdata))
        }
        message("Calculating stdev ... ")
    }else{
        ## delta == 0: every position is an interpolation point.
        ind <- seq_along(xdata)
    }
    ind
}
###################################
## tricube
## INTERNAL FUNCTION
## tricube distance weigth
##
## x = numeric. A distance
###################################
## Tricube kernel weight: (1 - |x|^3)^3 for |x| < 1, and 0 outside the unit
## interval. NA inputs propagate to NA, matching the original ifelse form.
tricube <- function(x) {
    w <- pmax(1 - abs(x)^3, 0)
    w^3
}
##**********************************************************************************************************##
##
## HIC Normalization procedure from HiCNorm package, Hu et al. 2012
##
##**********************************************************************************************************##
###################################
## normLGF
## Local Genomic Features normalization
##
##
## x = HTCexp/HTClist object
## family = regression model Poisson or Neg Binon
##
##
##################################
## Local Genomic Features (LGF) normalization (HiCNorm, Hu et al. 2012).
## Regresses counts on fragment length and GC content with mappability as an
## offset, then divides observed counts by the model-expected counts.
##
## x      HTCexp object whose intervals carry len/GC/map metadata
##        (see setGenomicFeatures)
## family "poisson" (glm) or "nb" (negative binomial, glm.nb)
##
## Returns x with intdata replaced by observed/expected ratios.
normLGF <- function(x, family=c("poisson", "nb")){
    family <- match.arg(family)
    message("Starting LGF normalization on ", seqlevels(x), " ...")
    counts <- intdata(x)
    ## Remove rowCounts=0 & colCounts=0
    rc <- which(rowSums(counts)>0)
    ## Intrachromosomal maps
    if (isIntraChrom(x)){
        ## cis: the map is square, so the same non-empty index set is used
        ## for rows and columns.
        cc <- rc
        stopifnot(length(rc)>0)
        counts.rc <- counts[rc,rc]
        elt <- elementMetadata(y_intervals(x)[rc])
        len <- elt$len
        gcc <- elt$GC
        map <- elt$map
        if(all(is.na(len)) || all(is.na(gcc)) || all(is.na(map)))
            stop("Genomic features are missing. Effective fragments length, GC content and mappability are required.")
        ##get cov matrix
        ## Pairwise covariates via outer products, log-transformed.
        len_m<-as.matrix(log(1+len%o%len))
        gcc_m<-as.matrix(log(1+gcc%o%gcc))
        ##error for regions with 0 mappability
        map[which(map==0)] <- 10e-4
        map_m<-as.matrix(log(map%o%map))
    }else{
        ## Interchromosomal maps
        ## trans: rows and columns come from different chromosomes, so each
        ## axis gets its own feature vectors.
        cc <- which(colSums(counts)>0)
        stopifnot(length(rc)>0 & length(cc)>0)
        counts.rc <- counts[rc,cc]
        yelt <- elementMetadata(y_intervals(x)[rc])
        xelt <- elementMetadata(x_intervals(x)[cc])
        ylen <- yelt$len
        xlen <- xelt$len
        ygcc <- yelt$GC
        xgcc <- xelt$GC
        ymap <- yelt$map
        xmap <- xelt$map
        if(all(is.na(ylen)) || all(is.na(ygcc)) || all(is.na(ymap)) || all(is.na(xlen)) || all(is.na(xgcc)) || all(is.na(xmap)))
            stop("Genomic features are missing. Effective fragments length, GC content and mappability are required.")
        ##get cov matrix
        len_m<-as.matrix(log(1+ylen%o%xlen))
        gcc_m<-as.matrix(log(1+ygcc%o%xgcc))
        ##error for regions with 0 mappability
        ymap[which(ymap==0)] <- 10e-4
        xmap[which(xmap==0)] <- 10e-4
        map_m<-as.matrix(log(ymap%o%xmap))
    }
    ##centralize cov matrix of enz, gcc
    #len_m<-(len_m-mean(len_m, na.rm=TRUE))/apply(len_m, 2, sd, na.rm=TRUE)
    #gcc_m<-(gcc_m-mean(gcc_m, na.rm=TRUE))/apply(gcc_m, 2, sd, na.rm=TRUE)
    #Fix bug in BioC [bioc] A: normLGF yields non-symetric matrices
    ## Standardize with a single global sd so the matrices stay symmetric.
    len_m<-(len_m-mean(len_m, na.rm=TRUE))/sd(len_m, na.rm=TRUE)
    gcc_m<-(gcc_m-mean(gcc_m, na.rm=TRUE))/sd(gcc_m, na.rm=TRUE)
    ##change matrix into vector
    ## cis maps are symmetric, so only the upper triangle enters the fit.
    if (isIntraChrom(x)){
        counts_vec<-counts.rc[which(upper.tri(counts.rc,diag=FALSE))]
        len_vec<-len_m[upper.tri(len_m,diag=FALSE)]
        gcc_vec<-gcc_m[upper.tri(gcc_m,diag=FALSE)]
        map_vec<-map_m[upper.tri(map_m,diag=FALSE)]
    }else{
        counts_vec<-as.vector(counts.rc)
        len_vec<-as.vector(len_m)
        gcc_vec<-as.vector(gcc_m)
        map_vec<-as.vector(map_m)
    }
    print("fit ...")
    if (family=="poisson"){
        ##fit Poisson regression: u~len+gcc+offset(map)
        fit<-glm(counts_vec~ len_vec+gcc_vec+offset(map_vec),family="poisson")
        ##fit<-bigglm(counts_vec~len_vec+gcc_vec+offset(map_vec),family="poisson", data=cbind(counts_vec, len_vec, gcc_vec, map_vec))
    }else{
        fit<-glm.nb(counts_vec~len_vec+gcc_vec+offset(map_vec))
    }
    coeff<-fit$coeff
    ## The corrected values (residuals) can be seen as a observed/expected correction.
    ## So I will compare the normalized counts with one: the observed count is higher or lower than the expected count. We may not want to compare the range of the normalized count with the range of the raw count. They have different interpretations.
    counts.cor<-round(counts.rc/exp(coeff[1]+coeff[2]*len_m+coeff[3]*gcc_m+map_m), 4)
    ## Write the corrected block back into the full (possibly larger) matrix.
    counts[rownames(counts.rc), colnames(counts.rc)]<-counts.cor
    intdata(x) <- counts
    return(x)
}##normLGF
###################################
## setGenomicFeatures
## Annotate a HTCexp or HTClist object with the GC content and the mappability features
##
##
## x = HTCexp/HTClist object
## cutSites = GRanges object ir GRangesList from getAnnotatedRestrictionSites function
## minFragMap = Discard restriction with mappability lower the this threshold (and NA)
## effFragLen = Effective fragment length
##################################
## Annotate the intervals of a HTCexp object with effective fragment length,
## GC content and mappability, computed from annotated restriction sites.
##
## x          HTCexp object
## cutSites   GRangesList from getAnnotatedRestrictionSites, one element per
##            chromosome
## minFragMap restriction-site ends with mappability below this threshold
##            are discarded (NA)
## effFragLen effective fragment length cap used for the len feature
setGenomicFeatures <- function(x, cutSites, minFragMap=.5, effFragLen=1000){
    stopifnot(inherits(x,"HTCexp"))
    stopifnot(seqlevels(x) %in% seqlevels(cutSites))
    obj <- x
    xgi <- x_intervals(x)
    message("Annotation of ", seqlevels(x), " ...")
    xgi <- annotateIntervals(xgi, cutSites[[seqlevels(xgi)]], minfragmap=minFragMap, efffraglen=effFragLen)
    x_intervals(obj) <- xgi
    ## For a binned cis map the x and y intervals are identical, so the
    ## annotation is computed once and reused.
    if (isIntraChrom(x) & isBinned(x)){
        y_intervals(obj) <- xgi
    }else{
        ygi <- y_intervals(x)
        ygi <- annotateIntervals(ygi, cutSites[[seqlevels(ygi)]], minfragmap=minFragMap, efffraglen=effFragLen)
        y_intervals(obj) <- ygi
    }
    obj
}
###################################
## annotateIntervals
## INTERNAL FUNCTION
##
##
## gi = GRanges object from x_intervals or y_intervals methods
## annot = GRanges object from getAnnotatedRestrictionSites function
##
##################################
## Aggregate per-restriction-site features (length, GC, mappability) onto a
## set of genomic intervals (bins).
##
## gi         GRanges of intervals (from x_intervals / y_intervals)
## annot      GRanges of annotated restriction sites
##            (from getAnnotatedRestrictionSites)
## minfragmap discard site ends with mappability < threshold (set to NA)
## efffraglen cap each fragment length at this value when summing
##
## Returns gi with len/GC/map metadata columns added.
annotateIntervals <- function(gi, annot, minfragmap=.5, efffraglen=1000){
    ## Preprocess, keep fragments ends with mappability score larger than .5.
    ## Depends on the data processing (see Yaffe and Tanay, 2011). These fragments will be exclude from the analysis.
    if (!is.na(minfragmap) & !all(is.na(annot$map_U)) & !all(is.na(annot$map_D))){
        idxmap <- which(annot$map_U<minfragmap | is.na(annot$map_U))
        elementMetadata(annot)[idxmap,c("len_U", "GC_U", "map_U")]<-NA_real_
        idxmap <- which(annot$map_D<minfragmap | is.na(annot$map_D))
        elementMetadata(annot)[idxmap,c("len_D", "GC_D", "map_D")]<-NA_real_
    }
    ## Get all restriction sites which overlap with the bins
    ## Split upstream and downstream bins to deal with restriction sites which overlap the start or end of a fragment
    ## annot_up collapses each site to its start position, carrying the
    ## upstream (U) features; annot_down does the same at the end with the
    ## downstream (D) features.
    annot_up <- annot
    end(annot_up)<-start(annot)
    elementMetadata(annot_up) <- NULL
    annot_up$len=as.numeric(annot$len_U)
    annot_up$GC=as.numeric(annot$GC_U)
    annot_up$map=as.numeric(annot$map_U)
    annot_down <- annot
    start(annot_down) <- end(annot)
    elementMetadata(annot_down) <- NULL
    annot_down$len=as.numeric(annot$len_D)
    annot_down$GC=as.numeric(annot$GC_D)
    annot_down$map=as.numeric(annot$map_D)
    outl_up<- as.list(findOverlaps(gi, annot_up))
    outl_dw<- as.list(findOverlaps(gi, annot_down))
    ## For each interval, combine the features of its overlapping site ends.
    annotscores <- lapply(1:length(outl_up), function(i){
        id_up <- outl_up[[i]]
        id_dw <- outl_dw[[i]]
        ##temp <- c(annot_up[id_up], annot_down[id_dw])
        temp_up <- annot_up[id_up]
        temp_dw <- annot_down[id_dw]
        ## len - effective length" is the fragment length truncated by 1000 bp, which is the number of bases with specific ligation.
        ## In Yaffe & Tanay's paper Figure 1b, they define specific ligation as sum of distance to cutter sites (d1+d2) <= 500 bp. Such criterion implies that d1<=500 bp and d2 <= 500 bp. So for each fragment end, only reads mapped within 500 bp to cutter sites are used for downstream analysis.
        lenv <- unique(c(temp_up$len, temp_dw$len))
        if (!is.na(efffraglen))
            lenscore <- sum(lenv>efffraglen, na.rm=TRUE)*efffraglen + sum(lenv[lenv<efffraglen], na.rm=TRUE)
        else
            lenscore <- sum(lenv, na.rm=TRUE)
        ##GC
        gcscore <- mean(c(temp_up$GC, temp_dw$GC), na.rm=TRUE)
        ##map
        mapscore <- mean(c(temp_up$map, temp_dw$map), na.rm=TRUE)
        c(lenscore, gcscore, mapscore)
    })
    annotscores <- matrix(unlist(annotscores), ncol=3, byrow=TRUE)
    colnames(annotscores) <- c("len", "GC", "map")
    elementMetadata(gi)$len <- round(annotscores[,"len"],3)
    elementMetadata(gi)$GC <- round(annotscores[,"GC"],3)
    elementMetadata(gi)$map <- round(annotscores[,"map"],3)
    gi
}
###################################
## getAnnotatedRestrictionFragments
## Return the restriction fragments for a given enzyme, annotated with the GC content and the mappability
##
##
## resSite = Cutting site of the restriction enzyme used (default HindIII)
## overhangs5 = Cleavage 5 overhang
## chromosome = chromosomes list to focus on. If NULL, all genome chromosome are investigated
## genomePack = name of the BSgenome package to load
## w = size of the downstream/upstream window to use around the restriction site to calculate the GC content. Default is 200. See Yaffe and Tanay for more details
## mappability = GRanges object of the mappability (see the ENCODE mappability tracks)
##
## D = downstream / U = upstream the restriction site
##################################
## Locate all restriction sites of an enzyme on selected chromosomes and
## annotate each site with upstream/downstream fragment length, GC content
## (wingc bp windows) and mean mappability (winmap bp windows).
##
## resSite     recognition sequence of the enzyme (default HindIII AAGCTT)
## overhangs5  5' overhang length of the cut
## chromosomes chromosomes to process; NULL = all chromosomes of the genome
## genomePack  name of the BSgenome package providing the sequence
## mappability optional GRanges of mappability scores (ENCODE-style)
## wingc       window size (bp) for the GC-content calculation
## winmap      window size (bp) for the mappability calculation
##
## Returns a named GRangesList with one element per chromosome; suffixes
## _U/_D denote features upstream/downstream of each site.
getAnnotatedRestrictionSites <- function(resSite="AAGCTT", overhangs5=1, chromosomes=NULL, genomePack="BSgenome.Mmusculus.UCSC.mm9", mappability=NULL, wingc=200, winmap=500){
    ## Lazily attach the genome package if not already loaded.
    if(genomePack %in% loadedNamespaces()==FALSE){
        stopifnot(require(genomePack, character.only=TRUE))
    }
    genome <- eval(as.name(genomePack))
    if (is.null(chromosomes)){
        chromosomes <- seqlevels(genome)
    }
    genomeCutSites <- mclapply(chromosomes, function(chr){
        message("Get restriction sites for ", chr, " ...")
        cutSites <- getRestrictionSitesPerChromosome(resSite, overhangs5, genome, chr)
        message(length(cutSites), " sites")
        message("Calculate fragment length ...")
        ## Add chromosome start/end
        ## Distances to the next/previous cut site, with the chromosome
        ## boundaries standing in for the first and last fragments.
        len_D <- c(end(cutSites)[-1], length(genome[[chr]])) - start(cutSites)
        len_U <- end(cutSites) - c(0, start(cutSites)[-length(cutSites)])
        cutSites$len_U <- len_U
        cutSites$len_D <- len_D
        message("Calculate GC content ...")
        ## Upstream GC content
        ## Windows are clipped at the chromosome boundaries.
        win <- start(cutSites)-wingc
        win[win<0] <- 1
        seq <- Biostrings::getSeq(genome, chr, start=win, end=start(cutSites)-1)
        ##cutSites$seq_U <- seq
        cutSites$GC_U<- round(Biostrings::letterFrequency(seq, as.prob=FALSE, letters="CG")/Biostrings::letterFrequency(seq, as.prob=FALSE, letters="ACGT"),3)
        ## Downstream GC content
        win <- start(cutSites)+wingc-1
        win[win>length(genome[[chr]])] <- length(genome[[chr]])
        seq <- Biostrings::getSeq(genome, chr, start(cutSites), win)
        cutSites$GC_D<- round(Biostrings::letterFrequency(seq, as.prob=FALSE, letters="CG")/Biostrings::letterFrequency(seq, as.prob=FALSE, letters="ACGT"),3)
        ##cutSites$seq_D <- seq
        if (!is.null(mappability)){
            message("Calculate mappability ...")
            stopifnot(inherits(mappability,"GRanges"))
            ## Mean mappability score over winmap bp windows on each side.
            mappability <- mappability[seqnames(mappability)==chr]
            win <- start(cutSites)-winmap+1
            win[win<0] <- 1
            gr <- GRanges(seqnames = chr, ranges = IRanges(start=win, end=start(cutSites)))
            overl <- as.list(findOverlaps(gr, mappability))
            mscore <- mappability$score
            cutSites$map_U<- unlist(lapply(overl, function(idx){
                round(mean(mscore[idx], na.rm=TRUE),3)
            }))
            win <- start(cutSites)+winmap
            win[win>length(genome[[chr]])] <- length(genome[[chr]])
            gr <- GRanges(seqnames = chr, ranges = IRanges(start=start(cutSites)+1, end=win))
            overl <- as.list(findOverlaps(gr, mappability))
            mscore <- mappability$score
            cutSites$map_D<- unlist(lapply(overl, function(idx){
                round(mean(mscore[idx], na.rm=TRUE),3)
            }))
        }else{
            ## No mappability track supplied: mark the feature as missing.
            cutSites$map_U<-NA_real_
            cutSites$map_D<-NA_real_
        }
        message("done ...")
        cutSites
    })
    grl <- GRangesList(genomeCutSites)
    names(grl) <- chromosomes
    grl
}
###################################
## getRestrictionSitesPerChromosome
## INTERNAL FUNCTION
##
##
## resSite = Cutting site of the restriction enzyme used
## overhangs5 = Cleavage 5 overhang
## genome = BSgenome object of the reference genome
## chromosome = chromosome to focus on
##
##################################
## Find all occurrences of a restriction site on one chromosome and shrink
## each match by the enzyme's 5' overhang on both sides.
##
## resSite    recognition sequence of the enzyme
## overhangs5 5' overhang length of the cut
## genome     BSgenome object
## chromosome single chromosome name
##
## Returns a strand-less GRanges of the adjusted cut-site positions.
getRestrictionSitesPerChromosome <- function(resSite, overhangs5, genome, chromosome){
    stopifnot(inherits(genome,"BSgenome"))
    stopifnot(length(chromosome)==1)
    restrictionSites<-Biostrings::matchPattern(resSite, genome[[chromosome]])
    ## Deal with restriction enzyme 5' overhangs
    s <- start(restrictionSites) + overhangs5
    e <- end(restrictionSites) - overhangs5
    ir <- IRanges(start=s, end=e)
    restrictionSites <- GRanges(seqnames = chromosome, ranges = ir, strand = "*")
    return(restrictionSites)
}
###################################
## getRestrictionFragmentsPerChromosome
##
##
## resSite = Cutting site of the restriction enzyme used
## overhangs5 = Cleavage 5 overhang
## genome = BSgenome object of the reference genome
## chromosome = chromosome to focus on
##
##################################
getRestrictionFragmentsPerChromosome <- function(resSite="AAGCTT", overhangs5=1,
                                                 chromosomes=NULL, genomePack="BSgenome.Mmusculus.UCSC.mm9"){
    ## Build the restriction-fragment map, one GRanges per chromosome.
    ## Load the genome package on demand
    if(genomePack %in% loadedNamespaces()==FALSE){
        stopifnot(require(genomePack, character.only=TRUE))
    }
    genome <- eval(as.name(genomePack))
    stopifnot(inherits(genome,"BSgenome"))
    if (is.null(chromosomes)){
        chromosomes <- seqlevels(genome)
    }
    genomeResFrag <- mclapply(chromosomes, function(chromosome){
        message("Get restriction fragments for ", chromosome, " ...")
        restrictionSites <- getRestrictionSitesPerChromosome(resSite, overhangs5, genome, chromosome)
        ## Fragments run from the chromosome start to each cut site, and from
        ## the last cut site to the chromosome end
        GRanges(seqnames=chromosome, ranges=IRanges(
            start=c(1, start(restrictionSites)),
            end=c(start(restrictionSites)-1, seqlengths(genome)[chromosome])), strand="+")
    })
    ## Name the per-chromosome results, consistent with getAnnotatedRestrictionSites
    names(genomeResFrag) <- chromosomes
    return(genomeResFrag)
}
##**********************************************************************************************************##
##
## ICE Normalization procedure from Imakaev et al .2012
##
##**********************************************************************************************************##
###################################
## balancingSK
## INTERNAL FUNCTION
##
## Matrix balancing used in ICE normalization
## Based on the Sinkhorn-Knopp algorithm
##
## x = HTCexp object
## max_iter = maximum number of iteration to converge
## eps = threshold to converge
##
##################################
balancingSK<- function(x, max_iter=50, eps=1e-4){
    ## Sinkhorn-Knopp-style matrix balancing used by ICE normalization.
    ## Iteratively divides the matrix by the outer product of its row-sum
    ## biases until the bias vector is stable (L1 change < eps).
    m <- dim(x)[1]
    ## Initialization
    sum_ss <- matrix(rep(0, m), ncol=1)
    old_dbias <- NULL
    converged <- FALSE
    ## Remove Diagonal ?
    for (it in 1:max_iter){
        message("it=",it," ", Sys.time())
        ## 1- calculate sum of W over all rows
        sum_ds <- rowSums(x, na.rm=TRUE)
        ## 2- calculate a vector of corrected ss reads: NOT DONE
        ## 3- calculate the vector of biases
        dbias <- as.matrix(sum_ds, ncol=1) + sum_ss
        ## 4- renormalize bias by its mean value over non-zero bins to avoid
        ##    numerical instabilities
        dbias <- dbias/mean(dbias[dbias!=0])
        ## 5- set zero values of bias to 1 to avoid 0/0 errors
        dbias[dbias==0] <- 1
        ## 6- divide W by the bias product BiBj for all (i,j)
        x <- x/(dbias %*% t(dbias))
        ## Stop as soon as the bias vector is stable, even on the last iteration
        if (!is.null(old_dbias) && sum(abs(old_dbias - dbias))<eps){
            converged <- TRUE
            message("Break at iteration ", it)
            break
        }
        old_dbias <- dbias
    }
    ## Report convergence based on an explicit flag: the original test
    ## (it == max_iter) misreported a convergence on the very last iteration.
    if (!converged){
        message("Did not converge. Stop at iteration ", max_iter)
    }else{
        message("Converged in ", it, " iterations")
    }
    return(x)
}
###################################
## IterativeCorNormalization
## ICE normalization
##
##
## x = HTCexp object or HTClist object
## max_iter = maximum number of iteration to converge
## eps = threshold to converge
## sparse.filter = Percentage of rows and columns to discard based on sparsity (default=0.02)
##
##################################
normICE <- function(x, max_iter=50, eps=1e-4, sparse.filter=0.02){
    ## ICE normalization (Imakaev et al. 2012): balance the contact map so
    ## rows/columns have equal visibility. Accepts a symmetric HTCexp map or
    ## an HTClist (balanced on the combined genome-wide matrix).
    if (inherits(x, "HTCexp")){
        stopifnot(isSymmetric(x))
        idata <- intdata(x)
        gr <- y_intervals(x)
    }else if (inherits(x, "HTClist")){
        idata <- getCombinedContacts(x)
        gr <- getCombinedIntervals(x)
    }
    ## Discard the 'sparse.filter' fraction of the most zero-rich rows/columns
    ## before balancing; pass sparse.filter=NA to skip this step.
    if (!is.na(sparse.filter)){
        message("Start filtering ...", Sys.time())
        ## Number of zero entries per row (fully-empty rows are excluded from
        ## the quantile so they do not skew the threshold)
        spars <- apply(idata, 1, function(x){length(which(x==0))})
        spars.t <- quantile(spars[spars!=dim(idata)[1]], probs=(1-sparse.filter))
        idx <- which(spars>as.numeric(spars.t))
        idata[idx,] <- 0
        idata[,idx] <- 0
        message("Filter out ",length(idx)," rows and columns ...")
    }
    message("Start Iterative Correction ...")
    xmat <- balancingSK(idata, max_iter=max_iter, eps=eps)
    if (inherits(x, "HTCexp")){
        intdata(x) <- xmat
    }else if (inherits(x, "HTClist")){
        ## gr <- dimnames2gr(xmat, pattern="\\||\\:|\\-", feat.names=c("name","chr","start", "end"))
        ## xgi <- gr[[1]]
        ## ygi <- gr[[2]]
        ## rownames(xmat) <- id(ygi)
        ## colnames(xmat) <- id(xgi)
        ## Split the balanced genome-wide matrix back into per-map objects;
        ## when no x intervals exist, the y intervals are used for both axes.
        if (is.null(gr$xgi))
            x <- splitCombinedContacts(xmat, xgi=gr$ygi, ygi=gr$ygi)
        else
            x <- splitCombinedContacts(xmat, xgi=gr$xgi, ygi=gr$ygi)
    }
    x
}
|
/R/HiC_norm.R
|
no_license
|
mckf111/hiceize
|
R
| false
| false
| 29,506
|
r
|
## Nicolas Servant
## HiTC BioConductor package
##**********************************************************************************************************##
##
## HIC Normalization procedure from Lieberman-Aiden et al. 2009
##
##**********************************************************************************************************##
## Normalized per expected number of count
## Normalize a symmetric contact map by its distance-based expected counts.
## When a stdev estimate is returned by getExpectedCounts, a z-score-like
## (observed - expected) / sd is computed; otherwise observed / expected.
setMethod("normPerExpected", signature=c("HTCexp"), definition=function(x, ...){
    expCounts <- getExpectedCounts(forceSymmetric(x), asList=TRUE, ...)
    if (! is.null(expCounts$stdev.estimate)){
        ## (observed - expected) / stdev
        x@intdata <- (x@intdata-expCounts$exp.interaction)/expCounts$stdev.estimate
    }else{
        ## observed / expected ratio
        x@intdata <- x@intdata/(expCounts$exp.interaction)
    }
    ## Remove NaN or Inf values for further analyses
    #x@intdata[which(is.na(x@intdata) | is.infinite(x@intdata))]<-NA
    ## Infinite values (division by a zero expected count) are set to 0
    x@intdata[Matrix::which(is.infinite(x@intdata))]<-0
    x
})
## Normalized per expected number of counts across all cis maps
## Normalize all intrachromosomal maps of an HTClist by a shared expected-count
## profile: per-distance expected counts are estimated on each cis map
## (method="mean") and averaged across chromosomes before dividing.
setMethod("normPerExpected", signature=c("HTClist"), definition=function(x, ...){
    xintra <- x[isIntraChrom(x)]
    ## estimated expected counts for all cis maps
    exp <- lapply(xintra, function(xx){
        r <- getExpectedCounts(forceSymmetric(xx), method="mean", asList=TRUE, ...)
        r$exp.interaction
    })
    ## combined all cis expected counts
    ## Chromosomes differ in size: pad to the largest map and track how many
    ## maps contribute to each cell ('counts') so the mean is well defined.
    N <- max(sapply(exp, dim))
    counts <- matrix(0, ncol=N, nrow=N)
    ss <- matrix(0, ncol=N, nrow=N)
    for (i in 1:length(exp)){
        n <- dim(exp[[i]])[1]
        counts[1:n, 1:n] <- counts[1:n, 1:n]+1
        ss[1:n, 1:n] <- ss[1:n, 1:n] + as.matrix(exp[[i]])
    }
    ## Mean over all expected matrices
    ss <- ss / counts
    xintranorm <- lapply(xintra, function(xx){
        n <- dim(xx@intdata)[1]
        xx@intdata <- xx@intdata/ss[1:n,1:n]
        ## NA/Inf (e.g. division by a zero expected count) are set to 0
        xx@intdata[which(is.na(xx@intdata) | is.infinite(xx@intdata))]<-0
        xx
    })
    x[isIntraChrom(x)] <- xintranorm
    x
})
###################################
## getExpectedCountsMean
##
## This way of calculate expected counts was used in Naumova et al.
## The idea is just to look at all diagonals and to calculate their mean
##
## x = a HTCexp object
##
##
## NOTES
## Migth be interesting to add an isotonic regression on the mean to force the expected value to decrease with the distance
###################################
getExpectedCounts <- function(x, method=c("mean","loess"), asList=FALSE, ...){
    ## Dispatch the expected-count estimation to the mean-per-distance or the
    ## loess-based estimator. Returns either the list of estimates (asList=TRUE)
    ## or the HTCexp object with its interaction data replaced by the expected
    ## counts.
    met <- match.arg(method)
    ## Use the scalar '&&' (was '&') and check the method first so the matrix
    ## dimension is only computed when relevant.
    if (met=="loess" && dim(intdata(x))[1]>500){
        warning("Contact map looks big. Use mean method instead or be sure that the loess fit gives good results.")
    }
    ## match.arg guarantees one of the two methods, so no fallback is needed
    ret <- switch(met,
                  mean = getExpectedCountsMean(x, ...),
                  loess = getExpectedCountsLoess(x, ...))
    if (asList){
        return(ret)
    }
    intdata(x) <- ret$exp.interaction
    x
}
logbins<- function(from, to, step=1.05, N=NULL) {
    ## Log-spaced integer breakpoints between 'from' and 'to', always
    ## including both ends. Either a multiplicative 'step' or a fixed number
    ## of points 'N' controls the spacing.
    breaks <- if (is.null(N)) {
        exp(seq(log(from), log(to), by=log(step)))
    } else {
        exp(seq(log(from), log(to), length.out=N))
    }
    unique(round(c(from, breaks, to)))
}
getExpectedCountsMean <- function(x, logbin=TRUE, step=1.05, filter.low=0.05){
    ## Expected counts as the mean contact frequency per genomic distance
    ## (per diagonal). With logbin=TRUE, distances are grouped into
    ## log-spaced bins before averaging.
    xdata <- intdata(x)
    N <- dim(xdata)[1]
    if (logbin){
        bins <- logbins(from=1,to=N, step=step)
        ## Expand the breakpoints into a per-position bin label of length N
        bins <- as.vector(Rle(values=bins, lengths=c(diff(bins),1)))
        stopifnot(length(bins)==N)
    }else{
        bins <- 1:N
    }
    message("Estimate expected using mean contact frequency per genomic distance ...")
    xdata <- as.matrix(xdata)
    ## Mask low-coverage rows/columns (below the filter.low quantile of the
    ## non-zero marginals) so they do not bias the per-distance means
    rc <- colSums(xdata, na.rm=TRUE)
    ##rc <- which(rc==0)
    rc <- which(rc < ceiling(quantile(rc[which(rc>0)], probs=filter.low)))
    rr <- rowSums(xdata, na.rm=TRUE)
    ##rr <- which(rr==0)
    rr <- which(rr < ceiling(quantile(rr[which(rr>0)], probs=filter.low)))
    ## rm line with only zeros
    xdata[rr,] <- NA
    xdata[,rc] <- NA
    ## create an indicator for all diagonals in the matrix
    rows <- matrix(rep.int(bins, N), nrow=N)
    ##d <- rows - t(rows)
    d <- matrix(bins[1+abs(col(rows) - row(rows))],nrow=N) - 1
    d[lower.tri(d)] <- -d[lower.tri(d)]
    if (isSymmetric(xdata)){
        ## remove half of the matrix (its mean would be duplicated anyway)
        d[lower.tri(d)] <- NA
    }
    ## use split to group on these values
    mi <- split(xdata, d)
    milen <- lapply(mi, length)
    mimean <- lapply(mi, mean, na.rm=TRUE)
    ## Broadcast each per-diagonal mean back over the diagonal's cells
    miexp <- lapply(1:length(milen), function(i){rep(mimean[[i]], milen[[i]])})
    names(miexp) <- names(mi)
    expmat <- as(matrix(unsplit(miexp, d), nrow=nrow(xdata), ncol=ncol(xdata)), "Matrix")
    if (isSymmetric(xdata)){
        expmat <- forceSymmetric(expmat, uplo="U")
    }
    colnames(expmat) <- colnames(xdata)
    rownames(expmat) <- rownames(xdata)
    ## Put NA at the filtered rows (rr) and columns (rc)
    expmat[rr,] <- NA
    expmat[,rc] <- NA
    return(list(exp.interaction=expmat, stdev.estimate=NULL))
}
###################################
## getExpectedCounts
##
## Estimate the expected interaction counts from a HTCexp object based on the interaction distances
##
## x = a HTCexp object
## span=fraction of the data used for smoothing at each x point. The larger the f value, the smoother the fit.
## bin=interval size (in units corresponding to x). If lowess estimates at two x values within delta of one another, it fits any points between them by linear interpolation. The default is 1% of the range of x. If delta=0 all but identical x values are estimated independently.
## stdev = logical,the standard deviation is estimated for each interpolation point
## plot = logical, display lowess and variance smoothing
##
## bin is used to speed up computation: instead of computing the local polynomial fit at each data point it is not computed for points within delta of the last computed point, and linear interpolation is used to fill in the fitted values for the skipped points.
## This function may be slow for large numbers of points. Increasing bin should speed things up, as will decreasing span.
## Lowess uses robust locally linear fits. A window, dependent on f, is placed about each x value; points that are inside the window are weighted so that nearby points get the most weight.
##
## NOTES
## All variances are calculated (even identical values) because of the parallel implementation.
## Cannot use rle object because the neighboring have to be calculated on the wall dataset. Or have to find a way to convert rle index to real index ...
## Easy to do with a 'for' loop but the parallel advantages are much bigger
###################################
getExpectedCountsLoess<- function(x, span=0.01, bin=0.005, stdev=FALSE, plot=FALSE){
    ## Expected counts from a lowess fit of counts versus genomic distance.
    ## Optionally estimates a per-point standard deviation (tricube-weighted,
    ## then linearly interpolated between the getDeltaRange points).
    stopifnot(inherits(x,"HTCexp"))
    xdata <- as.matrix(intdata(x))
    rc <- which(colSums(xdata, na.rm=TRUE)==0)
    rr <- which(rowSums(xdata, na.rm=TRUE)==0)
    ## rm line with only zeros
    xdata[rr,] <- NA
    xdata[,rc] <- NA
    ydata <- as.vector(xdata)
    ydata[which(is.na(ydata))] <- 0
    xdata.dist <- as.vector(intervalsDist(x))
    ## Sort by distance; 'o' is kept to restore the original order later
    o<- order(xdata.dist)
    xdata.dist <- xdata.dist[o]
    ydata <- ydata[o]
    delta <- bin*diff(range(xdata.dist))
    ######################
    ## Lowess Fit
    ######################
    message("Lowess fit ...")
    #lowess.fit <- .C("lowess", x = as.double(xdata.dist), as.double(ydata),
    #                 length(ydata), as.double(span), as.integer(3), as.double(delta),
    #                 y = double(length(ydata)), double(length(ydata)), double(length(ydata)), PACKAGE = "stats")$y
    lowess.fit <-lowess(x=xdata.dist, y=ydata, f=span, delta=delta)$y
    ## y1 = 99th percentile of the counts > 1, used only as the plot's y-limit
    y1 <- sort(ydata)
    y1 <- quantile(y1[which(y1>1)], probs=0.99)
    if (plot){
        par(font.lab=2, mar=c(4,4,1,1))
        ##plotIntraDist(ydata, xdata.dist, xlab="Genomic Distance (bp)", ylim=c(0,y1), ylab="Counts", main="", cex=0.5, cex.lab=0.7, pch=20, cex.axis=0.7, col="gray", frame=FALSE)
        plot(x=xdata.dist, y=ydata, xlab="Genomic Distance (bp)", ylim=c(0,y1), ylab="Counts", main="", cex=0.5, cex.lab=0.7, pch=20, cex.axis=0.7, col="gray", frame=FALSE)
        points(x=xdata.dist[order(lowess.fit)], y=sort(lowess.fit), type="l", col="red")
    }
    ## order(o) inverts the sort, restoring matrix order
    lowess.mat <- Matrix(lowess.fit[order(o)], nrow=length(y_intervals(x)), byrow=FALSE)
    rownames(lowess.mat) <- id(y_intervals(x))
    colnames(lowess.mat) <- id(x_intervals(x))
    ######################
    ## Variance estimation
    ######################
    stdev.mat <- NULL
    if (stdev){
        message("Standard deviation calculation ...")
        ##interpolation points along the distance axis
        ind <- getDeltaRange(delta, xdata.dist)
        lx <- length(xdata.dist)
        Q <- floor(lx*span)
        stdev.delta <- unlist(mclapply(1:length(ind), function(k){
            i <- ind[k]
            x1 <- xdata.dist[i]
            ## Neighbors selection 2*Q
            ## NOTE(review): 'll<0' looks like it should be 'll<1' for 1-based
            ## indexing — confirm; xdata.dist[0:lr] silently drops no element.
            ll <- i-Q-1
            lr <- i+Q-1
            if (ll<0) ll=0
            if (lr>lx) lr=lx
            xdata.dist.sub <- xdata.dist[ll:lr]
            ydata.sub <- ydata[ll:lr]
            ## Select the Q closest distances
            d <- abs(x1-xdata.dist.sub)
            o2 <- order(d)[1:Q]
            x2 <- xdata.dist.sub[o2]
            y2 <- ydata.sub[o2]
            ## Distance between x and other points
            dref <- d[o2]
            drefs <- dref/max(abs(dref-x1)) ##max(dref) - NS
            ## Tricube weigths and stdev calculation
            ## NOTE: the local 'sqrt' vector shadows base::sqrt as a variable;
            ## the sqrt() *call* below still resolves to the base function.
            w <- tricube(drefs)
            sqrt <- w*(y2-lowess.fit[i])^2
            stdev <- sqrt(sum(sqrt)/
                          (((length(sqrt)-1) * sum(w))/length(sqrt)))
        }))
        if (plot){
            points(x=xdata.dist[ind], y=lowess.fit[ind], col="black", cex=.8, pch="+")
            legend(x="topright", lty=c(1,NA), pch=c(NA,"+"), col=c("red","black"),legend=c("Lowess fit","Interpolation points"), cex=.8, bty="n")
        }
        ## Linear interpolation of the stdev between the 'ind' points
        stdev.estimate <- approx(x=xdata.dist[ind], y=stdev.delta, method="linear", xout=xdata.dist)$y
        stdev.mat <- matrix(stdev.estimate[order(o)], nrow=length(y_intervals(x)), byrow=FALSE)
        rownames(stdev.mat) <- id(y_intervals(x))
        colnames(stdev.mat) <- id(x_intervals(x))
    }
    ## Put NA at the empty rows (rr) and columns (rc)
    lowess.mat[rr,] <- NA
    lowess.mat[,rc] <- NA
    return(list(exp.interaction=lowess.mat,stdev.estimate=stdev.mat))
}
###################################
## getDeltaRange
## INTERNAL FUNCTION
## Calculate the interpolation points from the delta value
##
## delta = lowess delta parameter
## xdata = Intervals distances matrix
###################################
getDeltaRange <- function(delta, xdata){
    ## Pick interpolation point indices along the (sorted) distance vector
    ## 'xdata'. An index is added each time the distance crosses the current
    ## threshold, which is then doubled (log-like spacing). With delta<=0,
    ## every index is returned.
    message("Delta=",delta)
    if (delta>0){
        ind <- 1
        ## FIX: the original '1:length(xdata)-1' parses as
        ## (1:length(xdata))-1, starting the loop at i=0 and possibly adding
        ## the invalid index 0 to 'ind'. seq_len also handles length<=1 safely.
        for (i in seq_len(length(xdata)-1)){
            if (xdata[i+1]>=delta){
                ind <- c(ind,i)
                delta=delta+delta
            }
        }
        ## Always include the last point so approx() covers the full range
        if (max(ind)<length(xdata)){
            ind <- c(ind, length(xdata))
        }
        message("Calculating stdev ... ")
    }else{
        ind <- 1:length(xdata)
    }
    ind
}
###################################
## tricube
## INTERNAL FUNCTION
## tricube distance weigth
##
## x = numeric. A distance
###################################
tricube <- function(x) {
    ## Tricube kernel weight: (1 - |x|^3)^3 inside [-1, 1], zero outside.
    ## NA inputs propagate to NA, as with the ifelse() formulation.
    pmax(0, 1 - abs(x)^3)^3
}
##**********************************************************************************************************##
##
## HIC Normalization procedure from HiCNorm package, Hu et al. 2012
##
##**********************************************************************************************************##
###################################
## normLGF
## Local Genomic Features normalization
##
##
## x = HTCexp/HTClist object
## family = regression model: Poisson or Negative Binomial
##
##
##################################
normLGF <- function(x, family=c("poisson", "nb")){
    ## Local Genomic Features normalization (HiCNorm, Hu et al. 2012):
    ## regress the observed counts on fragment length and GC content with the
    ## mappability as an offset, then divide by the fitted expected counts.
    family <- match.arg(family)
    message("Starting LGF normalization on ", seqlevels(x), " ...")
    counts <- intdata(x)
    ## Remove rowCounts=0 & colCounts=0
    rc <- which(rowSums(counts)>0)
    ## Intrachromosomal maps: one symmetric set of intervals, so the column
    ## selection reuses the row selection
    if (isIntraChrom(x)){
        cc <- rc
        stopifnot(length(rc)>0)
        counts.rc <- counts[rc,rc]
        elt <- elementMetadata(y_intervals(x)[rc])
        len <- elt$len
        gcc <- elt$GC
        map <- elt$map
        if(all(is.na(len)) || all(is.na(gcc)) || all(is.na(map)))
            stop("Genomic features are missing. Effective fragments length, GC content and mappability are required.")
        ##get covariate matrices (pairwise products, log scale)
        len_m<-as.matrix(log(1+len%o%len))
        gcc_m<-as.matrix(log(1+gcc%o%gcc))
        ##error for regions with 0 mappability
        ## NOTE(review): 10e-4 equals 1e-3 — possibly 1e-4 was intended; confirm
        map[which(map==0)] <- 10e-4
        map_m<-as.matrix(log(map%o%map))
    }else{
        ## Interchromosomal maps: rows and columns have distinct intervals
        cc <- which(colSums(counts)>0)
        stopifnot(length(rc)>0 & length(cc)>0)
        counts.rc <- counts[rc,cc]
        yelt <- elementMetadata(y_intervals(x)[rc])
        xelt <- elementMetadata(x_intervals(x)[cc])
        ylen <- yelt$len
        xlen <- xelt$len
        ygcc <- yelt$GC
        xgcc <- xelt$GC
        ymap <- yelt$map
        xmap <- xelt$map
        if(all(is.na(ylen)) || all(is.na(ygcc)) || all(is.na(ymap)) || all(is.na(xlen)) || all(is.na(xgcc)) || all(is.na(xmap)))
            stop("Genomic features are missing. Effective fragments length, GC content and mappability are required.")
        ##get covariate matrices
        len_m<-as.matrix(log(1+ylen%o%xlen))
        gcc_m<-as.matrix(log(1+ygcc%o%xgcc))
        ##error for regions with 0 mappability
        ymap[which(ymap==0)] <- 10e-4
        xmap[which(xmap==0)] <- 10e-4
        map_m<-as.matrix(log(ymap%o%xmap))
    }
    ##centralize cov matrix of enz, gcc
    #len_m<-(len_m-mean(len_m, na.rm=TRUE))/apply(len_m, 2, sd, na.rm=TRUE)
    #gcc_m<-(gcc_m-mean(gcc_m, na.rm=TRUE))/apply(gcc_m, 2, sd, na.rm=TRUE)
    #Fix bug in BioC [bioc] A: normLGF yields non-symetric matrices
    len_m<-(len_m-mean(len_m, na.rm=TRUE))/sd(len_m, na.rm=TRUE)
    gcc_m<-(gcc_m-mean(gcc_m, na.rm=TRUE))/sd(gcc_m, na.rm=TRUE)
    ##change matrix into vector (upper triangle only for symmetric cis maps)
    if (isIntraChrom(x)){
        counts_vec<-counts.rc[which(upper.tri(counts.rc,diag=FALSE))]
        len_vec<-len_m[upper.tri(len_m,diag=FALSE)]
        gcc_vec<-gcc_m[upper.tri(gcc_m,diag=FALSE)]
        map_vec<-map_m[upper.tri(map_m,diag=FALSE)]
    }else{
        counts_vec<-as.vector(counts.rc)
        len_vec<-as.vector(len_m)
        gcc_vec<-as.vector(gcc_m)
        map_vec<-as.vector(map_m)
    }
    print("fit ...")
    if (family=="poisson"){
        ##fit Poisson regression: u~len+gcc+offset(map)
        fit<-glm(counts_vec~ len_vec+gcc_vec+offset(map_vec),family="poisson")
        ##fit<-bigglm(counts_vec~len_vec+gcc_vec+offset(map_vec),family="poisson", data=cbind(counts_vec, len_vec, gcc_vec, map_vec))
    }else{
        fit<-glm.nb(counts_vec~len_vec+gcc_vec+offset(map_vec))
    }
    coeff<-fit$coeff
    ## The corrected values (residuals) can be seen as a observed/expected correction.
    ## So I will compare the normalized counts with one: the observed count is higher or lower than the expected count. We may not want to compare the range of the normalized count with the range of the raw count. They have different interpretations.
    counts.cor<-round(counts.rc/exp(coeff[1]+coeff[2]*len_m+coeff[3]*gcc_m+map_m), 4)
    counts[rownames(counts.rc), colnames(counts.rc)]<-counts.cor
    intdata(x) <- counts
    return(x)
}##normLGF
###################################
## setGenomicFeatures
## Annotate a HTCexp or HTClist object with the GC content and the mappability features
##
##
## x = HTCexp/HTClist object
## cutSites = GRanges object ir GRangesList from getAnnotatedRestrictionSites function
## minFragMap = Discard restriction with mappability lower the this threshold (and NA)
## effFragLen = Effective fragment length
##################################
setGenomicFeatures <- function(x, cutSites, minFragMap=.5, effFragLen=1000){
    ## Annotate the x (and y) intervals of an HTCexp object with the local
    ## genomic features (len/GC/map) derived from the annotated cut sites.
    stopifnot(inherits(x,"HTCexp"))
    stopifnot(seqlevels(x) %in% seqlevels(cutSites))
    obj <- x
    xgi <- x_intervals(x)
    message("Annotation of ", seqlevels(x), " ...")
    xgi <- annotateIntervals(xgi, cutSites[[seqlevels(xgi)]], minfragmap=minFragMap, efffraglen=effFragLen)
    x_intervals(obj) <- xgi
    ## Use the scalar '&&' (was '&'): binned intrachromosomal maps have
    ## identical x/y intervals, so the annotation is computed only once
    if (isIntraChrom(x) && isBinned(x)){
        y_intervals(obj) <- xgi
    }else{
        ygi <- y_intervals(x)
        ygi <- annotateIntervals(ygi, cutSites[[seqlevels(ygi)]], minfragmap=minFragMap, efffraglen=effFragLen)
        y_intervals(obj) <- ygi
    }
    obj
}
###################################
## annotateIntervals
## INTERNAL FUNCTION
##
##
## gi = GRanges object from x_intervals or y_intervals methods
## annot = GRanges object from getAnnotatedRestrictionSites function
##
##################################
annotateIntervals <- function(gi, annot, minfragmap=.5, efffraglen=1000){
    ## Aggregate per-cut-site annotations (fragment length, GC, mappability)
    ## over each interval of 'gi'. Upstream/downstream annotations are mapped
    ## to the fragment boundaries so sites overlapping an interval edge are
    ## attributed correctly.
    ## Preprocess, keep fragments ends with mappability score larger than .5.
    ## Depends on the data processing (see Yaffe and Tanay, 2011). These fragments will be excluded from the analysis.
    if (!is.na(minfragmap) & !all(is.na(annot$map_U)) & !all(is.na(annot$map_D))){
        idxmap <- which(annot$map_U<minfragmap | is.na(annot$map_U))
        elementMetadata(annot)[idxmap,c("len_U", "GC_U", "map_U")]<-NA_real_
        idxmap <- which(annot$map_D<minfragmap | is.na(annot$map_D))
        elementMetadata(annot)[idxmap,c("len_D", "GC_D", "map_D")]<-NA_real_
    }
    ## Get all restriction sites which overlap with the bins
    ## Split upstream and downstream bins to deal with restriction sites which overlap the start or end of a fragment
    annot_up <- annot
    end(annot_up)<-start(annot)
    elementMetadata(annot_up) <- NULL
    annot_up$len=as.numeric(annot$len_U)
    annot_up$GC=as.numeric(annot$GC_U)
    annot_up$map=as.numeric(annot$map_U)
    annot_down <- annot
    start(annot_down) <- end(annot)
    elementMetadata(annot_down) <- NULL
    annot_down$len=as.numeric(annot$len_D)
    annot_down$GC=as.numeric(annot$GC_D)
    annot_down$map=as.numeric(annot$map_D)
    outl_up<- as.list(findOverlaps(gi, annot_up))
    outl_dw<- as.list(findOverlaps(gi, annot_down))
    ## One (len, GC, map) triple per interval of 'gi'
    annotscores <- lapply(1:length(outl_up), function(i){
        id_up <- outl_up[[i]]
        id_dw <- outl_dw[[i]]
        ##temp <- c(annot_up[id_up], annot_down[id_dw])
        temp_up <- annot_up[id_up]
        temp_dw <- annot_down[id_dw]
        ## len - effective length" is the fragment length truncated by 1000 bp, which is the number of bases with specific ligation.
        ## In Yaffe & Tanay's paper Figure 1b, they define specific ligation as sum of distance to cutter sites (d1+d2) <= 500 bp. Such criterion implies that d1<=500 bp and d2 <= 500 bp. So for each fragment end, only reads mapped within 500 bp to cutter sites are used for downstream analysis.
        lenv <- unique(c(temp_up$len, temp_dw$len))
        if (!is.na(efffraglen))
            lenscore <- sum(lenv>efffraglen, na.rm=TRUE)*efffraglen + sum(lenv[lenv<efffraglen], na.rm=TRUE)
        else
            lenscore <- sum(lenv, na.rm=TRUE)
        ##GC: mean over all overlapping fragment ends
        gcscore <- mean(c(temp_up$GC, temp_dw$GC), na.rm=TRUE)
        ##map: mean over all overlapping fragment ends
        mapscore <- mean(c(temp_up$map, temp_dw$map), na.rm=TRUE)
        c(lenscore, gcscore, mapscore)
    })
    annotscores <- matrix(unlist(annotscores), ncol=3, byrow=TRUE)
    colnames(annotscores) <- c("len", "GC", "map")
    elementMetadata(gi)$len <- round(annotscores[,"len"],3)
    elementMetadata(gi)$GC <- round(annotscores[,"GC"],3)
    elementMetadata(gi)$map <- round(annotscores[,"map"],3)
    gi
}
###################################
## getAnnotatedRestrictionFragments
## Return the restriction fragments for a given enzyme, annotated with the GC content and the mappability
##
##
## resSite = Cutting site of the restriction enzyme used (default HindIII)
## overhangs5 = Cleavage 5 overhang
## chromosome = chromosomes list to focus on. If NULL, all genome chromosome are investigated
## genomePack = name of the BSgenome package to load
## w = size of the downstream/upstream window to use around the restriction site to calculate the GC content. Default is 200. See Yaffe and Tanay for more details
## mappability = GRanges object of the mappability (see the ENCODE mappability tracks)
##
## D = downstream / U = upstream the restriction site
##################################
getAnnotatedRestrictionSites <- function(resSite="AAGCTT", overhangs5=1, chromosomes=NULL, genomePack="BSgenome.Mmusculus.UCSC.mm9", mappability=NULL, wingc=200, winmap=500){
    ## Return a GRangesList (one element per chromosome) of restriction sites
    ## annotated with up/downstream fragment length (len_U/len_D), GC content
    ## (GC_U/GC_D) and, when a mappability track is given, mean mappability
    ## (map_U/map_D).
    if(genomePack %in% loadedNamespaces()==FALSE){
        stopifnot(require(genomePack, character.only=TRUE))
    }
    genome <- eval(as.name(genomePack))
    if (is.null(chromosomes)){
        chromosomes <- seqlevels(genome)
    }
    genomeCutSites <- mclapply(chromosomes, function(chr){
        message("Get restriction sites for ", chr, " ...")
        cutSites <- getRestrictionSitesPerChromosome(resSite, overhangs5, genome, chr)
        message(length(cutSites), " sites")
        message("Calculate fragment length ...")
        ## Add chromosome start/end
        len_D <- c(end(cutSites)[-1], length(genome[[chr]])) - start(cutSites)
        len_U <- end(cutSites) - c(0, start(cutSites)[-length(cutSites)])
        cutSites$len_U <- len_U
        cutSites$len_D <- len_D
        message("Calculate GC content ...")
        ## Upstream GC content
        win <- start(cutSites)-wingc
        ## FIX: clamp to 1 when win < 1 (the former 'win<0' test let a start
        ## of 0 through, which is invalid for 1-based genomic coordinates)
        win[win<1] <- 1
        seq <- Biostrings::getSeq(genome, chr, start=win, end=start(cutSites)-1)
        ##cutSites$seq_U <- seq
        cutSites$GC_U<- round(Biostrings::letterFrequency(seq, as.prob=FALSE, letters="CG")/Biostrings::letterFrequency(seq, as.prob=FALSE, letters="ACGT"),3)
        ## Downstream GC content
        win <- start(cutSites)+wingc-1
        win[win>length(genome[[chr]])] <- length(genome[[chr]])
        seq <- Biostrings::getSeq(genome, chr, start(cutSites), win)
        cutSites$GC_D<- round(Biostrings::letterFrequency(seq, as.prob=FALSE, letters="CG")/Biostrings::letterFrequency(seq, as.prob=FALSE, letters="ACGT"),3)
        ##cutSites$seq_D <- seq
        if (!is.null(mappability)){
            message("Calculate mappability ...")
            stopifnot(inherits(mappability,"GRanges"))
            mappability <- mappability[seqnames(mappability)==chr]
            ## Upstream mappability window
            win <- start(cutSites)-winmap+1
            ## FIX: same 1-based clamp as above (was 'win<0')
            win[win<1] <- 1
            gr <- GRanges(seqnames = chr, ranges = IRanges(start=win, end=start(cutSites)))
            overl <- as.list(findOverlaps(gr, mappability))
            mscore <- mappability$score
            cutSites$map_U<- unlist(lapply(overl, function(idx){
                round(mean(mscore[idx], na.rm=TRUE),3)
            }))
            ## Downstream mappability window
            win <- start(cutSites)+winmap
            win[win>length(genome[[chr]])] <- length(genome[[chr]])
            gr <- GRanges(seqnames = chr, ranges = IRanges(start=start(cutSites)+1, end=win))
            overl <- as.list(findOverlaps(gr, mappability))
            mscore <- mappability$score
            cutSites$map_D<- unlist(lapply(overl, function(idx){
                round(mean(mscore[idx], na.rm=TRUE),3)
            }))
        }else{
            cutSites$map_U<-NA_real_
            cutSites$map_D<-NA_real_
        }
        message("done ...")
        cutSites
    })
    grl <- GRangesList(genomeCutSites)
    names(grl) <- chromosomes
    grl
}
###################################
## getRestrictionSitesPerChromosome
## INTERNAL FUNCTION
##
##
## resSite = Cutting site of the restriction enzyme used
## overhangs5 = Cleavage 5 overhang
## genome = BSgenome object of the reference genome
## chromosome = chromosome to focus on
##
##################################
getRestrictionSitesPerChromosome <- function(resSite, overhangs5, genome, chromosome){
    ## Locate the restriction sites of one chromosome: match the recognition
    ## sequence, then shift the hit boundaries inward by the 5' overhang so
    ## that the returned ranges mark the actual cut positions.
    stopifnot(inherits(genome, "BSgenome"))
    stopifnot(length(chromosome) == 1)
    hits <- Biostrings::matchPattern(resSite, genome[[chromosome]])
    ## Account for the restriction enzyme 5' overhangs
    cutRanges <- IRanges(start = start(hits) + overhangs5,
                         end = end(hits) - overhangs5)
    GRanges(seqnames = chromosome, ranges = cutRanges, strand = "*")
}
###################################
## getRestrictionFragmentsPerChromosome
##
##
## resSite = Cutting site of the restriction enzyme used
## overhangs5 = Cleavage 5 overhang
## genome = BSgenome object of the reference genome
## chromosome = chromosome to focus on
##
##################################
getRestrictionFragmentsPerChromosome <- function(resSite="AAGCTT", overhangs5=1,
                                                 chromosomes=NULL, genomePack="BSgenome.Mmusculus.UCSC.mm9"){
    ## Build the restriction-fragment map, one GRanges per chromosome.
    ## Load the genome package on demand
    if(genomePack %in% loadedNamespaces()==FALSE){
        stopifnot(require(genomePack, character.only=TRUE))
    }
    genome <- eval(as.name(genomePack))
    stopifnot(inherits(genome,"BSgenome"))
    if (is.null(chromosomes)){
        chromosomes <- seqlevels(genome)
    }
    genomeResFrag <- mclapply(chromosomes, function(chromosome){
        message("Get restriction fragments for ", chromosome, " ...")
        restrictionSites <- getRestrictionSitesPerChromosome(resSite, overhangs5, genome, chromosome)
        ## Fragments run from the chromosome start to each cut site, and from
        ## the last cut site to the chromosome end
        GRanges(seqnames=chromosome, ranges=IRanges(
            start=c(1, start(restrictionSites)),
            end=c(start(restrictionSites)-1, seqlengths(genome)[chromosome])), strand="+")
    })
    ## Name the per-chromosome results, consistent with getAnnotatedRestrictionSites
    names(genomeResFrag) <- chromosomes
    return(genomeResFrag)
}
##**********************************************************************************************************##
##
## ICE Normalization procedure from Imakaev et al .2012
##
##**********************************************************************************************************##
###################################
## balancingSK
## INTERNAL FUNCTION
##
## Matrix balancing used in ICE normalization
## Based on the Sinkhorn-Knopp algorithm
##
## x = HTCexp object
## max_iter = maximum number of iteration to converge
## eps = threshold to converge
##
##################################
balancingSK<- function(x, max_iter=50, eps=1e-4){
    ## Sinkhorn-Knopp-style matrix balancing used by ICE normalization.
    ## Iteratively divides the matrix by the outer product of its row-sum
    ## biases until the bias vector is stable (L1 change < eps).
    m <- dim(x)[1]
    ## Initialization
    sum_ss <- matrix(rep(0, m), ncol=1)
    old_dbias <- NULL
    converged <- FALSE
    ## Remove Diagonal ?
    for (it in 1:max_iter){
        message("it=",it," ", Sys.time())
        ## 1- calculate sum of W over all rows
        sum_ds <- rowSums(x, na.rm=TRUE)
        ## 2- calculate a vector of corrected ss reads: NOT DONE
        ## 3- calculate the vector of biases
        dbias <- as.matrix(sum_ds, ncol=1) + sum_ss
        ## 4- renormalize bias by its mean value over non-zero bins to avoid
        ##    numerical instabilities
        dbias <- dbias/mean(dbias[dbias!=0])
        ## 5- set zero values of bias to 1 to avoid 0/0 errors
        dbias[dbias==0] <- 1
        ## 6- divide W by the bias product BiBj for all (i,j)
        x <- x/(dbias %*% t(dbias))
        ## Stop as soon as the bias vector is stable, even on the last iteration
        if (!is.null(old_dbias) && sum(abs(old_dbias - dbias))<eps){
            converged <- TRUE
            message("Break at iteration ", it)
            break
        }
        old_dbias <- dbias
    }
    ## Report convergence based on an explicit flag: the original test
    ## (it == max_iter) misreported a convergence on the very last iteration.
    if (!converged){
        message("Did not converge. Stop at iteration ", max_iter)
    }else{
        message("Converged in ", it, " iterations")
    }
    return(x)
}
###################################
## IterativeCorNormalization
## ICE normalization
##
##
## x = HTCexp object or HTClist object
## max_iter = maximum number of iteration to converge
## eps = threshold to converge
## sparse.filter = Percentage of rows and columns to discard based on sparsity (default=0.02)
##
##################################
normICE <- function(x, max_iter=50, eps=1e-4, sparse.filter=0.02){
    ## ICE normalization (Imakaev et al. 2012): balance the contact map so
    ## rows/columns have equal visibility. Accepts a symmetric HTCexp map or
    ## an HTClist (balanced on the combined genome-wide matrix).
    if (inherits(x, "HTCexp")){
        stopifnot(isSymmetric(x))
        idata <- intdata(x)
        gr <- y_intervals(x)
    }else if (inherits(x, "HTClist")){
        idata <- getCombinedContacts(x)
        gr <- getCombinedIntervals(x)
    }
    ## Discard the 'sparse.filter' fraction of the most zero-rich rows/columns
    ## before balancing; pass sparse.filter=NA to skip this step.
    if (!is.na(sparse.filter)){
        message("Start filtering ...", Sys.time())
        ## Number of zero entries per row (fully-empty rows are excluded from
        ## the quantile so they do not skew the threshold)
        spars <- apply(idata, 1, function(x){length(which(x==0))})
        spars.t <- quantile(spars[spars!=dim(idata)[1]], probs=(1-sparse.filter))
        idx <- which(spars>as.numeric(spars.t))
        idata[idx,] <- 0
        idata[,idx] <- 0
        message("Filter out ",length(idx)," rows and columns ...")
    }
    message("Start Iterative Correction ...")
    xmat <- balancingSK(idata, max_iter=max_iter, eps=eps)
    if (inherits(x, "HTCexp")){
        intdata(x) <- xmat
    }else if (inherits(x, "HTClist")){
        ## gr <- dimnames2gr(xmat, pattern="\\||\\:|\\-", feat.names=c("name","chr","start", "end"))
        ## xgi <- gr[[1]]
        ## ygi <- gr[[2]]
        ## rownames(xmat) <- id(ygi)
        ## colnames(xmat) <- id(xgi)
        ## Split the balanced genome-wide matrix back into per-map objects;
        ## when no x intervals exist, the y intervals are used for both axes.
        if (is.null(gr$xgi))
            x <- splitCombinedContacts(xmat, xgi=gr$ygi, ygi=gr$ygi)
        else
            x <- splitCombinedContacts(xmat, xgi=gr$xgi, ygi=gr$ygi)
    }
    x
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ezr_h2o_get_gridmodels.R
\name{ezr.h2o_get_gridmodels}
\alias{ezr.h2o_get_gridmodels}
\title{Get H2o Grid/AutoMl Model IDs}
\usage{
ezr.h2o_get_gridmodels(h2o_grid)
}
\arguments{
\item{h2o_grid}{An h2o grid or h2o-automl result, given either as the model object itself or as its ID string.}
}
\value{
Returns a vector of model ids so you can use these in a loop
}
\description{
Get H2o Grid/AutoMl Model IDs
}
|
/man/ezr.h2o_get_gridmodels.Rd
|
no_license
|
lenamax2355/easyr
|
R
| false
| true
| 476
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ezr_h2o_get_gridmodels.R
\name{ezr.h2o_get_gridmodels}
\alias{ezr.h2o_get_gridmodels}
\title{Get H2o Grid/AutoMl Model IDs}
\usage{
ezr.h2o_get_gridmodels(h2o_grid)
}
\arguments{
\item{h2o_grid}{An h2o grid or h2o-automl result, given either as the model object itself or as its ID string.}
}
\value{
Returns a vector of model ids so you can use these in a loop
}
\description{
Get H2o Grid/AutoMl Model IDs
}
|
## Fuzzer-generated regression input (libFuzzer/valgrind harness, per the file
## path): replays one call to the internal grattan:::anyOutside with a fixed
## integer payload and prints the structure of the returned value.
testlist <- list(a = 0L, b = 0L, x = c(-78249985L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610131656-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 254
|
r
|
## Fuzzer-generated regression input (libFuzzer/valgrind harness, per the file
## path): replays one call to the internal grattan:::anyOutside with a fixed
## integer payload and prints the structure of the returned value.
testlist <- list(a = 0L, b = 0L, x = c(-78249985L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parent_cluster.mppData.R
\name{parent_cluster.mppData}
\alias{parent_cluster.mppData}
\title{Parent clustering for \code{mppData} objects}
\usage{
parent_cluster.mppData(mppData, method = NULL, par.clu = NULL,
w1 = "kernel.exp", w2 = "kernel.unif", window, K = 10,
simulation.type = "equi", simulation.Ng = 50, simulation.Nrep = 3,
threshold.quantile = 95, plot = TRUE, plot.loc = getwd())
}
\arguments{
\item{mppData}{An object of class \code{mppData}. the \code{mppData} must
have been processed using: \code{\link{create.mppData}},
\code{\link{QC.mppData}}, \code{\link{IBS.mppData}},
and \code{\link{IBD.mppData}}.}
\item{method}{\code{Character} expression. If (\code{method = "clusthaplo"}),
the clustering is done using the R package clusthaplo.
If (\code{method = "given"}), the user must provide the parent clustering
information using \code{par.clu}. Default = NULL.}
\item{par.clu}{Optional argument if (\code{method = "given"}).
\code{Integer matrix} representing the results of a
parents genotypes clustering. The columns represent the parental lines and
the rows the markers. The columns names must be the same as the parents
list of the mppData object. The rownames must be the same as the map marker
list of the mppData object. At a particular position, parents with the same
value are assumed to inherit from the same ancestor. For more details,
see \code{\link{par_clu}}. Default = NULL.}
\item{w1}{The w1 weight function in the Li&Jyang similarity score.
Possible values are "kernel.const", "kernel.exp", "kernel.gauss",
"kernel.unif", "kernel.laplace" or "kernel.null". Default = "kernel.exp".}
\item{w2}{The w2 weight function in the Li&Jyang similarity score.
Possible values are "kernel.const", "kernel.exp", "kernel.gauss",
"kernel.unif", "kernel.laplace" or "kernel.null". Default = "kernel.unif".}
\item{window}{\code{Numeric} value for the size of the window used for
clustering in centi-Morgan. The clustering procedure is done for the position
that is in the centre of the window taking marker scores within the window
into consideration.}
\item{K}{A positive integer representing the number of markers in a window
below which the kinship data will be used. Default = 10.}
\item{simulation.type}{The type of simulation used for the training.
One of "equi" or "mosaic". Default = "equi".}
\item{simulation.Ng}{The number of intermediary generations to simulate
for the training (only relevant for "mosaic"). Default = 50.}
\item{simulation.Nrep}{The number of replicates to simulate for the training.
Default = 3.}
\item{threshold.quantile}{The quantile to use to select the threshold
automatically. It must be a plain integer xx with 80 <= xx <= 100.
Default = 95.}
\item{plot}{\code{Logical} value indicating if the plot of the clustering
results must be saved at the location specified in argument \code{plot.loc}.
Default = TRUE.}
\item{plot.loc}{Path where a folder will be created to save the plot of
the clustering results. By default the function uses the current working
directory.}
}
\value{
An increased \code{mppData} object containing the same elements
as the \code{mppData} object provided as argument and the
following new elements:
\item{par.clu}{\code{Integer matrix} with rows representing markers and
columns corresponding to the parents. At a single marker position, parents
with the same value were clustered in the same ancestral group.}
\item{n.anc}{Average number of ancestral clusters along the genome.}
\item{mono.anc}{Positions for which the ancestral clustering was monomorphic.}
}
\description{
Local clustering of the parental lines done by the R package clusthaplo
(Leroux et al. 2014) or by providing own parent clustering data.
}
\details{
This function integrates the parent clustering information into the mppData
object. The parent clustering is necessary to compute the ancestral model.
If the parent clustering step is skipped, the ancestral model can not be
used but the other models (cross-specific, parental, and bi-allelic) can
still be computed.
The parent clustering can be performed using the R package
clusthaplo using \code{method = "clusthaplo"}. Clusthaplo can be found there:
\url{https://cran.r-project.org/src/contrib/Archive/clusthaplo/}. Using
clusthaplo, a visualisation of ancestral haplotype blocks can be obtained
setting \code{plot = TRUE}. The plots will be saved at the location specified
in \code{plot.loc}.
An alternative (\code{method = "given"}), is to provide your own parent
clustering information via the argument \code{par.clu}.
}
\examples{
data(mppData_init)
data(par_clu)
mppData <- QC.mppData(mppData_init)
mppData <- IBS.mppData(mppData = mppData)
mppData <- IBD.mppData(mppData = mppData, type = 'RIL',
type.mating = 'selfing')
mppData <- parent_cluster.mppData(mppData = mppData, method = "given",
par.clu = par_clu)
\dontrun{
library(clusthaplo)
mppData <- parent_cluster.mppData(mppData = mppData, method = "clusthaplo",
window = 25, K = 10, plot = FALSE)
}
}
\references{
Leroux, D., Rahmani, A., Jasson, S., Ventelon, M., Louis, F., Moreau, L.,
& Mangin, B. (2014). Clusthaplo: a plug-in for MCQTL to enhance QTL detection
using ancestral alleles in multi-cross design. Theoretical and Applied
Genetics, 127(4), 921-933.
}
\seealso{
\code{\link{create.mppData}}, \code{\link{QC.mppData}},
\code{\link{IBS.mppData}}, \code{\link{IBD.mppData}}
}
\author{
Vincent Garin
}
|
/man/parent_cluster.mppData.Rd
|
no_license
|
jancrichter/mppR
|
R
| false
| true
| 5,747
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parent_cluster.mppData.R
\name{parent_cluster.mppData}
\alias{parent_cluster.mppData}
\title{Parent clustering for \code{mppData} objects}
\usage{
parent_cluster.mppData(mppData, method = NULL, par.clu = NULL,
w1 = "kernel.exp", w2 = "kernel.unif", window, K = 10,
simulation.type = "equi", simulation.Ng = 50, simulation.Nrep = 3,
threshold.quantile = 95, plot = TRUE, plot.loc = getwd())
}
\arguments{
\item{mppData}{An object of class \code{mppData}. the \code{mppData} must
have been processed using: \code{\link{create.mppData}},
\code{\link{QC.mppData}}, \code{\link{IBS.mppData}},
and \code{\link{IBD.mppData}}.}
\item{method}{\code{Character} expression. If (\code{method = "clusthaplo"}),
the clustering is done using the R package clusthaplo.
If (\code{method = "given"}), the user must provide the parent clustering
information using \code{par.clu}. Default = NULL.}
\item{par.clu}{Optional argument if (\code{method = "given"}).
\code{Integer matrix} representing the results of a
parents genotypes clustering. The columns represent the parental lines and
the rows the markers. The columns names must be the same as the parents
list of the mppData object. The rownames must be the same as the map marker
list of the mppData object. At a particular position, parents with the same
value are assumed to inherit from the same ancestor. For more details,
see \code{\link{par_clu}}. Default = NULL.}
\item{w1}{The w1 weight function in the Li&Jyang similarity score.
Possible values are "kernel.const", "kernel.exp", "kernel.gauss",
"kernel.unif", "kernel.laplace" or "kernel.null". Default = "kernel.exp".}
\item{w2}{The w2 weight function in the Li&Jyang similarity score.
Possible values are "kernel.const", "kernel.exp", "kernel.gauss",
"kernel.unif", "kernel.laplace" or "kernel.null". Default = "kernel.unif".}
\item{window}{\code{Numeric} value for the size of the window used for
clustering in centi-Morgan. The clustering procedure is done for the position
that is in the centre of the window taking marker scores within the window
into consideration.}
\item{K}{A positive integer representing the number of markers in a window
below which the kinship data will be used. Default = 10.}
\item{simulation.type}{The type of simulation used for the training.
One of "equi" or "mosaic". Default = "equi".}
\item{simulation.Ng}{The number of intermediary generations to simulate
for the training (only relevant for "mosaic"). Default = 50.}
\item{simulation.Nrep}{The number of replicates to simulate for the training.
Default = 3.}
\item{threshold.quantile}{The quantile to use to select the threshold
automatically. It must be a plain integer xx with 80 <= xx <= 100.
Default = 95.}
\item{plot}{\code{Logical} value indicating if the plot of the clustering
results must be saved at the location specified in argument \code{plot.loc}.
Default = TRUE.}
\item{plot.loc}{Path where a folder will be created to save the plot of
the clustering results. By default the function uses the current working
directory.}
}
\value{
An increased \code{mppData} object containing the same elements
as the \code{mppData} object provided as argument and the
following new elements:
\item{par.clu}{\code{Integer matrix} with rows representing markers and
columns corresponding to the parents. At a single marker position, parents
with the same value were clustered in the same ancestral group.}
\item{n.anc}{Average number of ancestral clusters along the genome.}
\item{mono.anc}{Positions for which the ancestral clustering was monomorphic.}
}
\description{
Local clustering of the parental lines done by the R package clusthaplo
(Leroux et al. 2014) or by providing own parent clustering data.
}
\details{
This function integrates the parent clustering information into the mppData
object. The parent clustering is necessary to compute the ancestral model.
If the parent clustering step is skipped, the ancestral model can not be
used but the other models (cross-specific, parental, and bi-allelic) can
still be computed.
The parent clustering can be performed using the R package
clusthaplo using \code{method = "clusthaplo"}. Clusthaplo can be found there:
\url{https://cran.r-project.org/src/contrib/Archive/clusthaplo/}. Using
clusthaplo, a visualisation of ancestral haplotype blocks can be obtained
setting \code{plot = TRUE}. The plots will be saved at the location specified
in \code{plot.loc}.
An alternative (\code{method = "given"}), is to provide your own parent
clustering information via the argument \code{par.clu}.
}
\examples{
data(mppData_init)
data(par_clu)
mppData <- QC.mppData(mppData_init)
mppData <- IBS.mppData(mppData = mppData)
mppData <- IBD.mppData(mppData = mppData, type = 'RIL',
type.mating = 'selfing')
mppData <- parent_cluster.mppData(mppData = mppData, method = "given",
par.clu = par_clu)
\dontrun{
library(clusthaplo)
mppData <- parent_cluster.mppData(mppData = mppData, method = "clusthaplo",
window = 25, K = 10, plot = FALSE)
}
}
\references{
Leroux, D., Rahmani, A., Jasson, S., Ventelon, M., Louis, F., Moreau, L.,
& Mangin, B. (2014). Clusthaplo: a plug-in for MCQTL to enhance QTL detection
using ancestral alleles in multi-cross design. Theoretical and Applied
Genetics, 127(4), 921-933.
}
\seealso{
\code{\link{create.mppData}}, \code{\link{QC.mppData}},
\code{\link{IBS.mppData}}, \code{\link{IBD.mppData}}
}
\author{
Vincent Garin
}
|
################# UniProtKB_query.R ########################
#Obtain structural annotation info for GOI from UniProtKB web API
cat("##############################################################\n")
cat("####### Running UniProtKB structural annotation query ########\n")
cat("##############################################################\n\n\n")
#declare query function
# Query UniProtKB for the gene of interest (GOI) and parse its structural
# feature ("FT") annotation.
#
# Steps:
#   1. Map the Ensembl gene id (global GOI_ENSG) to a UniProt/Swiss-Prot
#      accession via biomaRt, falling back to the bioDBnet REST API when
#      biomaRt returns nothing.
#   2. Download the UniProt flat-text entry with wget, retrying up to 19
#      more times (15 s apart) on failure.
#   3. Parse the non-secondary-structure FT lines into a data frame of
#      TYPE / AA_start / AA_end / LABEL rows plus a per-type count table.
#
# Side effects -- written to the global environment:
#   GOI_uniprot_id, Uniprot_txt_parsed, GOI_UNIPROT_AA_LENGTH,
#   <gene>_protein_feature_annotation,
#   <gene>_protein_feature_annotation_metadata
#
# NOTE(review): depends on globals GOI, GOI_ENSG and hs_ensembl and on the
# external 'wget' binary -- confirm these exist in the calling pipeline.
parse_uniprotKB_annotation <- function(gene = GOI){
cat("############# retriving Uniprot/Swissprot ID from biomaRt ############","\n\n")
GOI_uniprot_id <<- as.character(getBM(attributes = "uniprotswissprot", filters = "ensembl_gene_id", values = GOI_ENSG, mart = hs_ensembl))
# in rare cases biomaRt returns empty logical for uniprotID. This happens for MYH16 regardless of genome/mart version.
# (as.character() deparses the empty getBM column to the literal string "logical(0)")
if(GOI_uniprot_id == "logical(0)"){
cat("\t########### no UniProtID returned by biomaRt ############\n\t\tAttempting to retrive from bioDBnet instead\n\tIDs mapped:\n")
# rjson:: returns error likely due to bug? use jsonlite:: to query bioDBnet
library(jsonlite)
bioDBnet_UniProtIds <- jsonlite::fromJSON(paste0("https://biodbnet-abcc.ncifcrf.gov/webServices/rest.php/biodbnetRestApi.json?method=db2db&format=row&input=genesymbol&inputValues=",GOI,"&outputs=UniProtAccession&taxonId=9606"))
print(bioDBnet_UniProtIds$`UniProt Accession`)
# take the first accession if bioDBnet maps the symbol to several
GOI_uniprot_id <<- bioDBnet_UniProtIds$`UniProt Accession`[1]
}
cat("\tUniProtKB ID:",GOI_uniprot_id,"\n\n")
cat("Querying UniProtKB GOI page via RCurl","\n\n")
# retrieve features with some filtering
UniProt_URL <- paste0("https://www.uniprot.org/uniprot/",GOI_uniprot_id,".txt") #returns data frame with one col of lines
cat("\tURL: ",UniProt_URL,"\n\n")
library(RCurl)
# attempt to access url, if fail try up to 19 more times every 15s
# may no longer be an issue now that SSL error is avoided
attempt<-1
success <- FALSE
while(attempt < 20 & success == FALSE){
read.df <- tryCatch({
success <- TRUE
#curl is broken for UniProt (SSL connect error) in container: use wget via system() instead
#read.delim(textConnection(getURL(UniProt_URL, ssl.verifypeer=FALSE)),stringsAsFactors = FALSE)
system(paste0("wget ",UniProt_URL," -O uniprot_temp.txt"))
read.delim("uniprot_temp.txt",stringsAsFactors = FALSE)
},error=function(cond){
print(cond)
cat("\tAttempt",attempt,"failed. Attemting ",20-attempt," more times.\n")
success <<- FALSE
attempt<<-attempt+1
if(attempt == 20) stop("Unable to connect with UniProt; try again later :[")
Sys.sleep(15)
})
}
rm(attempt,success)
system("rm uniprot_temp.txt")
Uniprot_txt_parsed <<- read.df
cat("Succesffully retrieved\n\n")
#colnames of this parsed txt file contains AA length <- extract for future use
GOI_UNIPROT_AA_LENGTH <<- as.numeric(rev(unlist(strsplit(colnames(read.df),split = "\\.+")))[2])
cat("\tAA length parsed:",GOI_UNIPROT_AA_LENGTH,"\n\n")
#subset for feature lines that are not secondary structure elements
cat("\t","Filtering out secondary structure elements","\n")
read.df.ft <- as.data.frame(read.df[grepl("FT {3}\\S",read.df[,1]) & !grepl("PDB:",read.df[,1]),],stringsAsFactors = FALSE)
#filter out conflict and variant rows (switch to list from df)
read.df.ft <- as.character(read.df.ft[!grepl("FT VARIANT",read.df.ft[,1]) & !grepl("FT CONFLICT",read.df.ft[,1]),])
# the second whitespace-separated token of an FT line is the feature TYPE
ft.col.names <- unique(sapply(read.df.ft, function(x) unlist(strsplit(x,"\\s+"))[2]))
cat("Feature names:","\n")
print(ft.col.names)
#subsetting with "FT {3}\\S" omits some continuations of lines that are prefixed by "FT {>3}".
#Append continued lines in those cases (where the line does not end in "." ("\\.$") or contain ". " ("\\.\\s"), just in case there is an internal period)
truncated_line_index <- !grepl("\\.$|\\.\\s",read.df.ft)
if(sum(truncated_line_index)>0){
cat("\t","Fixing truncated lines","\n")
read.df.ft[truncated_line_index] <- sapply(read.df.ft[truncated_line_index],
function(x){
if(length(unlist(strsplit(x,"\\s+"))) > 4){ #skip if no label (also lacks terminal period)
holder <- paste0(x," ",sub("FT\\s+","\\1",read.df[(which(read.df[,1] == x)+1),1]))
if(grepl("\\.",holder)){ #may need to add third line
return(holder)
}else{
holder <- paste0(holder," ",sub("FT\\s+","\\1",read.df[(which(read.df[,1] == x)+2),1]))
if(!grepl("\\.",holder)) cat("\t\tWARNING - no terminal period; still truncated after merging 3 lines","\n")
return(holder)
}
}else{
cat("\t\tWARNING - Line contains no label:",x,"\n")
return(x)
}
})
}
cat("\nCreating feature data frame","\n")
#create feature_df: one row per FT line (TYPE, start/end residue, LABEL)
feature_df <- data.frame(
TYPE = character(length(read.df.ft)),
AA_start = integer(length(read.df.ft)),
AA_end = integer(length(read.df.ft)),
LABEL = character(length(read.df.ft)),
stringsAsFactors = FALSE
)
for (x in 1:length(read.df.ft)) {
cat("\t",read.df.ft[x],"\n")
line.split <- unlist(strsplit(read.df.ft[x],split = "\\s+")) #first element is 'FT'
feature_df[x,"TYPE"] <- line.split[2]
feature_df[x,"AA_start"] <- as.integer(line.split[3])
feature_df[x,"AA_end"] <- as.integer(line.split[4])
if(!is.na(line.split[5])){
# extract the free-text label between the end coordinate and the terminal period
feature_df[x,"LABEL"] <- sub(paste0(".* ",line.split[4]," *(.*?) *\\..*"),"\\1",read.df.ft[x])
}else{
feature_df[x,"LABEL"] <-line.split[2] #if no label provided (i.e. DNA_BIND) use the TYPE as the LABEL
}
}
cat("\nCreating feature metadata data frame","\n")
# one-row data frame: count of FT rows per feature type
domain_annotation_metadata <- data.frame(stringsAsFactors = FALSE)
for (x in ft.col.names) {
domain_annotation_metadata[1,x]<- sum(feature_df$TYPE == x)
}
#write out data
cat("Writing out data","\n")
assign("GOI_uniprot_id",GOI_uniprot_id, envir = .GlobalEnv)
assign(paste0(gene,"_protein_feature_annotation"),feature_df, envir = .GlobalEnv)
assign(paste0(gene,"_protein_feature_annotation_metadata"),domain_annotation_metadata, envir = .GlobalEnv)
cat("######## UniProtKB query complete ##########","\n\n\n")
}
#query GOI (writes the feature annotation objects into the global environment)
parse_uniprotKB_annotation()
# snapshot the whole workspace so failed runs can be inspected offline
save.image("troubleshooting_workspace.RData") #####################
|
/UniProtKB_query.R
|
no_license
|
tituslabumn/Pan-Cancer-Gene-Reports
|
R
| false
| false
| 6,796
|
r
|
################# UniProtKB_query.R ########################
#Obtain structural annotation info for GOI from UniProtKB web API
cat("##############################################################\n")
cat("####### Running UniProtKB structural annotation query ########\n")
cat("##############################################################\n\n\n")
#declare query function
# Query UniProtKB for the gene of interest (GOI) and parse its structural
# feature ("FT") annotation.
#
# Steps:
#   1. Map the Ensembl gene id (global GOI_ENSG) to a UniProt/Swiss-Prot
#      accession via biomaRt, falling back to the bioDBnet REST API when
#      biomaRt returns nothing.
#   2. Download the UniProt flat-text entry with wget, retrying up to 19
#      more times (15 s apart) on failure.
#   3. Parse the non-secondary-structure FT lines into a data frame of
#      TYPE / AA_start / AA_end / LABEL rows plus a per-type count table.
#
# Side effects -- written to the global environment:
#   GOI_uniprot_id, Uniprot_txt_parsed, GOI_UNIPROT_AA_LENGTH,
#   <gene>_protein_feature_annotation,
#   <gene>_protein_feature_annotation_metadata
#
# NOTE(review): depends on globals GOI, GOI_ENSG and hs_ensembl and on the
# external 'wget' binary -- confirm these exist in the calling pipeline.
parse_uniprotKB_annotation <- function(gene = GOI){
cat("############# retriving Uniprot/Swissprot ID from biomaRt ############","\n\n")
GOI_uniprot_id <<- as.character(getBM(attributes = "uniprotswissprot", filters = "ensembl_gene_id", values = GOI_ENSG, mart = hs_ensembl))
# in rare cases biomaRt returns empty logical for uniprotID. This happens for MYH16 regardless of genome/mart version.
# (as.character() deparses the empty getBM column to the literal string "logical(0)")
if(GOI_uniprot_id == "logical(0)"){
cat("\t########### no UniProtID returned by biomaRt ############\n\t\tAttempting to retrive from bioDBnet instead\n\tIDs mapped:\n")
# rjson:: returns error likely due to bug? use jsonlite:: to query bioDBnet
library(jsonlite)
bioDBnet_UniProtIds <- jsonlite::fromJSON(paste0("https://biodbnet-abcc.ncifcrf.gov/webServices/rest.php/biodbnetRestApi.json?method=db2db&format=row&input=genesymbol&inputValues=",GOI,"&outputs=UniProtAccession&taxonId=9606"))
print(bioDBnet_UniProtIds$`UniProt Accession`)
# take the first accession if bioDBnet maps the symbol to several
GOI_uniprot_id <<- bioDBnet_UniProtIds$`UniProt Accession`[1]
}
cat("\tUniProtKB ID:",GOI_uniprot_id,"\n\n")
cat("Querying UniProtKB GOI page via RCurl","\n\n")
# retrieve features with some filtering
UniProt_URL <- paste0("https://www.uniprot.org/uniprot/",GOI_uniprot_id,".txt") #returns data frame with one col of lines
cat("\tURL: ",UniProt_URL,"\n\n")
library(RCurl)
# attempt to access url, if fail try up to 19 more times every 15s
# may no longer be an issue now that SSL error is avoided
attempt<-1
success <- FALSE
while(attempt < 20 & success == FALSE){
read.df <- tryCatch({
success <- TRUE
#curl is broken for UniProt (SSL connect error) in container: use wget via system() instead
#read.delim(textConnection(getURL(UniProt_URL, ssl.verifypeer=FALSE)),stringsAsFactors = FALSE)
system(paste0("wget ",UniProt_URL," -O uniprot_temp.txt"))
read.delim("uniprot_temp.txt",stringsAsFactors = FALSE)
},error=function(cond){
print(cond)
cat("\tAttempt",attempt,"failed. Attemting ",20-attempt," more times.\n")
success <<- FALSE
attempt<<-attempt+1
if(attempt == 20) stop("Unable to connect with UniProt; try again later :[")
Sys.sleep(15)
})
}
rm(attempt,success)
system("rm uniprot_temp.txt")
Uniprot_txt_parsed <<- read.df
cat("Succesffully retrieved\n\n")
#colnames of this parsed txt file contains AA length <- extract for future use
GOI_UNIPROT_AA_LENGTH <<- as.numeric(rev(unlist(strsplit(colnames(read.df),split = "\\.+")))[2])
cat("\tAA length parsed:",GOI_UNIPROT_AA_LENGTH,"\n\n")
#subset for feature lines that are not secondary structure elements
cat("\t","Filtering out secondary structure elements","\n")
read.df.ft <- as.data.frame(read.df[grepl("FT {3}\\S",read.df[,1]) & !grepl("PDB:",read.df[,1]),],stringsAsFactors = FALSE)
#filter out conflict and variant rows (switch to list from df)
read.df.ft <- as.character(read.df.ft[!grepl("FT VARIANT",read.df.ft[,1]) & !grepl("FT CONFLICT",read.df.ft[,1]),])
# the second whitespace-separated token of an FT line is the feature TYPE
ft.col.names <- unique(sapply(read.df.ft, function(x) unlist(strsplit(x,"\\s+"))[2]))
cat("Feature names:","\n")
print(ft.col.names)
#subsetting with "FT {3}\\S" omits some continuations of lines that are prefixed by "FT {>3}".
#Append continued lines in those cases (where the line does not end in "." ("\\.$") or contain ". " ("\\.\\s"), just in case there is an internal period)
truncated_line_index <- !grepl("\\.$|\\.\\s",read.df.ft)
if(sum(truncated_line_index)>0){
cat("\t","Fixing truncated lines","\n")
read.df.ft[truncated_line_index] <- sapply(read.df.ft[truncated_line_index],
function(x){
if(length(unlist(strsplit(x,"\\s+"))) > 4){ #skip if no label (also lacks terminal period)
holder <- paste0(x," ",sub("FT\\s+","\\1",read.df[(which(read.df[,1] == x)+1),1]))
if(grepl("\\.",holder)){ #may need to add third line
return(holder)
}else{
holder <- paste0(holder," ",sub("FT\\s+","\\1",read.df[(which(read.df[,1] == x)+2),1]))
if(!grepl("\\.",holder)) cat("\t\tWARNING - no terminal period; still truncated after merging 3 lines","\n")
return(holder)
}
}else{
cat("\t\tWARNING - Line contains no label:",x,"\n")
return(x)
}
})
}
cat("\nCreating feature data frame","\n")
#create feature_df: one row per FT line (TYPE, start/end residue, LABEL)
feature_df <- data.frame(
TYPE = character(length(read.df.ft)),
AA_start = integer(length(read.df.ft)),
AA_end = integer(length(read.df.ft)),
LABEL = character(length(read.df.ft)),
stringsAsFactors = FALSE
)
for (x in 1:length(read.df.ft)) {
cat("\t",read.df.ft[x],"\n")
line.split <- unlist(strsplit(read.df.ft[x],split = "\\s+")) #first element is 'FT'
feature_df[x,"TYPE"] <- line.split[2]
feature_df[x,"AA_start"] <- as.integer(line.split[3])
feature_df[x,"AA_end"] <- as.integer(line.split[4])
if(!is.na(line.split[5])){
# extract the free-text label between the end coordinate and the terminal period
feature_df[x,"LABEL"] <- sub(paste0(".* ",line.split[4]," *(.*?) *\\..*"),"\\1",read.df.ft[x])
}else{
feature_df[x,"LABEL"] <-line.split[2] #if no label provided (i.e. DNA_BIND) use the TYPE as the LABEL
}
}
cat("\nCreating feature metadata data frame","\n")
# one-row data frame: count of FT rows per feature type
domain_annotation_metadata <- data.frame(stringsAsFactors = FALSE)
for (x in ft.col.names) {
domain_annotation_metadata[1,x]<- sum(feature_df$TYPE == x)
}
#write out data
cat("Writing out data","\n")
assign("GOI_uniprot_id",GOI_uniprot_id, envir = .GlobalEnv)
assign(paste0(gene,"_protein_feature_annotation"),feature_df, envir = .GlobalEnv)
assign(paste0(gene,"_protein_feature_annotation_metadata"),domain_annotation_metadata, envir = .GlobalEnv)
cat("######## UniProtKB query complete ##########","\n\n\n")
}
#query GOI (writes the feature annotation objects into the global environment)
parse_uniprotKB_annotation()
# snapshot the whole workspace so failed runs can be inspected offline
save.image("troubleshooting_workspace.RData") #####################
|
#' add_osm_objects
#'
#' Adds layers of spatial objects (polygons, lines, or points generated by
#' extract_osm_objects ()) to a graphics object initialised with
#' plot_osm_basemap().
#'
#' @param map A ggplot2 object to which the objects are to be added
#' @param obj A spatial ('sp') data frame of polygons, lines, or points,
#' typically as returned by extract_osm_objects ()
#' @param col Colour of lines or points; fill colour of polygons
#' @param border Border colour of polygons
#' @param size Size argument passed to ggplot2 (polygon, path, point) functions:
#' determines width of lines for (polygon, line), and sizes of points.
#' Respective defaults are (0, 0.5, 0.5).
#' @param shape Shape of points or lines (the latter passed as 'linetype'): see
#' ?ggplot2::shape
#' @return modified version of map (a ggplot object) to which objects have been
#' added
#' @export
#'
#' @seealso \code{\link{plot_osm_basemap}}, \code{\link{extract_osm_objects}}.
#'
#' @examples
#' bbox <- get_bbox (c (-0.13, 51.5, -0.11, 51.52))
#' map <- plot_osm_basemap (bbox=bbox, bg="gray20")
#'
#' \dontrun{
#' # The 'london' data used below were downloaded as:
#' dat_BNR <- extract_osm_objects (bbox=bbox, key='building',
#' value='!residential')
#' dat_HP <- extract_osm_objects (bbox=bbox, key='highway',
#' value='primary')
#' dat_T <- extract_osm_objects (bbox=bbox, key='tree')
#' }
#' map <- add_osm_objects (map, obj=london$dat_BNR, col="gray40", border="yellow")
#' map <- add_osm_objects (map, obj=london$dat_HP, col="gray80",
#' size=1, shape=2)
#' map <- add_osm_objects (map, london$dat_T, col="green", size=2, shape=1)
#' print_osm_map (map)
#'
#' # Polygons with different coloured borders
#' map <- plot_osm_basemap (bbox=bbox, bg="gray20")
#' map <- add_osm_objects (map, obj=london$dat_HP, col="gray80")
#' map <- add_osm_objects (map, london$dat_T, col="green")
#' map <- add_osm_objects (map, obj=london$dat_BNR, col="gray40", border="yellow",
#' size=0.5)
#' print_osm_map (map)
# Add a layer of sp objects (polygons, lines, or points) to a ggplot2 map.
#
# Dispatches on the sp class of 'obj':
#   SpatialPolygonsDataFrame -> geom_polygon ('col' is fill, 'border' outline)
#   SpatialLinesDataFrame    -> geom_path    ('shape' passed as linetype)
#   SpatialPointsDataFrame   -> geom_point
# 'size' defaults to 0 / 0.5 / 0.5 respectively when missing.
# Returns the modified ggplot object; interface unchanged from the original.
add_osm_objects <- function (map, obj, col='gray40', border=NA, size,
                             shape)
{
    # --------------- sanity checks and warnings ---------------
    if (missing (map))
        stop ('map must be supplied to add_osm_objects')
    if (!is (map, 'ggplot'))
        stop ('map must be a ggplot object')
    if (missing (obj))
        stop ('object must be supplied to add_osm_objects')
    if (!inherits (obj, 'Spatial'))
        stop ('obj must be Spatial')
    if (!(is.character (col) | is.numeric (col)))
    {
        warning ("col will be coerced to character")
        col <- as.character (col)
    }
    # --------------- end sanity checks and warnings ---------------
    lon <- lat <- id <- NULL # suppress 'no visible binding' NOTE in checks
    # inherits() rather than class(obj) == ...: robust to objects carrying
    # more than one class attribute, and the idiomatic S3 type test
    if (inherits (obj, 'SpatialPolygonsDataFrame'))
    {
        if (missing (size))
            size <- 0 # no polygon outline by default
        # NOTE(review): only the first Polygons sub-element is drawn, so
        # holes / multi-part polygons are ignored (as in the original)
        xy <- lapply (slot (obj, "polygons"), function (x)
                      slot (slot (x, "Polygons") [[1]], "coords"))
        xy <- list2df (xy)
        map <- map + ggplot2::geom_polygon (ggplot2::aes (group=id),
                                            data=xy, size=size,
                                            fill=col, colour=border)
    } else if (inherits (obj, 'SpatialLinesDataFrame'))
    {
        if (missing (size))
            size <- 0.5
        if (missing (shape))
            shape <- 1 # solid linetype
        xy <- lapply (slot (obj, 'lines'), function (x)
                      slot (slot (x, 'Lines') [[1]], 'coords'))
        xy <- list2df (xy, islines=TRUE)
        map <- map + ggplot2::geom_path (data=xy,
                                         ggplot2::aes (x=lon, y=lat),
                                         colour=col, size=size, linetype=shape)
    } else if (inherits (obj, 'SpatialPointsDataFrame'))
    {
        if (missing (size))
            size <- 0.5
        if (missing (shape))
            shape <- 19 # filled circle
        xy <- data.frame (slot (obj, 'coords'))
        map <- map + ggplot2::geom_point (data=xy,
                                          ggplot2::aes (x=lon, y=lat),
                                          col=col, size=size, shape=shape)
    } else
        stop ("obj is not a spatial class")
    return (map)
}
#' list2df
#'
#' Converts lists of coordinates to single data frames
#'
#' @param xy A list of coordinates extracted from an sp object
#' @param islines Set to TRUE for spatial lines, otherwise FALSE
#' @return data frame
#' Flatten a list of coordinate matrices into one data frame.
#'
#' For polygons (islines = FALSE) an `id` column is prepended so ggplot2 can
#' group rows by polygon, and the columns are named id/lon/lat. For lines
#' (islines = TRUE) consecutive lines are separated by a row of NAs so that
#' geom_path() breaks between them; the trailing NA row is dropped.
#'
#' @param xy A list of two-column coordinate matrices from an sp object.
#' @param islines Set to TRUE for spatial lines, otherwise FALSE.
#' @return A data frame.
list2df <- function (xy, islines=FALSE)
{
  if (islines) {
    # append an NA separator row to every line
    pieces <- lapply (xy, function (m) rbind (m, rep (NA, 2)))
  } else {
    # prepend the list index as a polygon id column
    pieces <- lapply (seq_along (xy), function (i) cbind (i, xy [[i]]))
  }
  flat <- do.call (rbind, pieces)
  # fresh row names avoid irrelevant duplicate-row-name warnings from rbind
  flat <- data.frame (flat, row.names = 1:nrow (flat))
  if (islines) {
    flat <- flat [1:(nrow (flat) - 1), ] # drop the final NA separator row
  } else {
    names (flat) <- c ("id", "lon", "lat")
  }
  flat
}
|
/R/add-osm-objects.R
|
no_license
|
jeperez/osmplotr
|
R
| false
| false
| 5,272
|
r
|
#' add_osm_objects
#'
#' Adds layers of spatial objects (polygons, lines, or points generated by
#' extract_osm_objects ()) to a graphics object initialised with
#' plot_osm_basemap().
#'
#' @param map A ggplot2 object to which the objects are to be added
#' @param obj A spatial ('sp') data frame of polygons, lines, or points,
#' typically as returned by extract_osm_objects ()
#' @param col Colour of lines or points; fill colour of polygons
#' @param border Border colour of polygons
#' @param size Size argument passed to ggplot2 (polygon, path, point) functions:
#' determines width of lines for (polygon, line), and sizes of points.
#' Respective defaults are (0, 0.5, 0.5).
#' @param shape Shape of points or lines (the latter passed as 'linetype'): see
#' ?ggplot2::shape
#' @return modified version of map (a ggplot object) to which objects have been
#' added
#' @export
#'
#' @seealso \code{\link{plot_osm_basemap}}, \code{\link{extract_osm_objects}}.
#'
#' @examples
#' bbox <- get_bbox (c (-0.13, 51.5, -0.11, 51.52))
#' map <- plot_osm_basemap (bbox=bbox, bg="gray20")
#'
#' \dontrun{
#' # The 'london' data used below were downloaded as:
#' dat_BNR <- extract_osm_objects (bbox=bbox, key='building',
#' value='!residential')
#' dat_HP <- extract_osm_objects (bbox=bbox, key='highway',
#' value='primary')
#' dat_T <- extract_osm_objects (bbox=bbox, key='tree')
#' }
#' map <- add_osm_objects (map, obj=london$dat_BNR, col="gray40", border="yellow")
#' map <- add_osm_objects (map, obj=london$dat_HP, col="gray80",
#' size=1, shape=2)
#' map <- add_osm_objects (map, london$dat_T, col="green", size=2, shape=1)
#' print_osm_map (map)
#'
#' # Polygons with different coloured borders
#' map <- plot_osm_basemap (bbox=bbox, bg="gray20")
#' map <- add_osm_objects (map, obj=london$dat_HP, col="gray80")
#' map <- add_osm_objects (map, london$dat_T, col="green")
#' map <- add_osm_objects (map, obj=london$dat_BNR, col="gray40", border="yellow",
#' size=0.5)
#' print_osm_map (map)
# Add a layer of sp objects (polygons, lines, or points) to a ggplot2 map.
#
# Dispatches on the sp class of 'obj':
#   SpatialPolygonsDataFrame -> geom_polygon ('col' is fill, 'border' outline)
#   SpatialLinesDataFrame    -> geom_path    ('shape' passed as linetype)
#   SpatialPointsDataFrame   -> geom_point
# 'size' defaults to 0 / 0.5 / 0.5 respectively when missing.
# Returns the modified ggplot object; interface unchanged from the original.
add_osm_objects <- function (map, obj, col='gray40', border=NA, size,
                             shape)
{
    # --------------- sanity checks and warnings ---------------
    if (missing (map))
        stop ('map must be supplied to add_osm_objects')
    if (!is (map, 'ggplot'))
        stop ('map must be a ggplot object')
    if (missing (obj))
        stop ('object must be supplied to add_osm_objects')
    if (!inherits (obj, 'Spatial'))
        stop ('obj must be Spatial')
    if (!(is.character (col) | is.numeric (col)))
    {
        warning ("col will be coerced to character")
        col <- as.character (col)
    }
    # --------------- end sanity checks and warnings ---------------
    lon <- lat <- id <- NULL # suppress 'no visible binding' NOTE in checks
    # inherits() rather than class(obj) == ...: robust to objects carrying
    # more than one class attribute, and the idiomatic S3 type test
    if (inherits (obj, 'SpatialPolygonsDataFrame'))
    {
        if (missing (size))
            size <- 0 # no polygon outline by default
        # NOTE(review): only the first Polygons sub-element is drawn, so
        # holes / multi-part polygons are ignored (as in the original)
        xy <- lapply (slot (obj, "polygons"), function (x)
                      slot (slot (x, "Polygons") [[1]], "coords"))
        xy <- list2df (xy)
        map <- map + ggplot2::geom_polygon (ggplot2::aes (group=id),
                                            data=xy, size=size,
                                            fill=col, colour=border)
    } else if (inherits (obj, 'SpatialLinesDataFrame'))
    {
        if (missing (size))
            size <- 0.5
        if (missing (shape))
            shape <- 1 # solid linetype
        xy <- lapply (slot (obj, 'lines'), function (x)
                      slot (slot (x, 'Lines') [[1]], 'coords'))
        xy <- list2df (xy, islines=TRUE)
        map <- map + ggplot2::geom_path (data=xy,
                                         ggplot2::aes (x=lon, y=lat),
                                         colour=col, size=size, linetype=shape)
    } else if (inherits (obj, 'SpatialPointsDataFrame'))
    {
        if (missing (size))
            size <- 0.5
        if (missing (shape))
            shape <- 19 # filled circle
        xy <- data.frame (slot (obj, 'coords'))
        map <- map + ggplot2::geom_point (data=xy,
                                          ggplot2::aes (x=lon, y=lat),
                                          col=col, size=size, shape=shape)
    } else
        stop ("obj is not a spatial class")
    return (map)
}
#' list2df
#'
#' Converts lists of coordinates to single data frames
#'
#' @param xy A list of coordinates extracted from an sp object
#' @param islines Set to TRUE for spatial lines, otherwise FALSE
#' @return data frame
#' Flatten a list of coordinate matrices into one data frame.
#'
#' For polygons (islines = FALSE) an `id` column is prepended so ggplot2 can
#' group rows by polygon, and the columns are named id/lon/lat. For lines
#' (islines = TRUE) consecutive lines are separated by a row of NAs so that
#' geom_path() breaks between them; the trailing NA row is dropped.
#'
#' @param xy A list of two-column coordinate matrices from an sp object.
#' @param islines Set to TRUE for spatial lines, otherwise FALSE.
#' @return A data frame.
list2df <- function (xy, islines=FALSE)
{
  if (islines) {
    # append an NA separator row to every line
    pieces <- lapply (xy, function (m) rbind (m, rep (NA, 2)))
  } else {
    # prepend the list index as a polygon id column
    pieces <- lapply (seq_along (xy), function (i) cbind (i, xy [[i]]))
  }
  flat <- do.call (rbind, pieces)
  # fresh row names avoid irrelevant duplicate-row-name warnings from rbind
  flat <- data.frame (flat, row.names = 1:nrow (flat))
  if (islines) {
    flat <- flat [1:(nrow (flat) - 1), ] # drop the final NA separator row
  } else {
    names (flat) <- c ("id", "lon", "lat")
  }
  flat
}
|
# ---------------------------------------------------------------------------
# Simulation study setup: parameter estimation for a 3-variable GMA
# (generalized mass action) ODE system with the 'simode' package at
# signal-to-noise ratio 10, parallelised with doParallel/doRNG.
# NOTE(review): rm(list=ls()) wipes the workspace -- intended for batch runs.
# ---------------------------------------------------------------------------
rm(list=ls())
library(simode)
library(doRNG)
require(doParallel)
set.seed(2000)
# State variables x1..x3 and the GMA rate equations; gamma* are rate
# constants (linear in the equations), f* are kinetic orders (nonlinear).
vars <- paste0('x', 1:3)
eq1 <- 'gamma11*(x2^f121)*(x3^f131)-gamma12*(x1^f112)*(x2^f122)-gamma13*(x1^f113)*(x3^f133)'
eq2 <- 'gamma21*(x1^f211)*(x2^f221)-gamma22*(x2^f222)'
eq3 <- 'gamma31*(x1^f311)*(x3^f331)-gamma32*(x3^f332)'
equations <- c(eq1,eq2,eq3)
names(equations) <- vars
pars1 <- c('gamma11','f121','f131','gamma12','f112','f122','gamma13','f113','f133')
pars2 <- c('gamma21','f211','f221','gamma22','f222')
pars3 <- c('gamma31','f311','f331','gamma32','f332')
pars <- c(pars1,pars2,pars3)
# split parameters into those entering linearly (separable) and the rest
lin_pars <- c('gamma11','gamma12','gamma13','gamma21','gamma22','gamma31','gamma32')
nlin_pars <- setdiff(pars,lin_pars)
# true parameter values used to generate the synthetic data
theta1 <- c(0.4,-1,-1,3,0.5,-0.1,2,0.75,-0.2)
theta2 <- c(3,0.5,-0.1,1.5,0.5)
theta3 <- c(2,0.75,-0.2,5,0.5)
theta <- c(theta1,theta2,theta3)
names(theta) <- pars
# initial conditions and observation grid: n = 100 points on [0, 4]
x0 <- c(0.5,0.5,1)
names(x0) <- vars
n <- 100
time <- seq(0,4,length.out=n)
# deterministic ODE solution at the true parameters
model_out <- solve_ode(equations,theta,x0,time)
x_det <- model_out[,vars]
# per-variable noise sd chosen so each trajectory has SNR = 10
SNR <- 10
sigma_x <- apply(x_det, 2, sd)
sigma <- signif(sigma_x / SNR, digits=2)
print(sigma)
# one noisy realisation, used only for the illustrative plot below
obs <- list()
for(i in 1:length(vars)) {
obs[[i]] <- x_det[,i] + rnorm(n,0,sigma[i])
}
names(obs) <- vars
# plot the true trajectories with the noisy observations overlaid
pdf(file="../out/solution-gma-SNR10.pdf")
par(mfrow=c(1,1))
plot(time,model_out[,'x1'],'l',ylab="",ylim=c(0,1.5), main=expression(GMA~System~with~SNR==10))
lines(time,model_out[,'x2'])
lines(time,model_out[,'x3'])
points(time,obs$x1,pch=1)
points(time,obs$x2,pch=2)
points(time,obs$x3,pch=4)
dev.off()
# box constraints for the optimiser, in the same order as 'pars'
pars_min <- c(0, -1.1, -1.1, 0, 0, -1.1, 0, 0, -1.1, 0, 0, -1.1, 0, 0, 0, 0, -1.1, 0, 0)
#pars_min <- pars_min * 2
names(pars_min) <- pars
pars_max <- c(6, 0, 0, 6, 1, 0, 6, 1, 0, 6, 1, 0, 6, 1, 6, 1, 0, 6, 1)
#pars_max <- pars_max * 2
names(pars_max) <- pars
# prior-informativeness levels: starting values for the nonlinear parameters
# are drawn with sd = priorInf[ip] * |true value|
priorInf=c(0.1,1,3,5)
# nlin_init <- rnorm(length(theta[nlin_pars]),theta[nlin_pars],
# + priorInf[1]*abs(theta[nlin_pars]))
# names(nlin_init) <- nlin_pars
#
# NLSest <- simode(equations=equations, pars=pars, fixed=x0, time=time, obs=obs,
# nlin_pars=nlin_pars, start=nlin_init, lower=pars_min, upper=pars_max,
# im_method = "non-separable",
# simode_ctrl=simode.control(optim_type = "im"))
# par(mfrow=c(1,3))
# plot(NLSest, type="fit", show="im")
# SLSest <- simode(equations=equations, pars=pars, fixed=x0, time=time, obs=obs,
# nlin_pars=nlin_pars, start=nlin_init, lower=pars_min, upper=pars_max,
# simode_ctrl=simode.control(optim_type = "im"))
# plot(SLSest, type="fit", show="im")
# Monte-Carlo run setup: N replicates on a 16-worker fork cluster; worker
# output goes to the file "log" (removed first so each run starts clean)
unlink("log")
N <- 500
set.seed(1000)
cl <- makeForkCluster(16, outfile="log")
registerDoParallel(cl)
args <- c('equations', 'pars', 'time', 'x0', 'theta',
'nlin_pars', 'x_det', 'vars', 'sigma')
results <- list()
for(ip in 1:4){
results <- foreach(j=1:N, .packages='simode') %dorng% {
# for(j in 1:N) {
SLSmc <- NULL
NLSmc <- NULL
while (TRUE) {
#print("beginloop")
obs <- list()
for(i in 1:length(vars)) {
obs[[i]] <- x_det[,i] + rnorm(n,0,sigma[i])
}
names(obs) <- vars
nlin_init <- rnorm(length(theta[nlin_pars]),theta[nlin_pars],
+ priorInf[ip]*abs(theta[nlin_pars]))
names(nlin_init) <- nlin_pars
ptimeNLS <- system.time({
NLSmc <- simode(equations=equations, pars=pars, fixed=x0, time=time, obs=obs,
nlin_pars=nlin_pars, start=nlin_init,
lower=pars_min, upper=pars_max,
im_method = "non-separable",
simode_ctrl=simode.control(optim_type = "im"))})
if (is.null(NLSmc) || !is.numeric(NLSmc$im_pars_est)) {
print("should repeat NLS call")
next
}
ptimeSLS <- system.time({
SLSmc <- simode(equations=equations, pars=pars, fixed=x0, time=time, obs=obs,
nlin_pars=nlin_pars, start=nlin_init,
lower=pars_min, upper=pars_max,
simode_ctrl=simode.control(optim_type = "im"))})
if (is.null(SLSmc) || !is.numeric(SLSmc$im_pars_est)) {
print("should repeat SLS call")
next
}
break
}
#print(paste0("NLS num:", is.numeric(NLSmc$im_pars_est), " SLS num:", is.numeric(SLSmc$im_pars_est), " num NLS:", length(NLSmc$im_pars_est), " num SLS:", length(SLSmc$im_pars_est)))
list(NLSmc=NLSmc,SLSmc=SLSmc,ptimeNLS=ptimeNLS,ptimeSLS=ptimeSLS)
#results[[j]] <- list(NLSmc=NLSmc,SLSmc=SLSmc,ptimeNLS=ptimeNLS,ptimeSLS=ptimeSLS)
}
NLSmc_im_loss_vals <- sapply(results,function(x) x$NLSmc$im_loss)
SLSmc_im_loss_vals <- sapply(results,function(x) x$SLSmc$im_loss)
NLS_im_vars=sapply(results,function(x) x$NLSmc$im_pars_est)
SLS_im_vars=sapply(results,function(x) x$SLSmc$im_pars_est)
NLSmc_time=list()
SLSmc_time=list()
for (mc in 1:N){
NLSmc_time[mc]<- results[[mc]]$ptimeNLS[3]
SLSmc_time[mc]<- results[[mc]]$ptimeSLS[3]
}
#mean(unlist(NLSmc_im_loss_vals))
#mean(unlist(SLSmc_im_loss_vals))
#mean(unlist(NLSmc_time))
#mean(unlist(SLSmc_time))
loss_df=data.frame(NLSmc=unlist(NLSmc_im_loss_vals),SLSmc=unlist(SLSmc_im_loss_vals),
NLSest_gamma11=NLS_im_vars['gamma11',],NLSest_f121=NLS_im_vars['f121',],NLSest_f131=NLS_im_vars['f131',],
NLSest_gamma12=NLS_im_vars['gamma12',],NLSest_f112=NLS_im_vars['f112',],NLSest_f122=NLS_im_vars['f122',],
NLSest_gamma13=NLS_im_vars['gamma13',],NLSest_f113=NLS_im_vars['f113',],NLSest_f133=NLS_im_vars['f133',],
NLSest_gamma21=NLS_im_vars['gamma21',],NLSest_f211=NLS_im_vars['f211',],NLSest_f221=NLS_im_vars['f221',],
NLSest_gamma22=NLS_im_vars['gamma22',],NLSest_f222=NLS_im_vars['f222',],
NLSest_gamma31=NLS_im_vars['gamma31',],NLSest_f311=NLS_im_vars['f311',],NLSest_f331=NLS_im_vars['f331',],
NLSest_gamma32=NLS_im_vars['gamma32',],NLSest_f332=NLS_im_vars['f332',],
SLSest_gamma11=SLS_im_vars['gamma11',],SLSest_f121=SLS_im_vars['f121',],SLSest_f131=SLS_im_vars['f131',],
SLSest_gamma12=SLS_im_vars['gamma12',],SLSest_f112=SLS_im_vars['f112',],SLSest_f122=SLS_im_vars['f122',],
SLSest_gamma13=SLS_im_vars['gamma13',],SLSest_f113=SLS_im_vars['f113',],SLSest_f133=SLS_im_vars['f133',],
SLSest_gamma21=SLS_im_vars['gamma21',],SLSest_f211=SLS_im_vars['f211',],SLSest_f221=SLS_im_vars['f221',],
SLSest_gamma22=SLS_im_vars['gamma22',],SLSest_f222=SLS_im_vars['f222',],
SLSest_gamma31=SLS_im_vars['gamma31',],SLSest_f311=SLS_im_vars['f311',],SLSest_f331=SLS_im_vars['f331',],
SLSest_gamma32=SLS_im_vars['gamma32',],SLSest_f332=SLS_im_vars['f332',]
)
time_df=data.frame(NLStime=unlist(NLSmc_time),SLStime=unlist(SLSmc_time))
write.csv(loss_df, file = paste0(ip, "-NLStoSLSloss.csv"))
write.csv(time_df, file = paste0(ip, "-NLStoSLStime.csv"))
}
#plot(unlist(NLSmc_im_loss_vals),type='l')
#lines(unlist(SLSmc_im_loss_vals),col="red")
|
/previous_backup/GMA-System1-SNR5-newmethod/compNLStoSLS_GMA.R
|
no_license
|
haroldship/complexity-2019-code
|
R
| false
| false
| 7,159
|
r
|
rm(list=ls())
library(simode)
library(doRNG)
require(doParallel)
set.seed(2000)
vars <- paste0('x', 1:3)
eq1 <- 'gamma11*(x2^f121)*(x3^f131)-gamma12*(x1^f112)*(x2^f122)-gamma13*(x1^f113)*(x3^f133)'
eq2 <- 'gamma21*(x1^f211)*(x2^f221)-gamma22*(x2^f222)'
eq3 <- 'gamma31*(x1^f311)*(x3^f331)-gamma32*(x3^f332)'
equations <- c(eq1,eq2,eq3)
names(equations) <- vars
pars1 <- c('gamma11','f121','f131','gamma12','f112','f122','gamma13','f113','f133')
pars2 <- c('gamma21','f211','f221','gamma22','f222')
pars3 <- c('gamma31','f311','f331','gamma32','f332')
pars <- c(pars1,pars2,pars3)
lin_pars <- c('gamma11','gamma12','gamma13','gamma21','gamma22','gamma31','gamma32')
nlin_pars <- setdiff(pars,lin_pars)
theta1 <- c(0.4,-1,-1,3,0.5,-0.1,2,0.75,-0.2)
theta2 <- c(3,0.5,-0.1,1.5,0.5)
theta3 <- c(2,0.75,-0.2,5,0.5)
theta <- c(theta1,theta2,theta3)
names(theta) <- pars
x0 <- c(0.5,0.5,1)
names(x0) <- vars
n <- 100
time <- seq(0,4,length.out=n)
model_out <- solve_ode(equations,theta,x0,time)
x_det <- model_out[,vars]
SNR <- 10
sigma_x <- apply(x_det, 2, sd)
sigma <- signif(sigma_x / SNR, digits=2)
print(sigma)
obs <- list()
for(i in 1:length(vars)) {
obs[[i]] <- x_det[,i] + rnorm(n,0,sigma[i])
}
names(obs) <- vars
pdf(file="../out/solution-gma-SNR10.pdf")
par(mfrow=c(1,1))
plot(time,model_out[,'x1'],'l',ylab="",ylim=c(0,1.5), main=expression(GMA~System~with~SNR==10))
lines(time,model_out[,'x2'])
lines(time,model_out[,'x3'])
points(time,obs$x1,pch=1)
points(time,obs$x2,pch=2)
points(time,obs$x3,pch=4)
dev.off()
pars_min <- c(0, -1.1, -1.1, 0, 0, -1.1, 0, 0, -1.1, 0, 0, -1.1, 0, 0, 0, 0, -1.1, 0, 0)
#pars_min <- pars_min * 2
names(pars_min) <- pars
pars_max <- c(6, 0, 0, 6, 1, 0, 6, 1, 0, 6, 1, 0, 6, 1, 6, 1, 0, 6, 1)
#pars_max <- pars_max * 2
names(pars_max) <- pars
priorInf=c(0.1,1,3,5)
# nlin_init <- rnorm(length(theta[nlin_pars]),theta[nlin_pars],
# + priorInf[1]*abs(theta[nlin_pars]))
# names(nlin_init) <- nlin_pars
#
# NLSest <- simode(equations=equations, pars=pars, fixed=x0, time=time, obs=obs,
# nlin_pars=nlin_pars, start=nlin_init, lower=pars_min, upper=pars_max,
# im_method = "non-separable",
# simode_ctrl=simode.control(optim_type = "im"))
# par(mfrow=c(1,3))
# plot(NLSest, type="fit", show="im")
# SLSest <- simode(equations=equations, pars=pars, fixed=x0, time=time, obs=obs,
# nlin_pars=nlin_pars, start=nlin_init, lower=pars_min, upper=pars_max,
# simode_ctrl=simode.control(optim_type = "im"))
# plot(SLSest, type="fit", show="im")
unlink("log")
N <- 500
set.seed(1000)
cl <- makeForkCluster(16, outfile="log")
registerDoParallel(cl)
args <- c('equations', 'pars', 'time', 'x0', 'theta',
'nlin_pars', 'x_det', 'vars', 'sigma')
results <- list()
for(ip in 1:4){
results <- foreach(j=1:N, .packages='simode') %dorng% {
# for(j in 1:N) {
SLSmc <- NULL
NLSmc <- NULL
while (TRUE) {
#print("beginloop")
obs <- list()
for(i in 1:length(vars)) {
obs[[i]] <- x_det[,i] + rnorm(n,0,sigma[i])
}
names(obs) <- vars
nlin_init <- rnorm(length(theta[nlin_pars]),theta[nlin_pars],
+ priorInf[ip]*abs(theta[nlin_pars]))
names(nlin_init) <- nlin_pars
ptimeNLS <- system.time({
NLSmc <- simode(equations=equations, pars=pars, fixed=x0, time=time, obs=obs,
nlin_pars=nlin_pars, start=nlin_init,
lower=pars_min, upper=pars_max,
im_method = "non-separable",
simode_ctrl=simode.control(optim_type = "im"))})
if (is.null(NLSmc) || !is.numeric(NLSmc$im_pars_est)) {
print("should repeat NLS call")
next
}
ptimeSLS <- system.time({
SLSmc <- simode(equations=equations, pars=pars, fixed=x0, time=time, obs=obs,
nlin_pars=nlin_pars, start=nlin_init,
lower=pars_min, upper=pars_max,
simode_ctrl=simode.control(optim_type = "im"))})
if (is.null(SLSmc) || !is.numeric(SLSmc$im_pars_est)) {
print("should repeat SLS call")
next
}
break
}
#print(paste0("NLS num:", is.numeric(NLSmc$im_pars_est), " SLS num:", is.numeric(SLSmc$im_pars_est), " num NLS:", length(NLSmc$im_pars_est), " num SLS:", length(SLSmc$im_pars_est)))
list(NLSmc=NLSmc,SLSmc=SLSmc,ptimeNLS=ptimeNLS,ptimeSLS=ptimeSLS)
#results[[j]] <- list(NLSmc=NLSmc,SLSmc=SLSmc,ptimeNLS=ptimeNLS,ptimeSLS=ptimeSLS)
}
NLSmc_im_loss_vals <- sapply(results,function(x) x$NLSmc$im_loss)
SLSmc_im_loss_vals <- sapply(results,function(x) x$SLSmc$im_loss)
NLS_im_vars=sapply(results,function(x) x$NLSmc$im_pars_est)
SLS_im_vars=sapply(results,function(x) x$SLSmc$im_pars_est)
NLSmc_time=list()
SLSmc_time=list()
for (mc in 1:N){
NLSmc_time[mc]<- results[[mc]]$ptimeNLS[3]
SLSmc_time[mc]<- results[[mc]]$ptimeSLS[3]
}
#mean(unlist(NLSmc_im_loss_vals))
#mean(unlist(SLSmc_im_loss_vals))
#mean(unlist(NLSmc_time))
#mean(unlist(SLSmc_time))
loss_df=data.frame(NLSmc=unlist(NLSmc_im_loss_vals),SLSmc=unlist(SLSmc_im_loss_vals),
NLSest_gamma11=NLS_im_vars['gamma11',],NLSest_f121=NLS_im_vars['f121',],NLSest_f131=NLS_im_vars['f131',],
NLSest_gamma12=NLS_im_vars['gamma12',],NLSest_f112=NLS_im_vars['f112',],NLSest_f122=NLS_im_vars['f122',],
NLSest_gamma13=NLS_im_vars['gamma13',],NLSest_f113=NLS_im_vars['f113',],NLSest_f133=NLS_im_vars['f133',],
NLSest_gamma21=NLS_im_vars['gamma21',],NLSest_f211=NLS_im_vars['f211',],NLSest_f221=NLS_im_vars['f221',],
NLSest_gamma22=NLS_im_vars['gamma22',],NLSest_f222=NLS_im_vars['f222',],
NLSest_gamma31=NLS_im_vars['gamma31',],NLSest_f311=NLS_im_vars['f311',],NLSest_f331=NLS_im_vars['f331',],
NLSest_gamma32=NLS_im_vars['gamma32',],NLSest_f332=NLS_im_vars['f332',],
SLSest_gamma11=SLS_im_vars['gamma11',],SLSest_f121=SLS_im_vars['f121',],SLSest_f131=SLS_im_vars['f131',],
SLSest_gamma12=SLS_im_vars['gamma12',],SLSest_f112=SLS_im_vars['f112',],SLSest_f122=SLS_im_vars['f122',],
SLSest_gamma13=SLS_im_vars['gamma13',],SLSest_f113=SLS_im_vars['f113',],SLSest_f133=SLS_im_vars['f133',],
SLSest_gamma21=SLS_im_vars['gamma21',],SLSest_f211=SLS_im_vars['f211',],SLSest_f221=SLS_im_vars['f221',],
SLSest_gamma22=SLS_im_vars['gamma22',],SLSest_f222=SLS_im_vars['f222',],
SLSest_gamma31=SLS_im_vars['gamma31',],SLSest_f311=SLS_im_vars['f311',],SLSest_f331=SLS_im_vars['f331',],
SLSest_gamma32=SLS_im_vars['gamma32',],SLSest_f332=SLS_im_vars['f332',]
)
time_df=data.frame(NLStime=unlist(NLSmc_time),SLStime=unlist(SLSmc_time))
write.csv(loss_df, file = paste0(ip, "-NLStoSLSloss.csv"))
write.csv(time_df, file = paste0(ip, "-NLStoSLStime.csv"))
}
#plot(unlist(NLSmc_im_loss_vals),type='l')
#lines(unlist(SLSmc_im_loss_vals),col="red")
|
## Generate in-silico measurement names of the form "L<i>.P<j>.M<k>":
## numLatent latent variables, numPrepsPer preps per latent variable and
## numMeasPer measurements per prep, in lexicographic order.
getPrepsDef <- function(numLatent, numPrepsPer, numMeasPer) {
  latent_ids <- paste0("L", 1:numLatent)
  prep_ids <- paste0("P", 1:numPrepsPer)
  meas_ids <- paste0("M", 1:numMeasPer)
  paste(rep(latent_ids, each = numPrepsPer * numMeasPer),
        rep(prep_ids, times = numLatent, each = numMeasPer),
        rep(meas_ids, times = numLatent * numPrepsPer),
        sep = ".")
}
## Create a preps-to-techs map: randomly assign each prep of every latent
## variable to one of noTechsPerLatent technologies for that latent.
## Returns a character vector named by measurement, with values "latent.tech".
getTechsDef <- function(preps, noTechsPerLatent){
prep <- getPrep(preps)
names(prep) <- preps
uniquePrep <- unique(prep)
## There should be at least 3 observations for each technology
stopifnot(all(3*noTechsPerLatent <= summary(as.factor((getLatentFromPrep(uniquePrep))))))
techsDef <- rep("",length(preps))
names(techsDef) <- preps
numTechs <- noTechsPerLatent*noLatent(preps)
## technology labels are upper-case letters "A", "B", ...
## NOTE(review): assumes numTechs <= 26 (letters would yield NA beyond) — verify
techs <- toupper(letters[1:numTechs])
latents <- getLatent(preps)
for(i in seq_along(unique(latents))){
l <- unique(latents)[i]
## the contiguous slice of labels reserved for latent variable l
ltechs <- techs[(noTechsPerLatent*(i-1)+1) : (noTechsPerLatent*i) ]
prepL <- unique(prep[latents==l])
techVec <- rep(ltechs,length(prepL))
count <- 1
## iterate preps in random order so the tech assignment is randomized
for(p in sample(prepL)){
techsDef[names(prep[prep==p])] <- paste(l,".",techVec[count],sep="")
count <- count+1
}
}
techsDef
}
## Validate a vector of measurement names; stops with an error on violation.
## We check that
chkpreps <- function(preps) {
## 1. they are ordered (sorted alphabetically)
stopifnot(!is.unsorted(preps))
## 2. unique
stopifnot(!any(duplicated(preps)))
## 3. have the right pattern: latent dot prep dot meas, e.g. "L1.P2.M3"
stopifnot(all(grepl("^[^\\.]+\\.[^\\.]+\\.[^\\.]+$", preps)))
}
## techs is a named list, sorted
## names is "preps"
## values are the corresponding technology
chktechs <- function(techs){
## 1. they are ordered by prep
stopifnot(!is.unsorted(names(techs)))
## 2. tech names (the preps) are not duplicated
stopifnot(!any(duplicated(names(techs))))
## 3. have the right pattern: latent dot tech
## and names are latent.prep.meas
stopifnot(all(grepl("^[^\\.]+\\.[^\\.]+$", techs)))
stopifnot(all(grepl("^[^\\.]+\\.[^\\.]+\\.[^\\.]+$", names(techs))))
}
chkprepnames <- function(prepnames) {
## 1. they are ordered
stopifnot(!is.unsorted(prepnames))
## 2. unique
stopifnot(!any(duplicated(prepnames)))
## 3. have the right pattern: latent dot prep
stopifnot(all(grepl("^[^\\.]+\\.[^\\.]+$", prepnames)))
}
## Table of contents of functions to query preps
##
## 1 number of things, a single non-negative integer:
## --------------------------------------------------
## noMeas - number of measurements
## noMeasUni - number of measurements from non-identifyable preps
## noMeasMulti - number of measurements from identifyable preps
## noLatent - number of latent variables
## noPreps - number of _all_ prep variables, including the ones
## that cannot be identified
## noPrepsUni - number of prep variables that cannot be
## identified
## noPrepsMulti - number of prep variables that can be identified
##
## 2 number of things, grouped by other things
## -------------------------------------------
## noPrepsPer - number of _all_ preps per latent variables
## noMeasPerPer - number of measurements per preps per latent
## variables. This is a list of named integer
## vectors. Both the list and the vectors are
## ordered alphabetically. It contains _all_ preps.
## noMeasPer - number of measurements per preps. It contains _all_
## preps, in a named integer vector, ordered alphabetically.
##
## 3 query properties of the measuments
## ------------------------------------
## getPrep - which preps the different measurements belong to.
## a character vector, contains _all_ measurements.
## getLatent - which latent variables the measurements belong to.
## a character vector, contains _all_ measurements.
## getPrepUni - like getPrep, but only for measurements from
## non-identifyable preps
## getPrepMulti - like getPrep, but only for measurements from
## identifyable preps
## getLatentUni - like getLatent, but only for measurements from
## non-identifyable preps
## getLatentMulti - like getLatent, but only for measurements from
## identifyable preps
## getLatentFromPrep - which latent variables the given preps belong to
## getLatentFromTechs - which latent variables the given techs belong to
##
## 4 query names of preps and latent variables
## -------------------------------------------
## latentNames - names of latent variables, sorted alphabetically
## prepNames - _all_ prep names, sorted alphabetically.
## uniPrepNames - names of the non-identifiable preps
## multiPrepNames - names of the identifyable preps
## uniMeasNames - names of the measurements that belong to
## non-identifyable preps
## multiMeasNames - names of the measurement that belong to
## identifiable preps
## Total number of measurements
noMeas <- function(preps) {
chkpreps(preps)
length(preps)
}
## Number of measurements that belong to non-identifyable preps
noMeasUni <- function(preps) {
chkpreps(preps)
nos <- noMeasPer(preps)
sel <- names(nos)[nos==1]
pr <- getPrep(preps)
sum(pr %in% sel)
}
## Number of measurements that belong to identifiable preps
noMeasMulti <- function(preps) {
chkpreps(preps)
nos <- noMeasPer(preps)
## preps with more than one measurement are identifiable
sel <- names(nos)[nos!=1]
pr <- getPrep(preps)
sum(pr %in% sel)
}
## Number of latent variables
noLatent <- function(preps) {
chkpreps(preps)
length(unique(sapply(strsplit(preps, ".", fixed=TRUE), "[", 1)))
}
## Number of latent variables referenced by a preps-to-techs mapping.
## Bug fix: the input is a techs vector ("latent.tech" values named by
## measurement), so it must be validated with chktechs(), not chkpreps();
## chkpreps() rejects every valid techs vector, so the original always errored.
noLatentFromTechs <- function(techs) {
chktechs(techs)
length(unique(getLatentFromTechs(techs)))
}
## Number of technologies
noTechs <- function(techs){
chktechs(techs)
length(unique(techs))
}
## Number of _all_ preps
noPreps <- function(preps) {
chkpreps(preps)
pr <- sub("\\.[^\\.]+$", "", preps)
length(unique(pr))
}
## Number of non-identifiable preps, i.e. preps with exactly one measurement
noPrepsUni <- function(preps) {
chkpreps(preps)
nom <- noMeasPer(preps)
sum(nom==1)
}
## Number of identifiable ("real") preps, i.e. preps with more than one measurement
noPrepsMulti <- function(preps) {
chkpreps(preps)
nom <- noMeasPer(preps)
sum(nom!=1)
}
## Number of preps per latent variable
noPrepsPer <- function(preps) {
chkpreps(preps)
lp <- strsplit(sub("\\.[^\\.]*$", "", preps), ".", fixed=TRUE)
tapply(sapply(lp, "[", 2), sapply(lp, "[", 1),
function(x) length(unique(x)))
}
## Number of measurements per preps per latent variables
noMeasPerPer <- function(preps) {
chkpreps(preps)
sp <- strsplit(preps, "\\.")
tapply(lapply(sp, "[", 2:3), sapply(sp, "[", 1), function(lp) {
tapply(sapply(lp, "[", 2), sapply(lp, "[", 1), function(x)
length(unique(x)))
}, simplify=FALSE)
}
## Number of measurements per preps
noMeasPer <- function(preps) {
chkpreps(preps)
pr <- sub("\\.[^\\.]+$", "", preps)
tab <- table(pr)
structure(as.vector(tab), names=dimnames(tab)[[1]])
}
## Prep ids of the measurements
getPrep <- function(preps) {
chkpreps(preps)
sub("\\.[^\\.]*$", "", preps)
}
## Latent variables of the measurements
getLatent <- function(preps) {
chkpreps(preps)
sub("\\..*$", "", preps)
}
## Prep ids of the measuments from non-identifyable preps
getPrepUni <- function(preps) {
chkpreps(preps)
up <- uniPrepNames(preps)
pr <- getPrep(preps)
pr[ pr %in% up ]
}
## Prep ids of the measurements from identifyable preps
getPrepMulti <- function(preps) {
chkpreps(preps)
mp <- multiPrepNames(preps)
pr <- getPrep(preps)
pr[ pr %in% mp ]
}
## Latent variables for measurements from non-identifyable preps
getLatentUni <- function(preps) {
chkpreps(preps)
up <- uniPrepNames(preps)
pr <- getPrep(preps)
la <- getLatent(preps)
la[ pr %in% up ]
}
## Latent variables for mesurements from identifyable preps
getLatentMulti <- function(preps) {
chkpreps(preps)
mp <- multiPrepNames(preps)
pr <- getPrep(preps)
la <- getLatent(preps)
la[ pr %in% mp ]
}
## All prep names
prepNames <- function(preps) {
chkpreps(preps)
sort(unique(getPrep(preps)))
}
## Names of non-identifyable preps
uniPrepNames <- function(preps) {
chkpreps(preps)
no <- noMeasPer(preps)
names(no)[no==1]
}
## Names of identifyable preps
multiPrepNames <- function(preps) {
chkpreps(preps)
no <- noMeasPer(preps)
names(no)[no!=1]
}
## Which latent variable for the given prep names
getLatentFromPrep <- function(prepnames) {
chkprepnames(prepnames)
sub("\\..*$", "", prepnames)
}
getLatentFromTechs <- function(techs) {
chktechs(techs)
sub("\\..*$", "", techs)
}
## Names of latent variables
latentNames <- function(preps) {
chkpreps(preps)
unique(getLatent(preps))
}
## Names of the measurements from non-identifiable preps
uniMeasNames <- function(preps) {
chkpreps(preps)
up <- uniPrepNames(preps)
preps[ getPrep(preps) %in% up ]
}
## Names of the measurements from identifiable preps
multiMeasNames <- function(preps) {
chkpreps(preps)
mp <- multiPrepNames(preps)
preps[ getPrep(preps) %in% mp ]
}
## Names of the rows and columns of the psi matrix
psiNames <- function(preps) {
latentNames <- latentNames(preps)
c(paste("L.", sep="", latentNames), paste("S.", sep="", latentNames))
}
|
/code/SCM/R/preps.R
|
permissive
|
carushi/mrna-prot
|
R
| false
| false
| 9,383
|
r
|
## Create in-silico prep names
getPrepsDef <- function(numLatent, numPrepsPer, numMeasPer) {
lat <- rep(paste("L", sep="", 1:numLatent), each=numPrepsPer*numMeasPer)
prep <- rep(paste("P", sep="", 1:numPrepsPer), each=numMeasPer, numLatent)
meas <- rep(paste("M", sep="", 1:numMeasPer), numLatent*numPrepsPer)
paste(sep=".", lat, prep, meas)
}
## Create preps to techs map
getTechsDef <- function(preps, noTechsPerLatent){
prep <- getPrep(preps)
names(prep) <- preps
uniquePrep <- unique(prep)
## There should be at least 3 observations for each technology
stopifnot(all(3*noTechsPerLatent <= summary(as.factor((getLatentFromPrep(uniquePrep))))))
techsDef <- rep("",length(preps))
names(techsDef) <- preps
numTechs <- noTechsPerLatent*noLatent(preps)
techs <- toupper(letters[1:numTechs])
latents <- getLatent(preps)
for(i in seq_along(unique(latents))){
l <- unique(latents)[i]
ltechs <- techs[(noTechsPerLatent*(i-1)+1) : (noTechsPerLatent*i) ]
prepL <- unique(prep[latents==l])
techVec <- rep(ltechs,length(prepL))
count <- 1
for(p in sample(prepL)){
techsDef[names(prep[prep==p])] <- paste(l,".",techVec[count],sep="")
count <- count+1
}
}
techsDef
}
## Check preps, we check that
chkpreps <- function(preps) {
## 1. they are ordered
stopifnot(!is.unsorted(preps))
## 2. unique
stopifnot(!any(duplicated(preps)))
## 3. have the right pattern: latent dot prep dot meas
stopifnot(all(grepl("^[^\\.]+\\.[^\\.]+\\.[^\\.]+$", preps)))
}
## techs is a named list, sorted
## names is "preps"
## values are the corresponding technology
chktechs <- function(techs){
## 1. they are ordered by prep
stopifnot(!is.unsorted(names(techs)))
## 2. tech names (the preps) are not duplicated
stopifnot(!any(duplicated(names(techs))))
## 3. have the right pattern: latent dot tech
## and names are latent.prep.meas
stopifnot(all(grepl("^[^\\.]+\\.[^\\.]+$", techs)))
stopifnot(all(grepl("^[^\\.]+\\.[^\\.]+\\.[^\\.]+$", names(techs))))
}
chkprepnames <- function(prepnames) {
## 1. they are ordered
stopifnot(!is.unsorted(prepnames))
## 2. unique
stopifnot(!any(duplicated(prepnames)))
## 3. have the right pattern: latent dot prep
stopifnot(all(grepl("^[^\\.]+\\.[^\\.]+$", prepnames)))
}
## Table of contents of functions to query preps
##
## 1 number of things, a single non-negative integer:
## --------------------------------------------------
## noMeas - number of measurements
## noMeasUni - number of measurements from non-identifyable preps
## noMeasMulti - number of measurements from identifyable preps
## noLatent - number of latent variables
## noPreps - number of _all_ prep variables, including the ones
## that cannot be identified
## noPrepsUni - number of prep variables that cannot be
## identified
## noPrepsMulti - number of prep variables that can be identified
##
## 2 number of things, grouped by other things
## -------------------------------------------
## noPrepsPer - number of _all_ preps per latent variables
## noMeasPerPer - number of measurements per preps per latent
## variables. This is a list of named integer
## vectors. Both the list and the vectors are
## ordered alphabetically. It contains _all_ preps.
## noMeasPer - number of measurements per preps. It contains _all_
## preps, in a named integer vector, ordered alphabetically.
##
## 3 query properties of the measuments
## ------------------------------------
## getPrep - which preps the different measurements belong to.
## a character vector, contains _all_ measurements.
## getLatent - which latent variables the measurements belong to.
## a character vector, contains _all_ measurements.
## getPrepUni - like getPrep, but only for measurements from
## non-identifyable preps
## getPrepMulti - like getPrep, but only for measurements from
## identifyable preps
## getLatentUni - like getLatent, but only for measurements from
## non-identifyable preps
## getLatentMulti - like getLatent, but only for measurements from
## identifyable preps
## getLLatentFromPrep - which latent variables the given preps belong to
## getLSatentFromPrep - which latent variables the given preps belong to
##
## 4 query names of preps and latent variables
## -------------------------------------------
## latentNames - names of latent variables, sorted alphabetically
## prepNames - _all_ prep names, sorted alphabetically.
## uniPrepNames - names of the non-identifiable preps
## multiPrepNames - names of the identifyable preps
## uniMeasNames - names of the measurements that belong to
## non-identifyable preps
## multiMeasNames - names of the measurement that belong to
## identifiable preps
## Total number of measurements
noMeas <- function(preps) {
chkpreps(preps)
length(preps)
}
## Number of measurements that belong to non-identifyable preps
noMeasUni <- function(preps) {
chkpreps(preps)
nos <- noMeasPer(preps)
sel <- names(nos)[nos==1]
pr <- getPrep(preps)
sum(pr %in% sel)
}
## Number of measuements that belong to identifyable preps
noMeasMulti <- function(preps) {
chkpreps(preps)
nos <- noMeasPer(preps)
sel <- names(nos)[nos!=1]
pr <- getPrep(preps)
sum(pr %in% sel)
}
## Number of latent variables
noLatent <- function(preps) {
chkpreps(preps)
length(unique(sapply(strsplit(preps, ".", fixed=TRUE), "[", 1)))
}
## Number of latent variables referenced by a preps-to-techs mapping.
## Bug fix: the input is a techs vector ("latent.tech" values named by
## measurement), so it must be validated with chktechs(), not chkpreps();
## chkpreps() rejects every valid techs vector, so the original always errored.
noLatentFromTechs <- function(techs) {
chktechs(techs)
length(unique(getLatentFromTechs(techs)))
}
## Number of technologies
noTechs <- function(techs){
chktechs(techs)
length(unique(techs))
}
## Number of _all_ preps
noPreps <- function(preps) {
chkpreps(preps)
pr <- sub("\\.[^\\.]+$", "", preps)
length(unique(pr))
}
## Number or real preps, i.e. preps with more than one measurement
noPrepsUni <- function(preps) {
chkpreps(preps)
nom <- noMeasPer(preps)
sum(nom==1)
}
## Number or real preps, i.e. preps with more than one measurement
noPrepsMulti <- function(preps) {
chkpreps(preps)
nom <- noMeasPer(preps)
sum(nom!=1)
}
## Number of preps per latent variable
noPrepsPer <- function(preps) {
chkpreps(preps)
lp <- strsplit(sub("\\.[^\\.]*$", "", preps), ".", fixed=TRUE)
tapply(sapply(lp, "[", 2), sapply(lp, "[", 1),
function(x) length(unique(x)))
}
## Number of measurements per preps per latent variables
noMeasPerPer <- function(preps) {
chkpreps(preps)
sp <- strsplit(preps, "\\.")
tapply(lapply(sp, "[", 2:3), sapply(sp, "[", 1), function(lp) {
tapply(sapply(lp, "[", 2), sapply(lp, "[", 1), function(x)
length(unique(x)))
}, simplify=FALSE)
}
## Number of measurements per preps
noMeasPer <- function(preps) {
chkpreps(preps)
pr <- sub("\\.[^\\.]+$", "", preps)
tab <- table(pr)
structure(as.vector(tab), names=dimnames(tab)[[1]])
}
## Prep ids of the measurements
getPrep <- function(preps) {
chkpreps(preps)
sub("\\.[^\\.]*$", "", preps)
}
## Latent variables of the measurements
getLatent <- function(preps) {
chkpreps(preps)
sub("\\..*$", "", preps)
}
## Prep ids of the measuments from non-identifyable preps
getPrepUni <- function(preps) {
chkpreps(preps)
up <- uniPrepNames(preps)
pr <- getPrep(preps)
pr[ pr %in% up ]
}
## Prep ids of the measurements from identifyable preps
getPrepMulti <- function(preps) {
chkpreps(preps)
mp <- multiPrepNames(preps)
pr <- getPrep(preps)
pr[ pr %in% mp ]
}
## Latent variables for measurements from non-identifyable preps
getLatentUni <- function(preps) {
chkpreps(preps)
up <- uniPrepNames(preps)
pr <- getPrep(preps)
la <- getLatent(preps)
la[ pr %in% up ]
}
## Latent variables for mesurements from identifyable preps
getLatentMulti <- function(preps) {
chkpreps(preps)
mp <- multiPrepNames(preps)
pr <- getPrep(preps)
la <- getLatent(preps)
la[ pr %in% mp ]
}
## All prep names
prepNames <- function(preps) {
chkpreps(preps)
sort(unique(getPrep(preps)))
}
## Names of non-identifyable preps
uniPrepNames <- function(preps) {
chkpreps(preps)
no <- noMeasPer(preps)
names(no)[no==1]
}
## Names of identifyable preps
multiPrepNames <- function(preps) {
chkpreps(preps)
no <- noMeasPer(preps)
names(no)[no!=1]
}
## Which latent variable for the given prep names
getLatentFromPrep <- function(prepnames) {
chkprepnames(prepnames)
sub("\\..*$", "", prepnames)
}
getLatentFromTechs <- function(techs) {
chktechs(techs)
sub("\\..*$", "", techs)
}
## Names of latent variables
latentNames <- function(preps) {
chkpreps(preps)
unique(getLatent(preps))
}
## Names of the measuments from non-identifyable preps
uniMeasNames <- function(preps) {
chkpreps(preps)
up <- uniPrepNames(preps)
preps[ getPrep(preps) %in% up ]
}
## Names of the measuments from identifyable preps
multiMeasNames <- function(preps) {
chkpreps(preps)
mp <- multiPrepNames(preps)
preps[ getPrep(preps) %in% mp ]
}
## Names of the rows and columns of the psi matrix
psiNames <- function(preps) {
latentNames <- latentNames(preps)
c(paste("L.", sep="", latentNames), paste("S.", sep="", latentNames))
}
|
library(git2r)
### Name: repository
### Title: Open a repository
### Aliases: repository
### ** Examples
## Not run:
##D ## Initialize a temporary repository
##D path <- tempfile(pattern="git2r-")
##D dir.create(path)
##D repo <- init(path)
##D
##D # Configure a user
##D config(repo, user.name="Alice", user.email="alice@example.org")
##D
##D ## Create a file, add and commit
##D writeLines("Hello world!", file.path(path, "test-1.txt"))
##D add(repo, 'test-1.txt')
##D commit_1 <- commit(repo, "Commit message")
##D
##D ## Make one more commit
##D writeLines(c("Hello world!", "HELLO WORLD!"), file.path(path, "test-1.txt"))
##D add(repo, 'test-1.txt')
##D commit(repo, "Next commit message")
##D
##D ## Create one more file
##D writeLines("Hello world!", file.path(path, "test-2.txt"))
##D
##D ## Brief summary of repository
##D repo
##D
##D ## Summary of repository
##D summary(repo)
##D
##D ## Workdir of repository
##D workdir(repo)
##D
##D ## Check if repository is bare
##D is_bare(repo)
##D
##D ## Check if repository is empty
##D is_empty(repo)
##D
##D ## Check if repository is a shallow clone
##D is_shallow(repo)
##D
##D ## List all references in repository
##D references(repo)
##D
##D ## List all branches in repository
##D branches(repo)
##D
##D ## Get HEAD of repository
##D repository_head(repo)
##D
##D ## Check if HEAD is head
##D is_head(repository_head(repo))
##D
##D ## Check if HEAD is local
##D is_local(repository_head(repo))
##D
##D ## List all tags in repository
##D tags(repo)
## End(Not run)
|
/data/genthat_extracted_code/git2r/examples/repository.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,546
|
r
|
library(git2r)
### Name: repository
### Title: Open a repository
### Aliases: repository
### ** Examples
## Not run:
##D ## Initialize a temporary repository
##D path <- tempfile(pattern="git2r-")
##D dir.create(path)
##D repo <- init(path)
##D
##D # Configure a user
##D config(repo, user.name="Alice", user.email="alice@example.org")
##D
##D ## Create a file, add and commit
##D writeLines("Hello world!", file.path(path, "test-1.txt"))
##D add(repo, 'test-1.txt')
##D commit_1 <- commit(repo, "Commit message")
##D
##D ## Make one more commit
##D writeLines(c("Hello world!", "HELLO WORLD!"), file.path(path, "test-1.txt"))
##D add(repo, 'test-1.txt')
##D commit(repo, "Next commit message")
##D
##D ## Create one more file
##D writeLines("Hello world!", file.path(path, "test-2.txt"))
##D
##D ## Brief summary of repository
##D repo
##D
##D ## Summary of repository
##D summary(repo)
##D
##D ## Workdir of repository
##D workdir(repo)
##D
##D ## Check if repository is bare
##D is_bare(repo)
##D
##D ## Check if repository is empty
##D is_empty(repo)
##D
##D ## Check if repository is a shallow clone
##D is_shallow(repo)
##D
##D ## List all references in repository
##D references(repo)
##D
##D ## List all branches in repository
##D branches(repo)
##D
##D ## Get HEAD of repository
##D repository_head(repo)
##D
##D ## Check if HEAD is head
##D is_head(repository_head(repo))
##D
##D ## Check if HEAD is local
##D is_local(repository_head(repo))
##D
##D ## List all tags in repository
##D tags(repo)
## End(Not run)
|
# This generates a preprocessing pipeline to handle categorical features.
# Factors with at most `impact.encoding.boundary` levels are dummy-encoded;
# higher-cardinality factors are impact-encoded (classif or regr variant).
# @param task: the task
# @param impact.encoding.boundary: See autoxgboost
# @return CPOpipeline to transform categorical features
generateCatFeatPipeline = function(task, impact.encoding.boundary) {
# start from a CPO that drops unused factor levels
cat.pipeline = cpoFixFactors()
d = getTaskData(task, target.extra = TRUE)$data
feat.cols = colnames(d)[vlapply(d, is.factor)]
#categ.featureset = task$feature.information$categ.featureset
#if (!is.null(categ.featureset)) {
# for(cf in categ.featureset)
# cat.pipeline %<>>% cpoFeatureHashing(affect.names = cf)
# feat.cols = setdiff(feat.cols, unlist(categ.featureset))
#}
# split factor columns by cardinality: impact vs dummy encoding
impact.cols = colnames(d)[vlapply(d, function(x) is.factor(x) && nlevels(x) > impact.encoding.boundary)]
dummy.cols = setdiff(feat.cols, impact.cols)
if (length(dummy.cols) > 0L)
cat.pipeline %<>>% cpoDummyEncode(affect.names = dummy.cols, infixdot = TRUE)
if (length(impact.cols) > 0L) {
# target-based impact encoding differs between classification and regression
if (getTaskType(task) == "classif") {
cat.pipeline %<>>% cpoImpactEncodeClassif(affect.names = impact.cols)
} else {
cat.pipeline %<>>% cpoImpactEncodeRegr(affect.names = impact.cols)
}
}
return(cat.pipeline)
}
|
/R/generateCatFeatPipeline.R
|
no_license
|
peipeiwu1119/autoxgboost
|
R
| false
| false
| 1,214
|
r
|
# This generates a preprocessing pipeline to handle categorical features
# @param task: the task
# @param impact.encoding.boundary: See autoxgboost
# @return CPOpipeline to transform categorical features
generateCatFeatPipeline = function(task, impact.encoding.boundary) {
# Start the pipeline with a factor-fixing CPO (mlrCPO operator).
cat.pipeline = cpoFixFactors()
# Feature data only (the target column is stripped off).
d = getTaskData(task, target.extra = TRUE)$data
# Names of all factor-typed feature columns.
feat.cols = colnames(d)[vlapply(d, is.factor)]
#categ.featureset = task$feature.information$categ.featureset
#if (!is.null(categ.featureset)) {
#  for(cf in categ.featureset)
#    cat.pipeline %<>>% cpoFeatureHashing(affect.names = cf)
#  feat.cols = setdiff(feat.cols, unlist(categ.featureset))
#}
# Split factors by cardinality: more levels than the boundary ->
# impact encoding; the remaining low-cardinality factors -> dummy encoding.
impact.cols = colnames(d)[vlapply(d, function(x) is.factor(x) && nlevels(x) > impact.encoding.boundary)]
dummy.cols = setdiff(feat.cols, impact.cols)
if (length(dummy.cols) > 0L)
cat.pipeline %<>>% cpoDummyEncode(affect.names = dummy.cols, infixdot = TRUE)
if (length(impact.cols) > 0L) {
# Impact encoding differs by task type (classification vs. regression).
if (getTaskType(task) == "classif") {
cat.pipeline %<>>% cpoImpactEncodeClassif(affect.names = impact.cols)
} else {
cat.pipeline %<>>% cpoImpactEncodeRegr(affect.names = impact.cols)
}
}
return(cat.pipeline)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulator.R
\name{compare_simulated_observed}
\alias{compare_simulated_observed}
\title{Compares simulated and observed variant allele counts}
\usage{
compare_simulated_observed(simulated, observed, depths)
}
\arguments{
\item{simulated}{the simulated variant allele counts}
\item{observed}{the observed variant allele counts}
\item{depths}{the total depths}
}
\description{
Compares simulated and observed variant allele counts
}
\keyword{internal}
|
/man/compare_simulated_observed.Rd
|
permissive
|
alkodsi/ctDNAtools
|
R
| false
| true
| 530
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulator.R
\name{compare_simulated_observed}
\alias{compare_simulated_observed}
\title{Compares simulated and observed variant allele counts}
\usage{
compare_simulated_observed(simulated, observed, depths)
}
\arguments{
\item{simulated}{the simulated variant allele counts}
\item{observed}{the observed variant allele counts}
\item{depths}{the total depths}
}
\description{
Compares simulated and observed variant allele counts
}
\keyword{internal}
|
# Load the "household_power_consumption.txt" dataset and make a
# histogram of the global active power, measured on 01/02/2007
# and 02/02/2007.
# This script should be run from the same directory as the dataset.
# Load the full dataset and retain only the data from 01/02/2007 and 02/02/2007
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";",
na.strings=c("NA","?"), nrows = 69516, stringsAsFactors=FALSE)
data <- data[data$Date=="1/2/2007"|data$Date=="2/2/2007",]
# Convert the "Date" and "Time" columns into Date/Time objects, stored in a new column
data$DateTime <- with(data, strptime(paste(Date,Time), format="%d/%m/%Y %H:%M:%S"))
# Initialize a png device, make the plot and close the device.
png("plot1.png", width=480, height=480, units="px")
hist(data$Global_active_power, col="red", xlab="Global Active Power (kilowatts)",
main="Global Active Power")
dev.off()
|
/plot1.R
|
no_license
|
AllaertF/ExData_Plotting1
|
R
| false
| false
| 927
|
r
|
# Load the "household_power_consumption.txt" dataset and make a
# histogram of the global active power, measured on 01/02/2007
# and 02/02/2007.
# This script should be run from the same directory as the dataset.
# Load the full dataset and retain only the data from 01/02/2007 and 02/02/2007
# NOTE(review): nrows = 69516 stops reading partway through the file --
# presumably the two target dates fall inside that prefix; confirm against
# the full dataset. "?" is mapped to NA alongside literal "NA".
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";",
na.strings=c("NA","?"), nrows = 69516, stringsAsFactors=FALSE)
data <- data[data$Date=="1/2/2007"|data$Date=="2/2/2007",]
# Convert the "Date" and "Time" columns into Date/Time objects, stored in a new column
data$DateTime <- with(data, strptime(paste(Date,Time), format="%d/%m/%Y %H:%M:%S"))
# Initialize a png device, make the plot and close the device.
# Output is a fixed-size 480x480 px PNG written to the working directory.
png("plot1.png", width=480, height=480, units="px")
hist(data$Global_active_power, col="red", xlab="Global Active Power (kilowatts)",
main="Global Active Power")
dev.off()
|
#******************************************Logistic Regression Case Study************************************************
setwd("E:/BA360/R/Proactive Attrition Management-Logistic Regression Case Study")
# Importing the data
mydata1<-read.csv("logistic.csv")
#****************************************Data Analysis***************************************
str(mydata1)
View(mydata1)
# excluding variables `CUSTOMER` & `CSA`
mydata= subset(mydata1,select = -c(CUSTOMER,CSA))
## Create user defined function for descriptive analysis
# Descriptive-statistics helper for one data-frame column.
# Numeric input -> a full univariate summary (n, missing count, moments,
# percentiles, and three outlier flags). Non-numeric input -> counts and
# proportions per level/value.
#
# @param x A vector (one column of a data frame).
# @return A named character vector: c() mixes character and numeric parts,
#   so every value is coerced to character by the caller's contract.
var_Summ = function(x) {
  # BUG FIX 1: the original tested class(x) == "numeric", which is FALSE for
  # integer columns even though the calling script selects columns with
  # sapply(mydata, is.numeric); is.numeric() covers integer and double.
  if (is.numeric(x)) {
    Var_Type = class(x)
    n <- length(x)
    nmiss <- sum(is.na(x))
    mean <- mean(x, na.rm = TRUE)
    std <- sd(x, na.rm = TRUE)
    var <- var(x, na.rm = TRUE)
    min <- min(x, na.rm = TRUE)
    p1 <- quantile(x, 0.01, na.rm = TRUE)
    p5 <- quantile(x, 0.05, na.rm = TRUE)
    p10 <- quantile(x, 0.1, na.rm = TRUE)
    q1 <- quantile(x, 0.25, na.rm = TRUE)
    q2 <- quantile(x, 0.5, na.rm = TRUE)
    q3 <- quantile(x, 0.75, na.rm = TRUE)
    p90 <- quantile(x, 0.9, na.rm = TRUE)
    p95 <- quantile(x, 0.95, na.rm = TRUE)
    p99 <- quantile(x, 0.99, na.rm = TRUE)
    max <- max(x, na.rm = TRUE)
    # Outlier bounds, method 1: mean +/- 3 standard deviations.
    UC1 = mean(x, na.rm = TRUE) + 3 * sd(x, na.rm = TRUE)
    LC1 = mean(x, na.rm = TRUE) - 3 * sd(x, na.rm = TRUE)
    # Method 2: 1st / 99th percentile bounds.
    UC2 = quantile(x, 0.99, na.rm = TRUE)
    LC2 = quantile(x, 0.01, na.rm = TRUE)
    # Method 3: Tukey fences at 1.5 * IQR beyond the quartiles.
    iqr = IQR(x, na.rm = TRUE)
    UC3 = q3 + 1.5 * iqr
    LC3 = q1 - 1.5 * iqr
    ot1 <- max > UC1 | min < LC1
    ot2 <- max > UC2 | min < LC2
    ot3 <- max > UC3 | min < LC3
    # BUG FIX 2: the third flag was returned under a duplicate name
    # (ot_m2 appeared twice); it is now ot_m3 so all three flags are
    # individually addressable in the summary output.
    return(c(Var_Type = Var_Type, n = n, nmiss = nmiss, mean = mean,
             std = std, var = var, min = min, p1 = p1, p5 = p5, p10 = p10,
             q1 = q1, q2 = q2, q3 = q3, p90 = p90, p95 = p95, p99 = p99,
             max = max, ot_m1 = ot1, ot_m2 = ot2, ot_m3 = ot3))
  }
  else {
    Var_Type = class(x)
    n <- length(x)
    nmiss <- sum(is.na(x))
    # Frequency and relative frequency per distinct value/level.
    fre <- table(x)
    prop <- prop.table(table(x))
    #x[is.na(x)]<-x[which.max(prop.table(table(x)))]
    return(c(Var_Type = Var_Type, n = n, nmiss = nmiss, freq = fre, proportion = prop))
  }
}
# Vector of numerical variables
num_var= sapply(mydata,is.numeric)
Other_var= !sapply(mydata,is.numeric)
View(Other_var)
# Applying above defined function on numerical variables
my_num_data<-t(data.frame(apply(mydata[num_var], 2, var_Summ)))
my_cat_data<-t(data.frame(apply(mydata[Other_var], 2, var_Summ)))
View(my_num_data)
View(my_cat_data)
write.csv(my_num_data, file = "num_data_summary.csv")
# Missing values
apply(is.na(mydata[,]),2,sum)
mydata <- mydata[!is.na(mydata$CHURN),]
# Missing Value Treatment
mydata[,num_var] <- apply(data.frame(mydata[,num_var]), 2, function(x){x <- replace(x, is.na(x), mean(x, na.rm=TRUE))})
mydata[,Other_var] <- apply(data.frame(mydata[,Other_var]), 2, function(x){x <- replace(x, is.na(x), which.max(prop.table(table(x))))})
# Outlier Treatment
# Winsorize a numeric vector: values below the 1st percentile are raised
# to P1 and values above the 99th percentile are lowered to P99.
# NA entries pass through unchanged (quantiles are computed with na.rm).
M1_fun <- function(x) {
  caps <- quantile(x, probs = c(0.01, 0.99), na.rm = TRUE)
  pmin(pmax(x, caps[[1]]), caps[[2]])
}
mydata[,num_var] <- apply(data.frame(mydata[,num_var]), 2, M1_fun)
TESTDATA <- t(data.frame(apply(mydata[num_var], 2, var_Summ)))
write.csv(TESTDATA, file = "TESTDATA.csv")
# Correlation matrix
corrm<- cor(mydata[,num_var]) ### CORRELATION MATRIX
View(corrm)
write.csv(corrm, file = "corrm1.csv")
#****************************************Feature Engineering **************************************************
# Selecting important categorical varibales using 'chisquare test'
freq_table <- table(mydata$CHURN, mydata$CHILDREN)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITA) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITAA) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITB) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITC) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITDE) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITGY)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITZ)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$PRIZMRUR) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$PRIZMUB) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$PRIZMTWN) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$REFURB) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$WEBCAP) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCPROF)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCCLER)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCCRFT)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCSTUD)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCHMKR)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCRET) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCSELF)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$MARRYYES)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$MARRYNO) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$MAILORD) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$MAILRES) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$MAILFLAG)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$TRAVEL)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$PCOWN)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$NEWCELLY) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$NEWCELLN)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$TRUCK)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$RV)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITCD) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$INCOME) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$MCYCLE)
chisq.test(freq_table)
# Variable reduction using step wise regression
fitt <- step(lm(CHURN ~ REVENUE
+MOU
+RECCHRGE
+DIRECTAS
+OVERAGE
+ROAM
+CHANGEM
+CHANGER
+DROPVCE
+BLCKVCE
+UNANSVCE
+CUSTCARE
+THREEWAY
+MOUREC
+OUTCALLS
+INCALLS
+PEAKVCE
+OPEAKVCE
+DROPBLK
+CALLFWDV
+CALLWAIT
+MONTHS
+UNIQSUBS
+ACTVSUBS
+PHONES
+MODELS
+EQPDAYS
+AGE1
+AGE2
+CREDITA
+CREDITAA
+CREDITB
+CREDITC
+CREDITDE
+PRIZMRUR
+PRIZMUB
+PRIZMTWN
+REFURB
+WEBCAP
+OCCRET
+MARRYNO
+MAILORD
+MAILRES
+NEWCELLY
+CREDITCD
+INCOME, data = mydata), direction = "both")
summary(fitt)
# Transformed Variables
mydata$root_MOU <- sqrt(mydata$MOU)
mydata$root_EQPDAYS<- round(sqrt(mydata$EQPDAYS))
mydata$root_OVERAGE <- sqrt(mydata$OVERAGE)
# Dividing dataset into "training" and "testing"
testing<- mydata[(mydata$CHURNDEP==0.5),]
training <- mydata[(mydata$CHURNDEP!=0.5),]
testing$CHURNDEP <- NULL
training$CHURNDEP<- NULL
nrow(training)
nrow(testing)
#********************************************** Model Building *********************************************************
# Building Models for "training" dataset
fit<-glm(CHURN ~ REVENUE + root_MOU + RECCHRGE + root_OVERAGE + ROAM +
CHANGEM + CHANGER + DROPVCE + CUSTCARE + THREEWAY +
INCALLS + PEAKVCE + OPEAKVCE + DROPBLK + CALLWAIT + MONTHS + UNIQSUBS +
ACTVSUBS + PHONES + root_EQPDAYS + AGE1 + CREDITAA +
CREDITB + CREDITC + CREDITDE + PRIZMRUR + PRIZMUB +
REFURB + WEBCAP + MARRYNO + MAILRES + NEWCELLY ,data = training,
family = binomial(logit))
# Output of Logistic Regression
summary(fit)
ls(fit)
fit$model
coeff<-fit$coef #Coefficients of model
write.csv(coeff, "coeff.csv")
# Multicollinierity Checking using VIF
library(car)
asd <- as.matrix(vif(fit))
write.csv(asd, "vif1.csv")
# Concordance checking
source("Concordance.R")
Concordance(fit) ## concordance- 0.6221
# Running Stepwise regression
step1=step(fit, direction = "both")
summary(step1)
# Final Model
fit2<-glm(CHURN ~ REVENUE + root_MOU + RECCHRGE + root_OVERAGE + ROAM +
CHANGEM + CHANGER + DROPVCE + THREEWAY +
INCALLS + PEAKVCE + OPEAKVCE + DROPBLK + MONTHS + UNIQSUBS +
ACTVSUBS + PHONES + root_EQPDAYS + AGE1 +
CREDITB + CREDITC + CREDITDE + PRIZMUB +
REFURB + WEBCAP + MARRYNO + MAILRES + NEWCELLY,data = training,
family = binomial(logit))
summary(fit2)
source("Concordance.R")
Concordance(fit2) ## concordance- 0.62175
# Multicollinierity Checking using VIF
coeff<-fit2$coef #Coefficients of model
write.csv(coeff, "coeff2.csv")
library(car)
asd2 <- as.matrix(vif(fit2))
write.csv(asd2, "vif2.csv")
# Running anova
anova(fit2,fit, test = 'Chisq')
# Writing model coefficients
write.csv(fit2$coefficients,"Final_model_coeff.csv")
# Getting the standardized beta coefficients
install.packages("QuantPsyc")
library(QuantPsyc)
stb= data.frame(lm.beta(fit2))
View(stb)
#*************************************VALIDATION ******************************************
#Decile Scoring
## Training dataset
train1<- cbind(training, Prob=predict(fit2, type="response"))
View(train1)
##Creating Deciles
decLocations <- quantile(train1$Prob, probs = seq(0.1,0.9,by=0.1))
train1$decile <- findInterval(train1$Prob,c(-Inf,decLocations, Inf))
View(train1)
require(dplyr)
train1$decile<-factor(train1$decile)
decile_grp<-group_by(train1,decile)
decile_summ_train<-summarize(decile_grp, total_cnt=n(), min_prob=min(p=Prob), max_prob=max(Prob), CHURN_cnt=sum(CHURN),
non_CHURN_cnt=total_cnt -CHURN_cnt )
decile_summ_train<-arrange(decile_summ_train, desc(decile))
View(decile_summ_train)
write.csv(decile_summ_train,"fit_train_DA1.csv",row.names = F)
##Testing dataset
test1<- cbind(testing, Prob=predict(fit2,testing, type="response"))
View(test1)
##Creating Deciles
decLocations <- quantile(test1$Prob, probs = seq(0.1,0.9,by=0.1))
test1$decile <- findInterval(test1$Prob,c(-Inf,decLocations, Inf))
names(test1)
test1$decile<-factor(test1$decile)
decile_grp<-group_by(test1,decile)
decile_summ_test<-summarize(decile_grp, total_cnt=n(), min_prob=min(p=Prob), max_prob=max(Prob), CHURN_cnt=sum(CHURN),
non_CHURN_cnt=total_cnt -CHURN_cnt )
decile_summ_test<-arrange(decile_summ_test, desc(decile))
View(decile_summ_test)
write.csv(decile_summ_test,"fit_test_DA1.csv",row.names = F)
#**************************************************************************************************************************
|
/Final_Code.R
|
no_license
|
vikas1296/Customer-Churn-
|
R
| false
| false
| 11,709
|
r
|
#******************************************Logistic Regression Case Study************************************************
setwd("E:/BA360/R/Proactive Attrition Management-Logistic Regression Case Study")
# Importing the data
mydata1<-read.csv("logistic.csv")
#****************************************Data Analysis***************************************
str(mydata1)
View(mydata1)
# excluding variables `CUSTOMER` & `CSA`
mydata= subset(mydata1,select = -c(CUSTOMER,CSA))
## Create user defined function for descriptive analysis
# Descriptive-statistics helper for one data-frame column.
# Numeric input -> a full univariate summary (n, missing count, moments,
# percentiles, and three outlier flags). Non-numeric input -> counts and
# proportions per level/value.
#
# @param x A vector (one column of a data frame).
# @return A named character vector: c() mixes character and numeric parts,
#   so every value is coerced to character by the caller's contract.
var_Summ = function(x) {
  # BUG FIX 1: the original tested class(x) == "numeric", which is FALSE for
  # integer columns even though the calling script selects columns with
  # sapply(mydata, is.numeric); is.numeric() covers integer and double.
  if (is.numeric(x)) {
    Var_Type = class(x)
    n <- length(x)
    nmiss <- sum(is.na(x))
    mean <- mean(x, na.rm = TRUE)
    std <- sd(x, na.rm = TRUE)
    var <- var(x, na.rm = TRUE)
    min <- min(x, na.rm = TRUE)
    p1 <- quantile(x, 0.01, na.rm = TRUE)
    p5 <- quantile(x, 0.05, na.rm = TRUE)
    p10 <- quantile(x, 0.1, na.rm = TRUE)
    q1 <- quantile(x, 0.25, na.rm = TRUE)
    q2 <- quantile(x, 0.5, na.rm = TRUE)
    q3 <- quantile(x, 0.75, na.rm = TRUE)
    p90 <- quantile(x, 0.9, na.rm = TRUE)
    p95 <- quantile(x, 0.95, na.rm = TRUE)
    p99 <- quantile(x, 0.99, na.rm = TRUE)
    max <- max(x, na.rm = TRUE)
    # Outlier bounds, method 1: mean +/- 3 standard deviations.
    UC1 = mean(x, na.rm = TRUE) + 3 * sd(x, na.rm = TRUE)
    LC1 = mean(x, na.rm = TRUE) - 3 * sd(x, na.rm = TRUE)
    # Method 2: 1st / 99th percentile bounds.
    UC2 = quantile(x, 0.99, na.rm = TRUE)
    LC2 = quantile(x, 0.01, na.rm = TRUE)
    # Method 3: Tukey fences at 1.5 * IQR beyond the quartiles.
    iqr = IQR(x, na.rm = TRUE)
    UC3 = q3 + 1.5 * iqr
    LC3 = q1 - 1.5 * iqr
    ot1 <- max > UC1 | min < LC1
    ot2 <- max > UC2 | min < LC2
    ot3 <- max > UC3 | min < LC3
    # BUG FIX 2: the third flag was returned under a duplicate name
    # (ot_m2 appeared twice); it is now ot_m3 so all three flags are
    # individually addressable in the summary output.
    return(c(Var_Type = Var_Type, n = n, nmiss = nmiss, mean = mean,
             std = std, var = var, min = min, p1 = p1, p5 = p5, p10 = p10,
             q1 = q1, q2 = q2, q3 = q3, p90 = p90, p95 = p95, p99 = p99,
             max = max, ot_m1 = ot1, ot_m2 = ot2, ot_m3 = ot3))
  }
  else {
    Var_Type = class(x)
    n <- length(x)
    nmiss <- sum(is.na(x))
    # Frequency and relative frequency per distinct value/level.
    fre <- table(x)
    prop <- prop.table(table(x))
    #x[is.na(x)]<-x[which.max(prop.table(table(x)))]
    return(c(Var_Type = Var_Type, n = n, nmiss = nmiss, freq = fre, proportion = prop))
  }
}
# Vector of numerical variables
num_var= sapply(mydata,is.numeric)
Other_var= !sapply(mydata,is.numeric)
View(Other_var)
# Applying above defined function on numerical variables
my_num_data<-t(data.frame(apply(mydata[num_var], 2, var_Summ)))
my_cat_data<-t(data.frame(apply(mydata[Other_var], 2, var_Summ)))
View(my_num_data)
View(my_cat_data)
write.csv(my_num_data, file = "num_data_summary.csv")
# Missing values
apply(is.na(mydata[,]),2,sum)
mydata <- mydata[!is.na(mydata$CHURN),]
# Missing Value Treatment
mydata[,num_var] <- apply(data.frame(mydata[,num_var]), 2, function(x){x <- replace(x, is.na(x), mean(x, na.rm=TRUE))})
mydata[,Other_var] <- apply(data.frame(mydata[,Other_var]), 2, function(x){x <- replace(x, is.na(x), which.max(prop.table(table(x))))})
# Outlier Treatment
# Winsorize a numeric vector: values below the 1st percentile are raised
# to P1 and values above the 99th percentile are lowered to P99.
# NA entries pass through unchanged (quantiles are computed with na.rm).
M1_fun <- function(x) {
  caps <- quantile(x, probs = c(0.01, 0.99), na.rm = TRUE)
  pmin(pmax(x, caps[[1]]), caps[[2]])
}
mydata[,num_var] <- apply(data.frame(mydata[,num_var]), 2, M1_fun)
TESTDATA <- t(data.frame(apply(mydata[num_var], 2, var_Summ)))
write.csv(TESTDATA, file = "TESTDATA.csv")
# Correlation matrix
corrm<- cor(mydata[,num_var]) ### CORRELATION MATRIX
View(corrm)
write.csv(corrm, file = "corrm1.csv")
#****************************************Feature Engineering **************************************************
# Selecting important categorical varibales using 'chisquare test'
freq_table <- table(mydata$CHURN, mydata$CHILDREN)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITA) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITAA) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITB) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITC) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITDE) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITGY)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITZ)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$PRIZMRUR) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$PRIZMUB) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$PRIZMTWN) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$REFURB) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$WEBCAP) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCPROF)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCCLER)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCCRFT)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCSTUD)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCHMKR)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCRET) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$OCCSELF)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$MARRYYES)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$MARRYNO) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$MAILORD) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$MAILRES) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$MAILFLAG)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$TRAVEL)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$PCOWN)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$NEWCELLY) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$NEWCELLN)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$TRUCK)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$RV)
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$CREDITCD) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$INCOME) #significant
chisq.test(freq_table)
freq_table <- table(mydata$CHURN, mydata$MCYCLE)
chisq.test(freq_table)
# Variable reduction using step wise regression
fitt <- step(lm(CHURN ~ REVENUE
+MOU
+RECCHRGE
+DIRECTAS
+OVERAGE
+ROAM
+CHANGEM
+CHANGER
+DROPVCE
+BLCKVCE
+UNANSVCE
+CUSTCARE
+THREEWAY
+MOUREC
+OUTCALLS
+INCALLS
+PEAKVCE
+OPEAKVCE
+DROPBLK
+CALLFWDV
+CALLWAIT
+MONTHS
+UNIQSUBS
+ACTVSUBS
+PHONES
+MODELS
+EQPDAYS
+AGE1
+AGE2
+CREDITA
+CREDITAA
+CREDITB
+CREDITC
+CREDITDE
+PRIZMRUR
+PRIZMUB
+PRIZMTWN
+REFURB
+WEBCAP
+OCCRET
+MARRYNO
+MAILORD
+MAILRES
+NEWCELLY
+CREDITCD
+INCOME, data = mydata), direction = "both")
summary(fitt)
# Transformed Variables
mydata$root_MOU <- sqrt(mydata$MOU)
mydata$root_EQPDAYS<- round(sqrt(mydata$EQPDAYS))
mydata$root_OVERAGE <- sqrt(mydata$OVERAGE)
# Dividing dataset into "training" and "testing"
testing<- mydata[(mydata$CHURNDEP==0.5),]
training <- mydata[(mydata$CHURNDEP!=0.5),]
testing$CHURNDEP <- NULL
training$CHURNDEP<- NULL
nrow(training)
nrow(testing)
#********************************************** Model Building *********************************************************
# Building Models for "training" dataset
fit<-glm(CHURN ~ REVENUE + root_MOU + RECCHRGE + root_OVERAGE + ROAM +
CHANGEM + CHANGER + DROPVCE + CUSTCARE + THREEWAY +
INCALLS + PEAKVCE + OPEAKVCE + DROPBLK + CALLWAIT + MONTHS + UNIQSUBS +
ACTVSUBS + PHONES + root_EQPDAYS + AGE1 + CREDITAA +
CREDITB + CREDITC + CREDITDE + PRIZMRUR + PRIZMUB +
REFURB + WEBCAP + MARRYNO + MAILRES + NEWCELLY ,data = training,
family = binomial(logit))
# Output of Logistic Regression
summary(fit)
ls(fit)
fit$model
coeff<-fit$coef #Coefficients of model
write.csv(coeff, "coeff.csv")
# Multicollinierity Checking using VIF
library(car)
asd <- as.matrix(vif(fit))
write.csv(asd, "vif1.csv")
# Concordance checking
source("Concordance.R")
Concordance(fit) ## concordance- 0.6221
# Running Stepwise regression
step1=step(fit, direction = "both")
summary(step1)
# Final Model
fit2<-glm(CHURN ~ REVENUE + root_MOU + RECCHRGE + root_OVERAGE + ROAM +
CHANGEM + CHANGER + DROPVCE + THREEWAY +
INCALLS + PEAKVCE + OPEAKVCE + DROPBLK + MONTHS + UNIQSUBS +
ACTVSUBS + PHONES + root_EQPDAYS + AGE1 +
CREDITB + CREDITC + CREDITDE + PRIZMUB +
REFURB + WEBCAP + MARRYNO + MAILRES + NEWCELLY,data = training,
family = binomial(logit))
summary(fit2)
source("Concordance.R")
Concordance(fit2) ## concordance- 0.62175
# Multicollinierity Checking using VIF
coeff<-fit2$coef #Coefficients of model
write.csv(coeff, "coeff2.csv")
library(car)
asd2 <- as.matrix(vif(fit2))
write.csv(asd2, "vif2.csv")
# Running anova
anova(fit2,fit, test = 'Chisq')
# Writing model coefficients
write.csv(fit2$coefficients,"Final_model_coeff.csv")
# Getting the standardized beta coefficients
install.packages("QuantPsyc")
library(QuantPsyc)
stb= data.frame(lm.beta(fit2))
View(stb)
#*************************************VALIDATION ******************************************
#Decile Scoring
## Training dataset
train1<- cbind(training, Prob=predict(fit2, type="response"))
View(train1)
##Creating Deciles
decLocations <- quantile(train1$Prob, probs = seq(0.1,0.9,by=0.1))
train1$decile <- findInterval(train1$Prob,c(-Inf,decLocations, Inf))
View(train1)
require(dplyr)
train1$decile<-factor(train1$decile)
decile_grp<-group_by(train1,decile)
decile_summ_train<-summarize(decile_grp, total_cnt=n(), min_prob=min(p=Prob), max_prob=max(Prob), CHURN_cnt=sum(CHURN),
non_CHURN_cnt=total_cnt -CHURN_cnt )
decile_summ_train<-arrange(decile_summ_train, desc(decile))
View(decile_summ_train)
write.csv(decile_summ_train,"fit_train_DA1.csv",row.names = F)
##Testing dataset
test1<- cbind(testing, Prob=predict(fit2,testing, type="response"))
View(test1)
##Creating Deciles
decLocations <- quantile(test1$Prob, probs = seq(0.1,0.9,by=0.1))
test1$decile <- findInterval(test1$Prob,c(-Inf,decLocations, Inf))
names(test1)
test1$decile<-factor(test1$decile)
decile_grp<-group_by(test1,decile)
decile_summ_test<-summarize(decile_grp, total_cnt=n(), min_prob=min(p=Prob), max_prob=max(Prob), CHURN_cnt=sum(CHURN),
non_CHURN_cnt=total_cnt -CHURN_cnt )
decile_summ_test<-arrange(decile_summ_test, desc(decile))
View(decile_summ_test)
write.csv(decile_summ_test,"fit_test_DA1.csv",row.names = F)
#**************************************************************************************************************************
|
################################################################################
## ui ##
################################################################################
library(shiny)
# UI definition: the sidebar collects car features (horsepower, weight,
# transmission type); the main panel shows the predicted MPG and a
# comparison histogram rendered by the server as "prediction" / "newHist".
shinyUI(
pageWithSidebar(
# Application Title
headerPanel("Simple Model for MPG Prediction"),
# Input controls plus a short description of the underlying model.
sidebarPanel(
p("This application allows the user to obtain a gas
consumption prediction based on a basic set of features."),
h4("Model Specification"),
p("The predictions are based on a linear regression model
specified with transmission type, horsepower, weight, and the
interactions between horsepower and transmission, and weight
and transmission."),
h4("Power:"),
p("Enter a number of horsepower for the vehicle."),
numericInput("hp", "Gross Horsepower", 150, min=50, max=500, step=5),
h4("Weight:"),
p("Enter the weight of the vehicle in pounds."),
numericInput("wt", "Weight (lb)", 3200, min=1000, max=8000, step=100),
h4("Transmission type:"),
p("Select the type of transmission."),
radioButtons("trans", "Transmission",
c("Automatic" = "Auto", "Manual" = "Manual")),
# submitButton defers sending inputs to the server until pressed.
submitButton("Predict MPG for this Car!")
),
# Output area: prediction text and the MPG comparison plot.
mainPanel(
h3("Results of prediction"),
h4("Based on the input you entered:"),
verbatimTextOutput("prediction"),
h4("Mileage per Gallon comparison"),
p("The following plot shows the distribution of MPG based on the
data from the 1974 Motor Trend US magazine. The dashed black
line corresponds to the mean MPG for the 32 cars in the dataset,
and the red line corresponds to the prediction based on the
input you entered."),
plotOutput("newHist")
)
))
|
/ui.R
|
no_license
|
cpatinof/mpgPredictiveModel
|
R
| false
| false
| 2,166
|
r
|
################################################################################
## ui ##
################################################################################
library(shiny)
shinyUI(
pageWithSidebar(
# Application Title
headerPanel("Simple Model for MPG Prediction"),
sidebarPanel(
p("This application allows the user to obtain a gas
consumption prediction based on a basic set of features."),
h4("Model Specification"),
p("The predictions are based on a linear regression model
specified with transmission type, horsepower, weight, and the
interactions between horsepower and transmission, and weight
and transmission."),
h4("Power:"),
p("Enter a number of horsepower for the vehicle."),
numericInput("hp", "Gross Horsepower", 150, min=50, max=500, step=5),
h4("Weight:"),
p("Enter the weight of the vehicle in pounds."),
numericInput("wt", "Weight (lb)", 3200, min=1000, max=8000, step=100),
h4("Transmission type:"),
p("Select the type of transmission."),
radioButtons("trans", "Transmission",
c("Automatic" = "Auto", "Manual" = "Manual")),
submitButton("Predict MPG for this Car!")
),
mainPanel(
h3("Results of prediction"),
h4("Based on the input you entered:"),
verbatimTextOutput("prediction"),
h4("Mileage per Gallon comparison"),
p("The following plot shows the distribution of MPG based on the
data from the 1974 Motor Trend US magazine. The dashed black
line corresponds to the mean MPG for the 32 cars in the dataset,
and the red line corresponds to the prediction based on the
input you entered."),
plotOutput("newHist")
)
))
|
library(httr)
# 1. Find OAuth settings for github:
# http://developer.github.com/v3/oauth/
oauth_endpoints("github")
# 2. To make your own application, register at at
# https://github.com/settings/applications. Use any URL for the homepage URL
# (http://github.com is fine) and http://localhost:1410 as the callback url
#
# Replace your key and secret below.
# NOTE(review): a real client secret is hard-coded and committed here.
# It should be revoked and loaded from an environment variable
# (e.g. Sys.getenv("GITHUB_SECRET")) instead of living in source control.
myapp <- oauth_app("courseraquiz",
key = "25bf4b6d9c471aaaa0fb",
secret = "d744b4109da07a4b28c32a07ce0998e01d7bdc0e")
# 3. Get OAuth credentials (performs the interactive OAuth 2.0 flow).
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
# 4. Use API
# Query the rate-limit endpoint as a simple authenticated request;
# stop_for_status() raises an error on any non-success HTTP status.
gtoken <- config(token = github_token)
req <- GET("https://api.github.com/rate_limit", gtoken)
stop_for_status(req)
content(req)
|
/cleaning_data/week2/quiz.R
|
no_license
|
tamimcsedu19/datasciencecoursera
|
R
| false
| false
| 766
|
r
|
library(httr)
# 1. Find OAuth settings for github:
# http://developer.github.com/v3/oauth/
oauth_endpoints("github")
# 2. To make your own application, register at at
# https://github.com/settings/applications. Use any URL for the homepage URL
# (http://github.com is fine) and http://localhost:1410 as the callback url
#
# Replace your key and secret below.
myapp <- oauth_app("courseraquiz",
key = "25bf4b6d9c471aaaa0fb",
secret = "d744b4109da07a4b28c32a07ce0998e01d7bdc0e")
# 3. Get OAuth credentials
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
# 4. Use API
gtoken <- config(token = github_token)
req <- GET("https://api.github.com/rate_limit", gtoken)
stop_for_status(req)
content(req)
|
library(XML)
# xmlToDF() is defined in this helper script -- presumably a faster
# alternative to XML::xmlToDataFrame (name suggests it; verify in xmlFaster.R).
source("xmlFaster.R")
# Parse the XML document into an internal DOM tree.
nps <- xmlParse("NPS_Results.xml")
# Convert every /TABLE/NPS_RESULTS node into a data-frame row; system.time()
# prints how long the conversion takes.
system.time(data3 <- xmlToDF(nps,xpath = "/TABLE/NPS_RESULTS" ))
# Parse the ISO-8601-style timestamp strings into POSIXlt date-times.
data3$Created <- strptime(data3$Created, "%Y-%m-%dT%H:%M:%S")
View(data3)
|
/Random Statistical Analysis/NPS.R
|
permissive
|
dmpe/R
|
R
| false
| false
| 210
|
r
|
library(XML)
source("xmlFaster.R")
nps <- xmlParse("NPS_Results.xml")
system.time(data3 <- xmlToDF(nps,xpath = "/TABLE/NPS_RESULTS" ))
data3$Created <- strptime(data3$Created, "%Y-%m-%dT%H:%M:%S")
View(data3)
|
library(dplyr)
library(tidyverse)
library(mapdata)
library(maps)
library(RColorBrewer)
library(gganimate)
gt <- read_csv("data/database.csv")
worldmap <- map_data("world")
newworld <- worldmap %>%
filter(region != "Antarctica")
newworld$region <- recode(newworld$region
,'USA' = 'United States'
,'UK' = 'United Kingdom'
)
world <- ggplot() +
geom_polygon(data = newworld, aes(x = long, y = lat, group = group), fill = "grey", color = "#4e4d47") +
coord_quickmap() +
theme_void()
gtclean <- gt %>%
filter(crit1 == 1, crit2 == 1, nkill > 0) %>%
group_by(country_txt, iyear, nkill) %>%
summarise(
count = n(),
) %>%
mutate(
killed = nkill * count
) %>%
group_by(country_txt, iyear) %>%
summarise(killed = sum(killed))
#from graph 2 text
skilled2 <- gtclean %>%
group_by(country_txt) %>%
summarise(killed = sum(killed)) %>%
filter(killed > 11000) %>%
arrange(desc(killed))
# Bar chart of total terrorism deaths for the worst-affected countries
# (skilled2 holds per-country killed totals, filtered to > 11000).
# FIX: the original labels were copy-pasted from an unrelated suicide plot
# ("Number of suicides per 100.000 people", x = "State", y = "Suicides");
# they now describe the data actually plotted.
ggplot(skilled2, aes(reorder(country_txt, -killed), weight=killed)) +
geom_bar(width=0.75) +
labs(title = "People Killed in Terrorist Attacks by Country", x="Country", y="People Killed") +
theme_minimal() +
theme(plot.title = element_text(size = 14, face = "bold"), axis.title = element_text(size = 8),
axis.title.y = element_text(margin=margin(t = 0, r = 10, b = 0, l = 0)),
axis.title.x = element_text(margin=margin(t = 10, r = 0, b = 0, l = 0)))
#for geom_line
skilled <- gtclean %>%
group_by(iyear) %>%
summarise(killed = sum(killed))
grouped <- inner_join(newworld, gtclean, by = c('region' = 'country_txt')) %>%
filter(region != "Antarctica")
myPalette <- colorRampPalette(rev(brewer.pal(6, "OrRd")))
map <- world +
geom_polygon(data = grouped, aes(x = long, y = lat, group = group, fill = killed, frame = iyear), color = "#4e4d47") +
coord_quickmap() +
scale_fill_gradientn(colours = rev(myPalette(5)),
na.value="#4e4d47",
breaks = c(1, 10, 50, 200, 1000, 8000),
trans = "log10",
name = "People Killed",
guide = guide_legend(keyheight = unit(2, units = "mm"), keywidth=unit(6, units = "mm"),
label.position = "bottom", title.position = 'top', nrow=1)) +
theme_void() +
theme(plot.title = element_text(size = 14, hjust = 0.05, face = "bold", color = "#4e4d47"),
plot.caption = element_text(size = 10, hjust = 0.97, vjust = 1.2, color = "#4e4d47"),
legend.position = c(0.11, 0.01),
plot.background = element_rect(fill = "#f5f5f2", color = NA)) +
scale_colour_brewer(palette = "Set1") +
labs(title = "Number of People Who Died of Terrorist Attacks in",
caption="Source: start.umd.edu | By Martin Stepanek")
gganimate(map, ani.width = 900, ani.height = 500)
ggplot(skilled, aes(x=iyear, y=killed)) +
geom_line(color = rev(myPalette(1))) +
scale_x_continuous(breaks = seq(1970, 2016, 2)) +
labs(y="Killed", title="Terrorist Attacks from 1970 to 2016", x="Year") +
theme_minimal() +
theme(plot.title = element_text(size = 14, face = "bold", color = "#4e4d47"),
axis.title = element_text(size = 8, color = "#4e4d47"),
axis.title.y = element_text(margin=margin(t = 0, r = 10, b = 0, l = 0)),
axis.title.x = element_text(margin=margin(t = 10, r = 0, b = 0, l = 0)),
plot.background = element_rect(fill = "#f5f5f2", color = NA))
#scatter-plot
gtscatter1 <- gt %>%
filter(crit1 == 1, crit2 == 1, nkill > 0) %>%
group_by(region_txt, iyear, nkill) %>%
summarise(
count = n(),
) %>%
mutate(
killed = nkill * count
) %>%
group_by(region_txt, iyear) %>%
summarise(killed = sum(killed)) %>%
mutate(
id = paste(region_txt, iyear, sep="")
)
gtscatter2 <- gt %>%
filter(crit1 == 1, crit2 == 1, nkill > 0) %>%
group_by(region_txt, iyear) %>%
summarise(
count = n(),
) %>%
mutate(
id = paste(region_txt, iyear, sep="")
)
scattergrouped <- inner_join(gtscatter1, gtscatter2, by="id") %>%
select("region_txt.x", "iyear.x", "killed", "count") %>%
mutate(ratio = killed / count) %>%
filter(region_txt.x == "Middle East & North Africa")
scattergrouped2 <- inner_join(gtscatter1, gtscatter2, by="id") %>%
select("region_txt.x", "iyear.x", "killed", "count") %>%
mutate(ratio = killed / count) %>%
filter(region_txt.x == "Sub-Saharan Africa")
scattergrouped3 <- inner_join(gtscatter1, gtscatter2, by="id") %>%
select("region_txt.x", "iyear.x", "killed", "count") %>%
mutate(ratio = killed / count) %>%
filter(region_txt.x == "South Asia")
scattergrouped4 <- inner_join(gtscatter1, gtscatter2, by="id") %>%
select("region_txt.x", "iyear.x", "killed", "count") %>%
mutate(ratio = killed / count) %>%
filter(region_txt.x == "North America")
scattergrouped5 <- inner_join(gtscatter1, gtscatter2, by="id") %>%
select("region_txt.x", "iyear.x", "killed", "count") %>%
mutate(ratio = killed / count) %>%
filter(region_txt.x == "Western Europe")
ggplot() +
geom_line(data = skilled, aes(x=iyear, y=killed, group = 1, color = "#B30000")) +
geom_line(data = scattergrouped, aes(x = iyear.x, y = killed, group =1, color = "#e6550d")) +
geom_line(data = scattergrouped2, aes(x = iyear.x, y = killed, group =1, color = "#FDD49E")) +
geom_line(data = scattergrouped3, aes(x = iyear.x, y = killed, group =1, color = "#FDBB84")) +
geom_line(data = scattergrouped4, aes(x = iyear.x, y = killed, group =1, color = "black")) +
geom_line(data = scattergrouped5, aes(x = iyear.x, y = killed, group =1, color = "grey")) +
scale_x_continuous(breaks = seq(1970, 2016, 2)) +
labs(y="Killed", title="Terrorist Attacks from 1970 to 2016", x="Year") +
theme_minimal() +
theme(plot.title = element_text(size = 14, face = "bold", color = "#4e4d47"),
axis.title = element_text(size = 8, color = "#4e4d47"),
axis.title.y = element_text(margin=margin(t = 0, r = 10, b = 0, l = 0)),
axis.title.x = element_text(margin=margin(t = 10, r = 0, b = 0, l = 0)),
plot.background = element_rect(fill = "#f5f5f2", color = NA),
legend.position = "top",
legend.direction = "horizontal",
legend.justification = c(-0.01,0)) +
scale_colour_manual(name = '',
values =c('#B30000'='#B30000','#e6550d'='#e6550d', "#FDD49E" = "#FDD49E",
"#FDBB84" = "#FDBB84", "black" = "black", "grey" = "grey"),
labels = c('Total','Middle East & North Africa', "Sub-Saharan Africa",
"South Asia","North America","Western Europe"))
geom_line(data = alcohol, aes(x = Year, y = count, group = 1), color = "red")
ggplot(scattergrouped, aes(reorder(region_txt.x, -ratio), weight=ratio)) +
geom_bar(width=0.75) +
labs(title = "Number of suicides per 100.000 people ", x="State", y="Suicides") +
theme_minimal() +
theme(plot.title = element_text(size = 14, face = "bold"), axis.title = element_text(size = 8),
axis.title.y = element_text(margin=margin(t = 0, r = 10, b = 0, l = 0)),
axis.title.x = element_text(margin=margin(t = 10, r = 0, b = 0, l = 0)))
|
/terrorism.R
|
no_license
|
stepanekm/global-terrorism
|
R
| false
| false
| 7,232
|
r
|
# --- Global Terrorism Database (GTD) visualisations -------------------------
# (Duplicate copy of the same script stored in this file.)
# Reads a GTD export (data/database.csv) and produces:
#   * a bar chart of total deaths for the worst-hit countries,
#   * an animated world choropleth of deaths per country and year,
#   * line charts of deaths over time (total and per region),
#   * a bar chart of deaths per attack for one region.
# Fixes in this revision: removed a stray `geom_line(data = alcohol, ...)`
# expression that referenced an undefined object (and errors at run time),
# and corrected chart titles/labels that were copy-pasted from an unrelated
# suicides plot.
library(dplyr)
library(tidyverse)
library(mapdata)
library(maps)
library(RColorBrewer)
library(gganimate)

gt <- read_csv("data/database.csv")

# World polygons for the base map; drop Antarctica and align country names
# with the GTD's country_txt spelling.
worldmap <- map_data("world")
newworld <- worldmap %>%
  filter(region != "Antarctica")
newworld$region <- recode(newworld$region,
                          'USA' = 'United States',
                          'UK' = 'United Kingdom')

# Grey base map with no axes or background.
world <- ggplot() +
  geom_polygon(data = newworld, aes(x = long, y = lat, group = group),
               fill = "grey", color = "#4e4d47") +
  coord_quickmap() +
  theme_void()

# Deaths per country and year: events are grouped by their nkill value,
# counted, and count * nkill summed per country/year.
gtclean <- gt %>%
  filter(crit1 == 1, crit2 == 1, nkill > 0) %>%
  group_by(country_txt, iyear, nkill) %>%
  summarise(count = n()) %>%
  mutate(killed = nkill * count) %>%
  group_by(country_txt, iyear) %>%
  summarise(killed = sum(killed))

# Countries with more than 11,000 total deaths, for the bar chart below.
skilled2 <- gtclean %>%
  group_by(country_txt) %>%
  summarise(killed = sum(killed)) %>%
  filter(killed > 11000) %>%
  arrange(desc(killed))

ggplot(skilled2, aes(reorder(country_txt, -killed), weight = killed)) +
  geom_bar(width = 0.75) +
  labs(title = "Total Deaths from Terrorist Attacks by Country",
       x = "Country", y = "Killed") +
  theme_minimal() +
  theme(plot.title = element_text(size = 14, face = "bold"),
        axis.title = element_text(size = 8),
        axis.title.y = element_text(margin = margin(t = 0, r = 10, b = 0, l = 0)),
        axis.title.x = element_text(margin = margin(t = 10, r = 0, b = 0, l = 0)))

# Total deaths per year, for the line chart further down.
skilled <- gtclean %>%
  group_by(iyear) %>%
  summarise(killed = sum(killed))

# Join fatality counts onto the map polygons (one row per polygon vertex).
grouped <- inner_join(newworld, gtclean, by = c('region' = 'country_txt')) %>%
  filter(region != "Antarctica")

myPalette <- colorRampPalette(rev(brewer.pal(6, "OrRd")))

# Animated choropleth: one frame per year (gganimate v0.x `frame` aesthetic).
map <- world +
  geom_polygon(data = grouped,
               aes(x = long, y = lat, group = group, fill = killed, frame = iyear),
               color = "#4e4d47") +
  coord_quickmap() +
  scale_fill_gradientn(colours = rev(myPalette(5)),
                       na.value = "#4e4d47",
                       breaks = c(1, 10, 50, 200, 1000, 8000),
                       trans = "log10",
                       name = "People Killed",
                       guide = guide_legend(keyheight = unit(2, units = "mm"),
                                            keywidth = unit(6, units = "mm"),
                                            label.position = "bottom",
                                            title.position = 'top', nrow = 1)) +
  theme_void() +
  theme(plot.title = element_text(size = 14, hjust = 0.05, face = "bold", color = "#4e4d47"),
        plot.caption = element_text(size = 10, hjust = 0.97, vjust = 1.2, color = "#4e4d47"),
        legend.position = c(0.11, 0.01),
        plot.background = element_rect(fill = "#f5f5f2", color = NA)) +
  scale_colour_brewer(palette = "Set1") +
  labs(title = "Number of People Who Died of Terrorist Attacks in",
       caption = "Source: start.umd.edu | By Martin Stepanek")
gganimate(map, ani.width = 900, ani.height = 500)

# Total deaths per year as a single line.
ggplot(skilled, aes(x = iyear, y = killed)) +
  geom_line(color = rev(myPalette(1))) +
  scale_x_continuous(breaks = seq(1970, 2016, 2)) +
  labs(y = "Killed", title = "Terrorist Attacks from 1970 to 2016", x = "Year") +
  theme_minimal() +
  theme(plot.title = element_text(size = 14, face = "bold", color = "#4e4d47"),
        axis.title = element_text(size = 8, color = "#4e4d47"),
        axis.title.y = element_text(margin = margin(t = 0, r = 10, b = 0, l = 0)),
        axis.title.x = element_text(margin = margin(t = 10, r = 0, b = 0, l = 0)),
        plot.background = element_rect(fill = "#f5f5f2", color = NA))

# Deaths per region/year (gtscatter1) and attacks per region/year
# (gtscatter2); `id` is a region+year key so the two can be joined.
gtscatter1 <- gt %>%
  filter(crit1 == 1, crit2 == 1, nkill > 0) %>%
  group_by(region_txt, iyear, nkill) %>%
  summarise(count = n()) %>%
  mutate(killed = nkill * count) %>%
  group_by(region_txt, iyear) %>%
  summarise(killed = sum(killed)) %>%
  mutate(id = paste(region_txt, iyear, sep = ""))

gtscatter2 <- gt %>%
  filter(crit1 == 1, crit2 == 1, nkill > 0) %>%
  group_by(region_txt, iyear) %>%
  summarise(count = n()) %>%
  mutate(id = paste(region_txt, iyear, sep = ""))

# Deaths-per-attack ratios, one data frame per region of interest.
scattergrouped <- inner_join(gtscatter1, gtscatter2, by = "id") %>%
  select("region_txt.x", "iyear.x", "killed", "count") %>%
  mutate(ratio = killed / count) %>%
  filter(region_txt.x == "Middle East & North Africa")
scattergrouped2 <- inner_join(gtscatter1, gtscatter2, by = "id") %>%
  select("region_txt.x", "iyear.x", "killed", "count") %>%
  mutate(ratio = killed / count) %>%
  filter(region_txt.x == "Sub-Saharan Africa")
scattergrouped3 <- inner_join(gtscatter1, gtscatter2, by = "id") %>%
  select("region_txt.x", "iyear.x", "killed", "count") %>%
  mutate(ratio = killed / count) %>%
  filter(region_txt.x == "South Asia")
scattergrouped4 <- inner_join(gtscatter1, gtscatter2, by = "id") %>%
  select("region_txt.x", "iyear.x", "killed", "count") %>%
  mutate(ratio = killed / count) %>%
  filter(region_txt.x == "North America")
scattergrouped5 <- inner_join(gtscatter1, gtscatter2, by = "id") %>%
  select("region_txt.x", "iyear.x", "killed", "count") %>%
  mutate(ratio = killed / count) %>%
  filter(region_txt.x == "Western Europe")

# Total and per-region death counts over time on one chart; the colour
# strings double as legend keys via scale_colour_manual below.
ggplot() +
  geom_line(data = skilled, aes(x = iyear, y = killed, group = 1, color = "#B30000")) +
  geom_line(data = scattergrouped, aes(x = iyear.x, y = killed, group = 1, color = "#e6550d")) +
  geom_line(data = scattergrouped2, aes(x = iyear.x, y = killed, group = 1, color = "#FDD49E")) +
  geom_line(data = scattergrouped3, aes(x = iyear.x, y = killed, group = 1, color = "#FDBB84")) +
  geom_line(data = scattergrouped4, aes(x = iyear.x, y = killed, group = 1, color = "black")) +
  geom_line(data = scattergrouped5, aes(x = iyear.x, y = killed, group = 1, color = "grey")) +
  scale_x_continuous(breaks = seq(1970, 2016, 2)) +
  labs(y = "Killed", title = "Terrorist Attacks from 1970 to 2016", x = "Year") +
  theme_minimal() +
  theme(plot.title = element_text(size = 14, face = "bold", color = "#4e4d47"),
        axis.title = element_text(size = 8, color = "#4e4d47"),
        axis.title.y = element_text(margin = margin(t = 0, r = 10, b = 0, l = 0)),
        axis.title.x = element_text(margin = margin(t = 10, r = 0, b = 0, l = 0)),
        plot.background = element_rect(fill = "#f5f5f2", color = NA),
        legend.position = "top",
        legend.direction = "horizontal",
        legend.justification = c(-0.01, 0)) +
  scale_colour_manual(name = '',
                      values = c('#B30000' = '#B30000', '#e6550d' = '#e6550d',
                                 "#FDD49E" = "#FDD49E", "#FDBB84" = "#FDBB84",
                                 "black" = "black", "grey" = "grey"),
                      labels = c('Total', 'Middle East & North Africa',
                                 "Sub-Saharan Africa", "South Asia",
                                 "North America", "Western Europe"))

# (Removed) a stray `geom_line(data = alcohol, ...)` expression lived here:
# `alcohol` was never defined, and a bare geom outside ggplot() errors.

# Deaths per attack for the region selected in `scattergrouped`.
ggplot(scattergrouped, aes(reorder(region_txt.x, -ratio), weight = ratio)) +
  geom_bar(width = 0.75) +
  labs(title = "Deaths per Terrorist Attack", x = "Region", y = "Deaths per attack") +
  theme_minimal() +
  theme(plot.title = element_text(size = 14, face = "bold"),
        axis.title = element_text(size = 8),
        axis.title.y = element_text(margin = margin(t = 0, r = 10, b = 0, l = 0)),
        axis.title.x = element_text(margin = margin(t = 10, r = 0, b = 0, l = 0)))
|
# Canonical correlation analysis of head measurements (two variable sets of
# two columns each).
# NOTE(review): appears to follow a multivariate-analysis textbook chapter 8
# example ("chap8headsize.dat"); data are loaded from a hard-coded local path.
headsize<-source("c:\\allwork\\rsplus\\chap8headsize.dat")$value
#
# Scale every column to unit variance so the analysis is correlation-based.
headsize.std<-sweep(headsize,2,sqrt(apply(headsize,2,var)),FUN="/")
#
#
# Split into the two variable sets (columns 1-2 vs columns 3-4).
headsize1<-headsize.std[,1:2]
headsize2<-headsize.std[,3:4]
r11<-cor(headsize1)
r22<-cor(headsize2)
# Cross-correlations between the two sets, assembled into a 2x2 matrix.
r12<-c(cor(headsize1[,1],headsize2[,1]),cor(headsize1[,1],headsize2[,2]),
cor(headsize1[,2],headsize2[,1]),cor(headsize1[,2],headsize2[,2]))
#
# NOTE(review): byrow=T relies on the reassignable shortcut T; byrow=TRUE
# is safer.
r12<-matrix(r12,ncol=2,byrow=T)
r21<-t(r12)
#
# R1 and R2 share their non-zero eigenvalues: the squared canonical
# correlations between the two variable sets.
R1<-solve(r11)%*%r12%*%solve(r22)%*%r21
R2<-solve(r22)%*%r21%*%solve(r11)%*%r12
R1
R2
#
eigen(R1)
eigen(R2)
#
# The canonical correlations themselves.
sqrt(eigen(R1)$values)
#
# Canonical variates built from rounded eigenvector coefficients: "girth"
# and "shape" combinations for each variable set.
girth1<-0.69*headsize.std[,1]+0.72*headsize.std[,2]
girth2<-0.74*headsize.std[,3]+0.67*headsize.std[,4]
shape1<-0.71*headsize.std[,1]-0.71*headsize.std[,2]
shape2<-0.70*headsize.std[,3]-0.71*headsize.std[,4]
cor(girth1,girth2)
cor(shape1,shape2)
#
# Scatter plots of the two pairs of canonical variates side by side.
par(mfrow=c(1,2))
plot(girth1,girth2)
plot(shape1,shape2)
#
#
#
# Second example: canonical correlations computed directly from published
# correlation matrices (2 variables in set 1, 4 variables in set 2).
# FIX: byrow is now spelled byrow=TRUE throughout; the original used the
# shortcut byrow=T twice and the typo byrow=2 once (which only worked
# because any non-zero number coerces to TRUE).
# r22: correlations among the four set-2 variables.
r22<-matrix(c(1.0,0.044,-0.106,-0.180,0.044,1.0,-0.208,-0.192,-0.106,-0.208,1.0,0.492,
-0.180,-0.192,0.492,1.0),ncol=4,byrow=TRUE)
# r11: correlations among the two set-1 variables.
r11<-matrix(c(1.0,0.212,0.212,1.0),ncol=2,byrow=TRUE)
# r12: cross-correlations, set 1 (rows) by set 2 (columns).
r12<-matrix(c(0.124,-0.164,-0.101,-0.158,0.098,0.308,-0.270,-0.183),ncol=4,byrow=TRUE)
r21<-t(r12)
#
# E1 (2x2) and E2 (4x4) share their non-zero eigenvalues: the squared
# canonical correlations between the two variable sets.
E1<-solve(r11)%*%r12%*%solve(r22)%*%r21
E2<-solve(r22)%*%r21%*%solve(r11)%*%r12
#
E1
E2
#
eigen(E1)
eigen(E2)
|
/RSPCMA/R/rsplus8.r
|
no_license
|
lbraun/applied_mathematics
|
R
| false
| false
| 1,266
|
r
|
# Canonical correlation analysis of head measurements.
# (Duplicate copy of the same script stored in this file.)
headsize<-source("c:\\allwork\\rsplus\\chap8headsize.dat")$value
#
# Scale every column to unit variance so the analysis is correlation-based.
headsize.std<-sweep(headsize,2,sqrt(apply(headsize,2,var)),FUN="/")
#
#
# Split into the two variable sets (columns 1-2 vs columns 3-4).
headsize1<-headsize.std[,1:2]
headsize2<-headsize.std[,3:4]
r11<-cor(headsize1)
r22<-cor(headsize2)
# Cross-correlations between the two sets, assembled into a 2x2 matrix.
r12<-c(cor(headsize1[,1],headsize2[,1]),cor(headsize1[,1],headsize2[,2]),
cor(headsize1[,2],headsize2[,1]),cor(headsize1[,2],headsize2[,2]))
#
r12<-matrix(r12,ncol=2,byrow=T)
r21<-t(r12)
#
# R1 and R2 share their non-zero eigenvalues: the squared canonical
# correlations between the two variable sets.
R1<-solve(r11)%*%r12%*%solve(r22)%*%r21
R2<-solve(r22)%*%r21%*%solve(r11)%*%r12
R1
R2
#
eigen(R1)
eigen(R2)
#
# The canonical correlations themselves.
sqrt(eigen(R1)$values)
#
# Canonical variates built from rounded eigenvector coefficients.
girth1<-0.69*headsize.std[,1]+0.72*headsize.std[,2]
girth2<-0.74*headsize.std[,3]+0.67*headsize.std[,4]
shape1<-0.71*headsize.std[,1]-0.71*headsize.std[,2]
shape2<-0.70*headsize.std[,3]-0.71*headsize.std[,4]
cor(girth1,girth2)
cor(shape1,shape2)
#
# Scatter plots of the two pairs of canonical variates side by side.
par(mfrow=c(1,2))
plot(girth1,girth2)
plot(shape1,shape2)
#
#
#
# Second example: canonical correlations from published correlation
# matrices (2 variables in set 1, 4 in set 2).
# NOTE(review): byrow=T and the typo byrow=2 below rely on shortcut/coercion
# behaviour; prefer byrow=TRUE (fixed in the first copy of this script).
r22<-matrix(c(1.0,0.044,-0.106,-0.180,0.044,1.0,-0.208,-0.192,-0.106,-0.208,1.0,0.492,
-0.180,-0.192,0.492,1.0),ncol=4,byrow=T)
r11<-matrix(c(1.0,0.212,0.212,1.0),ncol=2,byrow=2)
r12<-matrix(c(0.124,-0.164,-0.101,-0.158,0.098,0.308,-0.270,-0.183),ncol=4,byrow=T)
r21<-t(r12)
#
# E1 and E2 share their non-zero eigenvalues: squared canonical correlations.
E1<-solve(r11)%*%r12%*%solve(r22)%*%r21
E2<-solve(r22)%*%r21%*%solve(r11)%*%r12
#
E1
E2
#
eigen(E1)
eigen(E2)
|
# --- Shiny app: point-and-click linear / logistic modelling of a CSV upload --
# The user uploads a CSV, picks a dependent variable (numeric -> lm,
# categorical -> binomial glm), up to two predictors, and an optional plot.
# Allow uploads up to 50 MB.
options(shiny.maxRequestSize=50*1024^2)
library(shiny)
library(broom)
library(gt)
library(tidyverse)
library(shinythemes)
# UI: the sidebar holds the upload control, separator choice, variable
# pickers (rendered server-side once the file is read), plot options and a
# Go button; the main panel shows the coefficient table and the plot.
ui <- fluidPage(theme = shinytheme("cerulean"),
# Application title
titlePanel("Linear Modelling"),
sidebarLayout(
sidebarPanel(width=3,
fileInput("file","Select file", accept = ".csv"),
#selectInput("sep",label="Separator",choices = c(",",";")),
radioButtons("sep", "Separator",
choices = c(Comma = ",",
Semicolon = ";"),
selected = ","),
hr(),
uiOutput("varselect1"),
selectInput("type",label="Type",choices = c("numeric","categorical")),
uiOutput("varselect2"),
uiOutput("varselect3"),
hr(),
selectInput("plotting",label="Plot",choices = c("none","box","scatter")),
uiOutput("varselect4"),
hr(),
actionButton("go", "Go")
),
# Main panel: model summary table (enlarged) above the plot.
mainPanel(
div(tableOutput("lm"), style = "font-size:180%"),
hr(),
plotOutput("plot",height = "800px")
)
)
)
# Server: read the upload reactively, populate the variable dropdowns from
# its column names, and fit/plot only when the Go button is pressed.
server <- function(input, output) {
# Uploaded CSV parsed with the separator chosen in the UI; NULL until a
# file has been selected.
df <- reactive({
file <- input$file
if (is.null(file)) {
return(NULL)
}
if (input$sep == ",") {
read_csv(file$datapath)
}
else if (input$sep == ";") {
read_csv2(file$datapath)
}
})
# The four variable pickers are rendered server-side from the data's
# column names.
output$varselect1 <- renderUI({
cols <- names(df())
selectInput("dv", "Dependent variable",choices=cols)
})
output$varselect2 <- renderUI({
cols <- names(df())
selectInput("iv1", "Independent variable 1",choices=cols)
})
output$varselect3 <- renderUI({
cols <- names(df())
selectInput("iv2", "Independent variable 2",choices=c("none",cols))
})
output$varselect4 <- renderUI({
cols <- names(df())
selectInput("facet", "Facet",choices=c("none",cols))
})
# Model fit, triggered by Go: lm() when the response type is "numeric",
# binomial glm() otherwise; one or two predictors depending on the second
# selector.
mod <- eventReactive(input$go, {
if (input$iv2 == "none" & input$type == "numeric") {
lm(reformulate(termlabels = c(input$iv1), response = input$dv),data=df())
}
else if (input$iv2 != "none" & input$type == "numeric") {
lm(reformulate(termlabels = c(input$iv1,input$iv2), response = input$dv),data=df())
}
else if (input$iv2 == "none" & !input$type == "numeric") {
glm(reformulate(termlabels = c(input$iv1), response = input$dv),
family="binomial",data=df())
}
else if (input$iv2 != "none" & !input$type == "numeric") {
glm(reformulate(termlabels = c(input$iv1,input$iv2), response = input$dv),
family="binomial",data=df())
}
})
# Plot, triggered by Go: scatter or box plot with an optional facet.
# NOTE(review): this local `plot` shadows base::plot inside server().
plot <- eventReactive(input$go, {
if(input$plotting == "none"){}
else if(input$plotting == "scatter") {
df() %>%
ggplot(aes_string(input$iv1,input$dv)) +
theme_minimal(base_size=16) +
geom_point(alpha= .7) +
if(input$facet != "none") {
facet_wrap(~get(input$facet))
} else{NULL}
}
else if(input$plotting == "box") {
df() %>%
ggplot(aes_string(input$iv1,input$dv,group=input$iv1)) +
theme_minimal(base_size=16) +
geom_point(alpha= .2) +
geom_boxplot(alpha=0,outlier.shape = NA) +
if(input$facet != "none") {
facet_wrap(~get(input$facet))
} else{NULL}
}
})
# Coefficient table, rounded to 3 decimals.
# NOTE(review): renderTable() expects a data frame; the trailing gt() call
# returns a gt_tbl -- confirm the table renders as intended.
output$lm <- renderTable(width="200px",{
mod() %>%
tidy() %>%
mutate_if(is.numeric,round,3) %>%
gt()
})
output$plot <- renderPlot({
plot()
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
PeerChristensen/eLMo
|
R
| false
| false
| 4,283
|
r
|
# --- Shiny app: point-and-click linear / logistic modelling of a CSV upload --
# (Duplicate copy of the same app stored in this file.)
# Allow uploads up to 50 MB.
options(shiny.maxRequestSize=50*1024^2)
library(shiny)
library(broom)
library(gt)
library(tidyverse)
library(shinythemes)
# UI: the sidebar holds the upload control, separator choice, variable
# pickers (rendered server-side once the file is read), plot options and a
# Go button; the main panel shows the coefficient table and the plot.
ui <- fluidPage(theme = shinytheme("cerulean"),
# Application title
titlePanel("Linear Modelling"),
sidebarLayout(
sidebarPanel(width=3,
fileInput("file","Select file", accept = ".csv"),
#selectInput("sep",label="Separator",choices = c(",",";")),
radioButtons("sep", "Separator",
choices = c(Comma = ",",
Semicolon = ";"),
selected = ","),
hr(),
uiOutput("varselect1"),
selectInput("type",label="Type",choices = c("numeric","categorical")),
uiOutput("varselect2"),
uiOutput("varselect3"),
hr(),
selectInput("plotting",label="Plot",choices = c("none","box","scatter")),
uiOutput("varselect4"),
hr(),
actionButton("go", "Go")
),
# Main panel: model summary table (enlarged) above the plot.
mainPanel(
div(tableOutput("lm"), style = "font-size:180%"),
hr(),
plotOutput("plot",height = "800px")
)
)
)
# Server: read the upload reactively, populate the variable dropdowns from
# its column names, and fit/plot only when the Go button is pressed.
server <- function(input, output) {
# Uploaded CSV parsed with the separator chosen in the UI; NULL until a
# file has been selected.
df <- reactive({
file <- input$file
if (is.null(file)) {
return(NULL)
}
if (input$sep == ",") {
read_csv(file$datapath)
}
else if (input$sep == ";") {
read_csv2(file$datapath)
}
})
# The four variable pickers are rendered server-side from the data's
# column names.
output$varselect1 <- renderUI({
cols <- names(df())
selectInput("dv", "Dependent variable",choices=cols)
})
output$varselect2 <- renderUI({
cols <- names(df())
selectInput("iv1", "Independent variable 1",choices=cols)
})
output$varselect3 <- renderUI({
cols <- names(df())
selectInput("iv2", "Independent variable 2",choices=c("none",cols))
})
output$varselect4 <- renderUI({
cols <- names(df())
selectInput("facet", "Facet",choices=c("none",cols))
})
# Model fit, triggered by Go: lm() when the response type is "numeric",
# binomial glm() otherwise; one or two predictors depending on the second
# selector.
mod <- eventReactive(input$go, {
if (input$iv2 == "none" & input$type == "numeric") {
lm(reformulate(termlabels = c(input$iv1), response = input$dv),data=df())
}
else if (input$iv2 != "none" & input$type == "numeric") {
lm(reformulate(termlabels = c(input$iv1,input$iv2), response = input$dv),data=df())
}
else if (input$iv2 == "none" & !input$type == "numeric") {
glm(reformulate(termlabels = c(input$iv1), response = input$dv),
family="binomial",data=df())
}
else if (input$iv2 != "none" & !input$type == "numeric") {
glm(reformulate(termlabels = c(input$iv1,input$iv2), response = input$dv),
family="binomial",data=df())
}
})
# Plot, triggered by Go: scatter or box plot with an optional facet.
# NOTE(review): this local `plot` shadows base::plot inside server().
plot <- eventReactive(input$go, {
if(input$plotting == "none"){}
else if(input$plotting == "scatter") {
df() %>%
ggplot(aes_string(input$iv1,input$dv)) +
theme_minimal(base_size=16) +
geom_point(alpha= .7) +
if(input$facet != "none") {
facet_wrap(~get(input$facet))
} else{NULL}
}
else if(input$plotting == "box") {
df() %>%
ggplot(aes_string(input$iv1,input$dv,group=input$iv1)) +
theme_minimal(base_size=16) +
geom_point(alpha= .2) +
geom_boxplot(alpha=0,outlier.shape = NA) +
if(input$facet != "none") {
facet_wrap(~get(input$facet))
} else{NULL}
}
})
# Coefficient table, rounded to 3 decimals.
# NOTE(review): renderTable() expects a data frame; the trailing gt() call
# returns a gt_tbl -- confirm the table renders as intended.
output$lm <- renderTable(width="200px",{
mod() %>%
tidy() %>%
mutate_if(is.numeric,round,3) %>%
gt()
})
output$plot <- renderPlot({
plot()
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
# --- Amazon Product Advertising API access (legacy REST + HMAC signing) ----
# NOTE(review): rm(list=ls()) wipes the user's entire workspace; avoid in
# shared scripts.
rm(list=ls())
## NOTE: TO RUN THE SEARCH CODE
## YOU WILL HAVE TO USE YOUR OWN API ACCESS INFO
library("XML")
library("RCurl")
search.amazon <- function(Keywords, SearchIndex = 'All', AWSAccessKeyId, AWSsecretkey, AssociateTag, ResponseGroup, Operation = 'ItemSearch'){
  ## Query the (legacy) Amazon Product Advertising REST API and return the
  ## raw XML response as a character string.
  ##
  ## Keywords        search terms or product id
  ## SearchIndex     Amazon search index; validated against the known set
  ## AWSAccessKeyId  AWS access key (character)
  ## AWSsecretkey    AWS secret key (character), used to HMAC-SHA256 sign
  ##                 the request
  ## AssociateTag    Amazon Associates tag
  ## ResponseGroup   one or more response groups; validated
  ## Operation       API operation, default 'ItemSearch'
  library(digest)
  library(RCurl)
  base.html.string <- "http://ecs.amazonaws.com/onca/xml?"
  SearchIndex <- match.arg(SearchIndex, c(
    'All', 'Apparel', 'Appliances', 'ArtsAndCrafts', 'Automotive', 'Baby',
    'Beauty', 'Blended', 'Books', 'Classical', 'DigitalMusic', 'DVD',
    'Electronics', 'ForeignBooks', 'Garden', 'GourmetFood', 'Grocery',
    'HealthPersonalCare', 'Hobbies', 'HomeGarden', 'HomeImprovement',
    'Industrial', 'Jewelry', 'KindleStore', 'Kitchen', 'Lighting',
    'Magazines', 'Marketplace', 'Miscellaneous', 'MobileApps',
    'MP3Downloads', 'Music', 'MusicalInstruments', 'MusicTracks',
    'OfficeProducts', 'OutdoorLiving', 'Outlet', 'PCHardware',
    'PetSupplies', 'Photo', 'Shoes', 'Software', 'SoftwareVideoGames',
    'SportingGoods', 'Tools', 'Toys', 'UnboxVideo', 'VHS', 'Video',
    'VideoGames', 'Watches', 'Wireless', 'WirelessAccessories'))
  Operation <- match.arg(Operation, c(
    'ItemSearch', 'ItemLookup', 'BrowseNodeLookup', 'CartAdd', 'CartClear',
    'CartCreate', 'CartGet', 'CartModify', 'SimilarityLookup'))
  ResponseGroup <- match.arg(ResponseGroup, c(
    'Accessories', 'AlternateVersions', 'BrowseNodeInfo', 'BrowseNodes',
    'Cart', 'CartNewReleases', 'CartTopSellers', 'CartSimilarities',
    'Collections', 'EditorialReview', 'Images', 'ItemAttributes', 'ItemIds',
    'Large', 'Medium', 'MostGifted', 'MostWishedFor', 'NewReleases',
    'OfferFull', 'OfferListings', 'Offers', 'OfferSummary',
    'PromotionSummary', 'RelatedItems', 'Request', 'Reviews', 'SalesRank',
    'SearchBins', 'Similarities', 'Small', 'TopSellers', 'Tracks',
    'Variations', 'VariationImages', 'VariationMatrix', 'VariationOffers',
    'VariationSummary'),
    several.ok = TRUE)
  version.request = '2011-08-01'
  if(!is.character(AWSsecretkey)){
    message('The AWSsecretkey should be entered as a character vector, ie be quoted')
  }
  ## Amazon requires a GMT ISO-8601 timestamp inside the signed string.
  ## BUG FIX: tz must be a character string; the original passed the
  ## function object Sys.timezone (missing parentheses), making
  ## as.POSIXct() error at run time.
  pb.txt <- Sys.time()
  pb.date <- as.POSIXct(pb.txt, tz = Sys.timezone())
  Timestamp = strtrim(format(pb.date, tz = "GMT", usetz = TRUE, "%Y-%m-%dT%H:%M:%S.000Z"), 24)
  ## Canonical string-to-sign (alphabetically ordered parameters).
  ## NOTE(review): Keywords is URL-escaped here but not in AmazonURL below,
  ## so values with special characters would break the signature match.
  str = paste('GET\necs.amazonaws.com\n/onca/xml\n',
              'AWSAccessKeyId=', curlEscape(AWSAccessKeyId),
              '&AssociateTag=', AssociateTag,
              '&Keywords=', curlEscape(Keywords),
              '&Operation=', curlEscape(Operation),
              '&ResponseGroup=', curlEscape(ResponseGroup),
              '&SearchIndex=', curlEscape(SearchIndex),
              '&Service=AWSECommerceService',
              '&Timestamp=', gsub('%2E','.',gsub('%2D', '-', curlEscape(Timestamp))),
              '&Version=', version.request,
              sep = '')
  ## HMAC-SHA256 signature over the canonical string, base64- then
  ## URL-encoded.
  Signature = curlEscape(base64(hmac( enc2utf8((AWSsecretkey)), enc2utf8(str), algo = 'sha256', serialize = FALSE, raw = TRUE)))
  AmazonURL <- paste(base.html.string,
                     'AWSAccessKeyId=', AWSAccessKeyId,
                     '&AssociateTag=', AssociateTag,
                     '&Keywords=', Keywords,
                     '&Operation=',Operation,
                     '&ResponseGroup=',ResponseGroup,
                     '&SearchIndex=', SearchIndex,
                     '&Service=AWSECommerceService',
                     '&Timestamp=', Timestamp,
                     '&Version=', version.request,
                     '&Signature=', Signature,
                     sep = '')
  AmazonResult <- getURL(AmazonURL)
  return(AmazonResult)
}
###Function for Movie Poster:
# NOTE(review): productID, AWSAccessKeyId, AWSsecretkey and AssociateTag
# must already exist in the workspace; the self-assignments below (e.g.
# AWSAccessKeyId=AWSAccessKeyId) are no-ops and error if they are undefined.
Keywords=productID
AWSAccessKeyId=AWSAccessKeyId
AWSsecretkey=AWSsecretkey
AssociateTag=AssociateTag
getPicture<-function(productid){
  # Fetch the 'Images' response group for an Amazon product id and return
  # the URL (first child value) of the medium-sized image.
  # Relies on the globals AWSAccessKeyId / AWSsecretkey / AssociateTag.
  id_chr <- as.character(productid)
  raw_xml <- search.amazon(Keywords = id_chr, ResponseGroup = 'Images',
                           AWSAccessKeyId = AWSAccessKeyId,
                           AWSsecretkey = AWSsecretkey,
                           AssociateTag = AssociateTag)
  parsed <- xmlParse(raw_xml)
  img_node <- xmlRoot(parsed)[["Items"]][["Item"]][["ImageSets"]][["ImageSet"]][["MediumImage"]]
  img_vals <- as.character(sapply(xmlChildren(img_node), function(node) xmlValue(node)))
  img_vals[1]
}
# Smoke-test the image lookup with the current product id.
getPicture(Keywords)
###Function for Director:
getDirector<-function(productid){
  ## Return the Director attribute for an Amazon product id.
  ## BUG FIX: this function was a verbatim copy of getPicture() -- it
  ## requested the 'Images' response group and returned the MediumImage
  ## URL instead of the director.  It now requests ItemAttributes and
  ## reads the Director node (mirroring the generic getInfo() helper).
  productid<-as.character(productid)
  gg<-search.amazon(Keywords=productid,ResponseGroup = 'ItemAttributes',AWSAccessKeyId=AWSAccessKeyId,AWSsecretkey=AWSsecretkey, AssociateTag=AssociateTag)
  doc<-xmlParse(gg)
  dirnode = xmlRoot(doc)[["Items"]][["Item"]][["ItemAttributes"]][["Director"]]
  dirvalue<-as.character(sapply(xmlChildren(dirnode), function(node) xmlValue(node)))
  return(dirvalue[1])
}
# NOTE(review): presumably meant to call getDirector(Keywords) here (this
# is the "Director" section) -- confirm.
getPicture(Keywords)
# NOTE(review): `gg` is local to the helper functions above, so this line
# fails unless a global `gg` is left over from an interactive session.
doc<-xmlParse(gg)
getInfo<-function(productid,att){
  # Generic ItemAttributes lookup: fetch the ItemAttributes response group
  # for `productid` and return the text of every child of the `att` node
  # as a character vector.
  # Relies on the globals AWSAccessKeyId / AWSsecretkey / AssociateTag.
  id_chr <- as.character(productid)
  raw_xml <- search.amazon(Keywords = id_chr, ResponseGroup = 'ItemAttributes',
                           AWSAccessKeyId = AWSAccessKeyId,
                           AWSsecretkey = AWSsecretkey,
                           AssociateTag = AssociateTag)
  parsed <- xmlParse(raw_xml)
  att_node <- xmlRoot(parsed)[["Items"]][["Item"]][["ItemAttributes"]][[att]]
  as.character(sapply(xmlChildren(att_node), function(node) xmlValue(node)))
}
# Pull individual attributes via the generic getInfo() helper.
director<-getInfo(Keywords,"Director")
actor<-getInfo(Keywords,"Actor")
# NOTE(review): in the sketches below, assigning inside the error handler
# does not set the outer variable; use
#   title <- tryCatch(output("Title"), error = function(err) NA)
# instead.
#####Read as NA if no Genre/Title
# tryCatch({
# title<- output("Title")
# },
# error =function(err){title<-NA})
# tryCatch({
# genre<-output("Genre")
# },
# error =function(err){genre<-NA})
# actors<-output("Actor")
# Might need to bind PRODUCT ID
# product_i<-cbind(title,genre)
# Example of getting an info
# titlenode=xmlRoot(doc)[["Items"]][["Item"]][["ItemAttributes"]][["Title"]]
# title<-as.character(sapply(xmlChildren(titlenode), function(node) xmlValue(node)))
|
/lib/api_access.r
|
no_license
|
lleiou/A-Movie-For-You
|
R
| false
| false
| 10,036
|
r
|
# --- Amazon Product Advertising API access (duplicate copy) ----------------
# NOTE(review): rm(list=ls()) wipes the user's entire workspace; avoid in
# shared scripts.
rm(list=ls())
## NOTE: TO RUN THE SEARCH CODE
## YOU WILL HAVE TO USE YOUR OWN API ACCESS INFO
library("XML")
library("RCurl")
search.amazon <- function(Keywords, SearchIndex = 'All', AWSAccessKeyId, AWSsecretkey, AssociateTag, ResponseGroup, Operation = 'ItemSearch'){
  ## Query the (legacy) Amazon Product Advertising REST API and return the
  ## raw XML response as a character string.
  ## (Duplicate copy of the same function stored in this file.)
  ##
  ## Keywords        search terms or product id
  ## SearchIndex     Amazon search index; validated against the known set
  ## AWSAccessKeyId  AWS access key (character)
  ## AWSsecretkey    AWS secret key (character), used to HMAC-SHA256 sign
  ##                 the request
  ## AssociateTag    Amazon Associates tag
  ## ResponseGroup   one or more response groups; validated
  ## Operation       API operation, default 'ItemSearch'
  library(digest)
  library(RCurl)
  base.html.string <- "http://ecs.amazonaws.com/onca/xml?"
  SearchIndex <- match.arg(SearchIndex, c(
    'All', 'Apparel', 'Appliances', 'ArtsAndCrafts', 'Automotive', 'Baby',
    'Beauty', 'Blended', 'Books', 'Classical', 'DigitalMusic', 'DVD',
    'Electronics', 'ForeignBooks', 'Garden', 'GourmetFood', 'Grocery',
    'HealthPersonalCare', 'Hobbies', 'HomeGarden', 'HomeImprovement',
    'Industrial', 'Jewelry', 'KindleStore', 'Kitchen', 'Lighting',
    'Magazines', 'Marketplace', 'Miscellaneous', 'MobileApps',
    'MP3Downloads', 'Music', 'MusicalInstruments', 'MusicTracks',
    'OfficeProducts', 'OutdoorLiving', 'Outlet', 'PCHardware',
    'PetSupplies', 'Photo', 'Shoes', 'Software', 'SoftwareVideoGames',
    'SportingGoods', 'Tools', 'Toys', 'UnboxVideo', 'VHS', 'Video',
    'VideoGames', 'Watches', 'Wireless', 'WirelessAccessories'))
  Operation <- match.arg(Operation, c(
    'ItemSearch', 'ItemLookup', 'BrowseNodeLookup', 'CartAdd', 'CartClear',
    'CartCreate', 'CartGet', 'CartModify', 'SimilarityLookup'))
  ResponseGroup <- match.arg(ResponseGroup, c(
    'Accessories', 'AlternateVersions', 'BrowseNodeInfo', 'BrowseNodes',
    'Cart', 'CartNewReleases', 'CartTopSellers', 'CartSimilarities',
    'Collections', 'EditorialReview', 'Images', 'ItemAttributes', 'ItemIds',
    'Large', 'Medium', 'MostGifted', 'MostWishedFor', 'NewReleases',
    'OfferFull', 'OfferListings', 'Offers', 'OfferSummary',
    'PromotionSummary', 'RelatedItems', 'Request', 'Reviews', 'SalesRank',
    'SearchBins', 'Similarities', 'Small', 'TopSellers', 'Tracks',
    'Variations', 'VariationImages', 'VariationMatrix', 'VariationOffers',
    'VariationSummary'),
    several.ok = TRUE)
  version.request = '2011-08-01'
  if(!is.character(AWSsecretkey)){
    message('The AWSsecretkey should be entered as a character vector, ie be quoted')
  }
  ## Amazon requires a GMT ISO-8601 timestamp inside the signed string.
  ## BUG FIX: tz must be a character string; the original passed the
  ## function object Sys.timezone (missing parentheses), making
  ## as.POSIXct() error at run time.
  pb.txt <- Sys.time()
  pb.date <- as.POSIXct(pb.txt, tz = Sys.timezone())
  Timestamp = strtrim(format(pb.date, tz = "GMT", usetz = TRUE, "%Y-%m-%dT%H:%M:%S.000Z"), 24)
  ## Canonical string-to-sign (alphabetically ordered parameters).
  ## NOTE(review): Keywords is URL-escaped here but not in AmazonURL below,
  ## so values with special characters would break the signature match.
  str = paste('GET\necs.amazonaws.com\n/onca/xml\n',
              'AWSAccessKeyId=', curlEscape(AWSAccessKeyId),
              '&AssociateTag=', AssociateTag,
              '&Keywords=', curlEscape(Keywords),
              '&Operation=', curlEscape(Operation),
              '&ResponseGroup=', curlEscape(ResponseGroup),
              '&SearchIndex=', curlEscape(SearchIndex),
              '&Service=AWSECommerceService',
              '&Timestamp=', gsub('%2E','.',gsub('%2D', '-', curlEscape(Timestamp))),
              '&Version=', version.request,
              sep = '')
  ## HMAC-SHA256 signature over the canonical string, base64- then
  ## URL-encoded.
  Signature = curlEscape(base64(hmac( enc2utf8((AWSsecretkey)), enc2utf8(str), algo = 'sha256', serialize = FALSE, raw = TRUE)))
  AmazonURL <- paste(base.html.string,
                     'AWSAccessKeyId=', AWSAccessKeyId,
                     '&AssociateTag=', AssociateTag,
                     '&Keywords=', Keywords,
                     '&Operation=',Operation,
                     '&ResponseGroup=',ResponseGroup,
                     '&SearchIndex=', SearchIndex,
                     '&Service=AWSECommerceService',
                     '&Timestamp=', Timestamp,
                     '&Version=', version.request,
                     '&Signature=', Signature,
                     sep = '')
  AmazonResult <- getURL(AmazonURL)
  return(AmazonResult)
}
###Function for Movie Poster:
# Script-level aliases binding the search keyword and AWS credentials used by
# the helper functions below.
# NOTE(review): productID, AWSAccessKeyId, AWSsecretkey and AssociateTag must
# already exist in the calling environment before this point -- confirm.
Keywords=productID
AWSAccessKeyId=AWSAccessKeyId
AWSsecretkey=AWSsecretkey
AssociateTag=AssociateTag
getPicture <- function(productid) {
  # Return the URL of the medium-size product image for `productid`.
  # Queries the Amazon Product API ('Images' response group) via the
  # script-level credentials, then extracts ImageSets/ImageSet/MediumImage.
  pid <- as.character(productid)
  raw_xml <- search.amazon(Keywords = pid, ResponseGroup = "Images",
                           AWSAccessKeyId = AWSAccessKeyId,
                           AWSsecretkey = AWSsecretkey,
                           AssociateTag = AssociateTag)
  parsed <- xmlParse(raw_xml)
  img_node <- xmlRoot(parsed)[["Items"]][["Item"]][["ImageSets"]][["ImageSet"]][["MediumImage"]]
  img_vals <- as.character(sapply(xmlChildren(img_node), function(node) xmlValue(node)))
  return(img_vals[1])
}
# Ad-hoc smoke call: fetch the poster URL for the current Keywords value.
getPicture(Keywords)
###Function for Director:
getDirector <- function(productid) {
  # Return the Director attribute for `productid`.
  # Fix: the original body was a verbatim copy of getPicture() (ResponseGroup
  # 'Images', MediumImage node) and therefore returned the poster URL instead
  # of the director.  Query ItemAttributes and read the Director node instead,
  # mirroring getInfo() below.
  productid <- as.character(productid)
  gg <- search.amazon(Keywords = productid, ResponseGroup = "ItemAttributes",
                      AWSAccessKeyId = AWSAccessKeyId,
                      AWSsecretkey = AWSsecretkey,
                      AssociateTag = AssociateTag)
  doc <- xmlParse(gg)
  dirnode <- xmlRoot(doc)[["Items"]][["Item"]][["ItemAttributes"]][["Director"]]
  dirvalue <- as.character(sapply(xmlChildren(dirnode), function(node) xmlValue(node)))
  # First entry only, matching the single-value contract of getPicture().
  return(dirvalue[1])
}
getPicture(Keywords)
# NOTE(review): `gg` is only a local variable inside the helper functions and
# is not defined at the top level, so this line errors when the script is
# sourced fresh -- looks like leftover interactive debugging; confirm/remove.
doc<-xmlParse(gg)
getInfo <- function(productid, att) {
  # Fetch one named ItemAttributes field (e.g. "Director", "Actor", "Title")
  # for `productid` and return its child values as a character vector.
  pid <- as.character(productid)
  raw_xml <- search.amazon(Keywords = pid, ResponseGroup = "ItemAttributes",
                           AWSAccessKeyId = AWSAccessKeyId,
                           AWSsecretkey = AWSsecretkey,
                           AssociateTag = AssociateTag)
  attr_node <- xmlRoot(xmlParse(raw_xml))[["Items"]][["Item"]][["ItemAttributes"]][[att]]
  values <- as.character(sapply(xmlChildren(attr_node), function(node) xmlValue(node)))
  return(values)
}
# Pull individual attributes for the current product.
director<-getInfo(Keywords,"Director")
actor<-getInfo(Keywords,"Actor")
# Sketches kept for reference: tolerate missing Genre/Title via tryCatch and
# bind attributes into a per-product row.
#####Read as NA if no Genre/Title
# tryCatch({
# title<- output("Title")
# },
# error =function(err){title<-NA})
# tryCatch({
# genre<-output("Genre")
# },
# error =function(err){genre<-NA})
# actors<-output("Actor")
# Might need to bind PRODUCT ID
# product_i<-cbind(title,genre)
# Example of getting an info
# titlenode=xmlRoot(doc)[["Items"]][["Item"]][["ItemAttributes"]][["Title"]]
# title<-as.character(sapply(xmlChildren(titlenode), function(node) xmlValue(node)))
|
chebyshev.t.recurrences <- function( n, normalized=FALSE )
{
###
### This function returns a data frame with n+1 rows and four columns
### containing the coefficients c, d, e and f of the recurrence relations
### for the order k Chebyshev polynomial of the first kind, Tk(x),
### and for orders k=0,1,...,n
###
### Parameter
### n = integer highest order
### normalized = boolean value. If true, recurrences are for normalized polynomials
###
### Changes: the scalar while-loop fill is replaced by equivalent vectorized
### assignments, and the unreachable trailing return( NULL ) is removed.
    if ( n < 0 )
        stop( "negative highest polynomial order" )
    if ( n != round( n ) )
        stop( "highest polynomial order is not integer" )
    np1 <- n + 1
    r <- data.frame( matrix( nrow=np1, ncol=4 ) )
    names( r ) <- c( "c", "d", "e", "f" )
    ### c and d are constant for every order in both variants
    r$c <- rep( 1, np1 )
    r$d <- rep( 0, np1 )
    r$e <- rep( 2, np1 )
    r$f <- rep( 1, np1 )
    if ( normalized ) {
        ### normalized: the k=0 row has e = sqrt(2) and f = 0,
        ### and the k=1 row (when present) has f = sqrt(2)
        r[1,"e"] <- sqrt( 2 )
        r[1,"f"] <- 0
        if ( np1 >= 2 )
            r[2,"f"] <- sqrt( 2 )
    }
    else {
        ### unnormalized: only the k=0 row differs, with e = 1
        r[1,"e"] <- 1
    }
    return( r )
}
|
/R/chebyshev.t.recurrences.R
|
no_license
|
cran/orthopolynom
|
R
| false
| false
| 1,594
|
r
|
chebyshev.t.recurrences <- function(n, normalized = FALSE) {
  # Build a data frame with n+1 rows and columns c, d, e, f holding the
  # recurrence coefficients of the Chebyshev polynomials of the first kind
  # Tk(x) for k = 0, 1, ..., n.  When `normalized` is TRUE the coefficients
  # are those of the normalized polynomials.
  if (n < 0)
    stop("negative highest polynomial order")
  if (n != round(n))
    stop("highest polynomial order is not integer")
  rows <- n + 1
  out <- data.frame(matrix(nrow = rows, ncol = 4))
  names(out) <- c("c", "d", "e", "f")
  if (normalized) {
    # Walk the orders k = 0..n and fill one row per order.
    for (row in seq_len(rows)) {
      order_k <- row - 1
      out[row, "c"] <- 1
      out[row, "d"] <- 0
      out[row, "e"] <- if (order_k == 0) sqrt(2) else 2
      out[row, "f"] <- if (order_k == 0) 0 else if (order_k == 1) sqrt(2) else 1
    }
  } else {
    # Unnormalized case: constant columns, except e = 1 in the k = 0 row.
    out$c <- rep(1, rows)
    out$d <- rep(0, rows)
    out$e <- rep(2, rows)
    out[1, "e"] <- 1
    out$f <- rep(1, rows)
  }
  out
}
|
# Exploratory plots of the English Wikipedia new-editor funnel.  Loads the
# monthly counts via the project loader, derives stage-to-stage conversion
# proportions, and writes one SVG per figure under new_editors/plots/.
source("loader/monthly_new_editor_article_creators.R")
months = load_monthly_new_editor_article_creators(reload=T)
months$registration_month = as.Date(months$registration_month)
# Stack one row-set per funnel transition; each holds the per-month
# conversion proportion between two consecutive stages.
normalized.relative.funnel = rbind(
months[,
list(
wiki,
registration_month,
transition = "New editors / Registered users",
prop = new_editors / registered_users
),
],
months[,
list(
wiki,
registration_month,
transition = "Page creators / New editors",
prop = new_page_creators / new_editors
),
],
months[,
list(
wiki,
registration_month,
transition = "Article page publishers / Page creators",
prop = new_article_creators / new_page_creators
),
],
months[,
list(
wiki,
registration_month,
transition = "Draft article publishers / Article page publishers",
prop = new_draft_creators / new_article_creators
),
]
)
# Fix the facet order so the panels read top-to-bottom in funnel order.
normalized.relative.funnel$transition = factor(
normalized.relative.funnel$transition,
levels = c(
"New editors / Registered users",
"Page creators / New editors",
"Article page publishers / Page creators",
"Draft article publishers / Article page publishers"
)
)
# Faceted bar chart of all four conversion proportions for enwiki,
# restricted to cohorts registered before November 2013.
svg("new_editors/plots/relative.funnel_props.enwiki.svg",
width=7,
height=7)
ggplot(
normalized.relative.funnel[
registration_month < "2013-11-01" &
wiki == "enwiki",
],
aes(
x=registration_month,
y=prop
)
) +
facet_wrap(~ transition, ncol=1) +
geom_bar(fill="#CCCCCC", color="black", stat="identity") +
scale_y_continuous("Proportion of new editors") +
scale_x_date("Registration month") +
theme_bw()
dev.off()
# Raw monthly counts for each funnel stage, one SVG apiece (same filter).
svg("new_editors/plots/new_editors.counts.enwiki.svg",
width=7,
height=2)
ggplot(
months[
registration_month < "2013-11-01" &
wiki == "enwiki",
],
aes(
x=registration_month,
y=new_editors
)
) +
geom_bar(fill="#CCCCCC", color="black", stat="identity") +
scale_y_continuous("New editors") +
scale_x_date("Registration month") +
theme_bw()
dev.off()
svg("new_editors/plots/new_page_creators.counts.enwiki.svg",
width=7,
height=2)
ggplot(
months[
registration_month < "2013-11-01" &
wiki == "enwiki",
],
aes(
x=registration_month,
y=new_page_creators
)
) +
geom_bar(fill="#CCCCCC", color="black", stat="identity") +
scale_y_continuous("New page creators") +
scale_x_date("Registration month") +
theme_bw()
dev.off()
svg("new_editors/plots/new_article_creators.counts.enwiki.svg",
width=7,
height=2)
ggplot(
months[
registration_month < "2013-11-01" &
wiki == "enwiki",
],
aes(
x=registration_month,
y=new_article_creators
)
) +
geom_bar(fill="#CCCCCC", color="black", stat="identity") +
scale_y_continuous("New article creators") +
scale_x_date("Registration month") +
theme_bw()
dev.off()
svg("new_editors/plots/new_draft_creators.counts.enwiki.svg",
width=7,
height=2)
ggplot(
months[
registration_month < "2013-11-01" &
wiki == "enwiki",
],
aes(
x=registration_month,
y=new_draft_creators
)
) +
geom_bar(fill="#CCCCCC", color="black", stat="identity") +
scale_y_continuous("New draft creators") +
scale_x_date("Registration month") +
theme_bw()
dev.off()
|
/R/new_editors/exploration.enwiki.R
|
permissive
|
halfak/Wikipedia-article-creation-research
|
R
| false
| false
| 3,110
|
r
|
# Exploratory plots of the English Wikipedia new-editor funnel.
# Derives stage-to-stage conversion proportions from the monthly counts and
# writes one SVG per figure under new_editors/plots/.
source("loader/monthly_new_editor_article_creators.R")

months = load_monthly_new_editor_article_creators(reload = T)
months$registration_month = as.Date(months$registration_month)

# One stacked table per funnel transition: monthly conversion proportion
# between two consecutive stages.  Column names are passed as strings and
# resolved inside the data.table via get().
funnel_stage <- function(label, numerator, denominator) {
  months[,
    list(
      wiki,
      registration_month,
      transition = label,
      prop = get(numerator) / get(denominator)
    ),
  ]
}
normalized.relative.funnel = rbind(
  funnel_stage("New editors / Registered users",
               "new_editors", "registered_users"),
  funnel_stage("Page creators / New editors",
               "new_page_creators", "new_editors"),
  funnel_stage("Article page publishers / Page creators",
               "new_article_creators", "new_page_creators"),
  funnel_stage("Draft article publishers / Article page publishers",
               "new_draft_creators", "new_article_creators")
)
# Fix the facet order so the panels read top-to-bottom in funnel order.
normalized.relative.funnel$transition = factor(
  normalized.relative.funnel$transition,
  levels = c(
    "New editors / Registered users",
    "Page creators / New editors",
    "Article page publishers / Page creators",
    "Draft article publishers / Article page publishers"
  )
)

# Faceted bar chart of all four conversion proportions (enwiki, pre-2013-11).
svg("new_editors/plots/relative.funnel_props.enwiki.svg",
    width = 7,
    height = 7)
print(
  ggplot(
    normalized.relative.funnel[
      registration_month < "2013-11-01" &
      wiki == "enwiki",
    ],
    aes(x = registration_month, y = prop)
  ) +
    facet_wrap(~ transition, ncol = 1) +
    geom_bar(fill = "#CCCCCC", color = "black", stat = "identity") +
    scale_y_continuous("Proportion of new editors") +
    scale_x_date("Registration month") +
    theme_bw()
)
dev.off()

# Write a 7x2 SVG bar chart of one monthly count column for enwiki,
# restricted to cohorts registered before November 2013.
plot_monthly_count <- function(path, column, axis_label) {
  svg(path, width = 7, height = 2)
  print(
    ggplot(
      months[
        registration_month < "2013-11-01" &
        wiki == "enwiki",
      ],
      aes(x = registration_month, y = .data[[column]])
    ) +
      geom_bar(fill = "#CCCCCC", color = "black", stat = "identity") +
      scale_y_continuous(axis_label) +
      scale_x_date("Registration month") +
      theme_bw()
  )
  dev.off()
}
plot_monthly_count("new_editors/plots/new_editors.counts.enwiki.svg",
                   "new_editors", "New editors")
plot_monthly_count("new_editors/plots/new_page_creators.counts.enwiki.svg",
                   "new_page_creators", "New page creators")
plot_monthly_count("new_editors/plots/new_article_creators.counts.enwiki.svg",
                   "new_article_creators", "New article creators")
plot_monthly_count("new_editors/plots/new_draft_creators.counts.enwiki.svg",
                   "new_draft_creators", "New draft creators")
|
# rankhospital.R
# Moaeed Sajid
# V1 1/12/17
# Args (state, outcome, num = "best)
# Return hospital for a particular chosen state, outcome and position
#
# Fix: the original sorted with plyr::arrange() while `library('plyr')` was
# commented out, so the function errored unless plyr happened to be attached.
# Sorting now uses base R order(), which gives the same rate-then-name order.
rankhospital <- function(state, outcome, num = "best") {
  ## Read outcome data; every column as character so rates can be screened
  ## for the literal "Not Available" before numeric conversion.
  outcomef <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

  ## Validate the state against the states present in the data.
  if (!(state %in% outcomef$State)) {
    stop("Invalid State")
  }

  ## Map the outcome to its 30-day mortality column:
  ## heart attack [,11], heart failure [,17], pneumonia [,23].
  outcomecol <- switch(outcome,
                       "heart attack"  = 11,
                       "heart failure" = 17,
                       "pneumonia"     = 23,
                       stop("Invalid Outcome"))

  ## Keep only this state's hospital-name and rate columns, drop rows with
  ## no reported rate, and convert the rate to numeric.
  substate <- subset(outcomef, State == state)
  filtrows <- substate[c(2, outcomecol)]
  totalavail <- subset(filtrows, filtrows[, 2] != "Not Available")
  totalavail[, 2] <- as.numeric(totalavail[, 2])

  ## Order by rate, breaking ties alphabetically by hospital name.
  arrangeta <- totalavail[order(totalavail[, 2], totalavail[, 1]), ]
  countta <- nrow(arrangeta)

  ## Translate "best"/"worst" into a numeric rank.
  if (num == "best") {
    num <- 1
  }
  if (num == "worst") {
    num <- countta
  }
  ranking <- as.numeric(num)

  ## Out-of-range ranks yield "NA"; print() also returns the value invisibly,
  ## preserving the original's behavior for callers that capture the result.
  if (countta < ranking) {
    print("NA")
  }
  else {
    print(arrangeta[ranking, 1])
  }
}
|
/rankhospital.R
|
no_license
|
moaeedsajid/HospitalAssignment
|
R
| false
| false
| 2,695
|
r
|
# rankhospital.R
# Moaeed Sajid
# V1 1/12/17
# Args (state, outcome, num = "best)
# Return hospital for a particular chosen state, outcome and position
rankhospital <- function(state, outcome, num = "best") {
#install.packages('plyr')
#library('plyr')
##Task - Read outcome data
# All columns read as character so "Not Available" rates can be filtered out
# before numeric conversion.
outcomef <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
##Task - Check the state and outcome are valid
#Columns are heart attack [,11], heart failure [,17], pneumonia [,23]
#Check if state entered is in the state list
statevalid <- state %in% c(unique(outcomef$State))
if (statevalid == "FALSE") {
stop ("Invalid State")
}
# IMPROVED THIS BELOW - Check outcome is one of three predefined ones
#outcomevalid <- outcome %in% c("heart attack", "heart failure", "pneumonia")
#if (outcomevalid == "FALSE") {
# stop ("Invalid Outcome")
#}
# Choosing the correct colum in table for outcome, else stop for invalid outcome
outcomecol <- "NULL"
if (outcome == "heart attack") {
outcomecol <- 11
}
if (outcome == "heart failure") {
outcomecol <- 17
}
if (outcome == "pneumonia") {
outcomecol <- 23
}
if (outcomecol == "NULL") {
stop ("Invalid Outcome")
}
## Return hospital name in that state with the given rank
## 30-day death rate
#Retrieve a subset of data with just the state
substate <- subset(outcomef, (State == state))
#Reduce data further with just the 2 rows that concern us
filtrows <- (substate[c(2,outcomecol)])
# Remove not available before converting to numeric
totalavail <- subset(filtrows, (filtrows[,2] != 'Not Available'))
totalavail[,2] <- as.numeric(totalavail[,2])
# Sort by outcome and then name before counting the total results
# NOTE(review): arrange() comes from plyr, but library('plyr') is commented
# out above -- this errors unless plyr is attached elsewhere; confirm.
arrangeta <- arrange(totalavail,totalavail[,2],totalavail[,1])
countta <- nrow (arrangeta)
#print (countta)
#Calculate ranking
if (num == "best") {
num <- 1
}
if (num == "worst") {
num <- countta
}
#Convert to numeric
ranking <- as.numeric(num)
#print(ranking)
# Out-of-range ranks print "NA"; print() returns its value invisibly, so the
# hospital name is also the function's return value.
if (countta < ranking) {
print ("NA")
}
else {
print(arrangeta[ranking,1])
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{DT_apple}
\alias{DT_apple}
\title{Quarterly reported EBIT from Apple as data.table object from 1995
to 2020.}
\format{
A quarterly data.table object from 1995 to 2020
}
\usage{
data(DT_apple)
}
\description{
Quarterly reported EBIT from Apple as data.table object from 1995
to 2020.
}
\keyword{datasets}
|
/man/DT_apple.Rd
|
permissive
|
thfuchs/tsRNN
|
R
| false
| true
| 410
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{DT_apple}
\alias{DT_apple}
\title{Quarterly reported EBIT from Apple as data.table object from 1995
to 2020.}
\format{
A quarterly data.table object from 1995 to 2020
}
\usage{
data(DT_apple)
}
\description{
Quarterly reported EBIT from Apple as data.table object from 1995
to 2020.
}
\keyword{datasets}
|
# Regression checks for model.matrix()/model.frame() methods on coxph fits:
# missing-value handling, factor-level recoding on new data, strata-by-factor
# interactions, and time-transform (tt) terms.  Each all.equal() comparison
# should report TRUE when the methods agree.
options(na.action=na.exclude) # preserve missings
options(contrasts=c('contr.treatment', 'contr.poly')) #ensure constrast type
library(survival)
#
# Test out the revised model.matrix code
#
test1 <- data.frame(time= c(9, 3,1,1,6,6,8),
status=c(1,NA,1,0,1,1,0),
x= c(0, 2,1,1,1,0,0),
z= factor(c('a', 'a', 'b', 'b', 'c', 'c', 'a')))
fit1 <- coxph(Surv(time, status) ~ z, test1, iter=1)
fit2 <- coxph(Surv(time, status) ~z, test1, x=T, iter=1)
# model.matrix() rebuilt from the fit must equal the stored x matrix.
all.equal(model.matrix(fit1), fit2$x)
# This has no level 'b', make sure dummies recode properly
test2 <- data.frame(time= c(9, 3,1,1,6,6,8),
status=c(1,NA,1,0,1,1,0),
x= c(0, 2,1,1,1,0,0),
z= factor(c('a', 'a', 'a', 'a', 'c', 'c', 'a')))
ftest <- model.frame(fit1, data=test2)
all.equal(levels(ftest$z), levels(test1$z))
# xtest will have one more row than the others, since it does not delete
# the observation with a missing value for status
xtest <- model.matrix(fit1, data=test2)
dummy <- fit2$x
dummy[,1] <- 0
all.equal(xtest[-2,], dummy, check.attributes=FALSE)
# The case of a strata by factor interaction
# Use iter=0 since there are too many covariates and it won't converge
test1$x2 <- factor(rep(1:2, length=7))
fit3 <- coxph(Surv(time, status) ~ strata(x2)*z, test1, iter=0)
xx <- model.matrix(fit3)
# Strata main effects must be dropped: only z and the interaction remain.
all.equal(attr(xx, "assign"), c(2,2,3,3))
all.equal(colnames(xx), c("zb", "zc", "strata(x2)x2=2:zb",
"strata(x2)x2=2:zc"))
all.equal(attr(xx, "contrasts"),
list("strata(x2)"= "contr.treatment", z="contr.treatment"))
fit3b <- coxph(Surv(time, status) ~ strata(x2)*z, test1, iter=0, x=TRUE)
all.equal(fit3b$x, xx)
# A model with a tt term
fit4 <- coxph(Surv(time, status) ~ tt(x) + x, test1, iter=0,
tt = function(x, t, ...) x*t)
ff <- model.frame(fit4)
# There is 1 subject in the final risk set, 4 at risk at time 6, 6 at time 1
# The .strata. variable numbers from last time point to first
all.equal(ff$.strata., rep(1:3, c(1, 4,6)))
all.equal(ff[["tt(x)"]], ff$x* c(9,6,1)[ff$.strata.])
xx <- model.matrix(fit4)
all.equal(xx[,1], ff[[2]], check.attributes=FALSE)
|
/Tools/DECoN-master/Windows/packrat/lib-R/survival/tests/model.matrix.R
|
permissive
|
robinwijngaard/TFM_code
|
R
| false
| false
| 2,276
|
r
|
# Duplicate copy of the survival model.matrix regression script (see above):
# exercises missing-value handling, factor recoding, strata interactions and
# tt terms; every all.equal() line should yield TRUE.
options(na.action=na.exclude) # preserve missings
options(contrasts=c('contr.treatment', 'contr.poly')) #ensure constrast type
library(survival)
#
# Test out the revised model.matrix code
#
test1 <- data.frame(time= c(9, 3,1,1,6,6,8),
status=c(1,NA,1,0,1,1,0),
x= c(0, 2,1,1,1,0,0),
z= factor(c('a', 'a', 'b', 'b', 'c', 'c', 'a')))
fit1 <- coxph(Surv(time, status) ~ z, test1, iter=1)
fit2 <- coxph(Surv(time, status) ~z, test1, x=T, iter=1)
all.equal(model.matrix(fit1), fit2$x)
# This has no level 'b', make sure dummies recode properly
test2 <- data.frame(time= c(9, 3,1,1,6,6,8),
status=c(1,NA,1,0,1,1,0),
x= c(0, 2,1,1,1,0,0),
z= factor(c('a', 'a', 'a', 'a', 'c', 'c', 'a')))
ftest <- model.frame(fit1, data=test2)
all.equal(levels(ftest$z), levels(test1$z))
# xtest will have one more row than the others, since it does not delete
# the observation with a missing value for status
xtest <- model.matrix(fit1, data=test2)
dummy <- fit2$x
dummy[,1] <- 0
all.equal(xtest[-2,], dummy, check.attributes=FALSE)
# The case of a strata by factor interaction
# Use iter=0 since there are too many covariates and it won't converge
test1$x2 <- factor(rep(1:2, length=7))
fit3 <- coxph(Surv(time, status) ~ strata(x2)*z, test1, iter=0)
xx <- model.matrix(fit3)
all.equal(attr(xx, "assign"), c(2,2,3,3))
all.equal(colnames(xx), c("zb", "zc", "strata(x2)x2=2:zb",
"strata(x2)x2=2:zc"))
all.equal(attr(xx, "contrasts"),
list("strata(x2)"= "contr.treatment", z="contr.treatment"))
fit3b <- coxph(Surv(time, status) ~ strata(x2)*z, test1, iter=0, x=TRUE)
all.equal(fit3b$x, xx)
# A model with a tt term
fit4 <- coxph(Surv(time, status) ~ tt(x) + x, test1, iter=0,
tt = function(x, t, ...) x*t)
ff <- model.frame(fit4)
# There is 1 subject in the final risk set, 4 at risk at time 6, 6 at time 1
# The .strata. variable numbers from last time point to first
all.equal(ff$.strata., rep(1:3, c(1, 4,6)))
all.equal(ff[["tt(x)"]], ff$x* c(9,6,1)[ff$.strata.])
xx <- model.matrix(fit4)
all.equal(xx[,1], ff[[2]], check.attributes=FALSE)
|
# Field names of the Task reference class; used to sync a Task object's
# fields from an API response (see Task$update(), $abort(), $run()).
.ts <- c("id", "name", "description", "status", "app", "type",
"created_by", "created_time", "executed_by", "start_time", "end_time",
"execution_status", "price", "inputs", "outputs", "project",
"batch", "batch_input", "batch_by", "parent", "batch_group",
"errors", "warnings")
# Reference class describing a platform task (an app execution).  Fields
# mirror the task JSON returned by the API; `auth` (inherited from Item)
# performs the HTTP calls.
Task <- setRefClass("Task", contains = "Item",
fields = list(id = "characterORNULL",
name = "characterORNULL",
description = "characterORNULL",
status = "characterORNULL",
app = "characterORNULL",
type = "characterORNULL",
created_by = "characterORNULL",
created_time = "characterORNULL",
executed_by = "characterORNULL",
start_time = "characterORNULL",
end_time = "characterORNULL",
execution_status = "listORNULL",
price = "listORNULL",
inputs = "listORNULL",
outputs = "listORNULL",
project = "characterORNULL",
batch = "logicalORNULL",
batch_input = "characterORNULL",
batch_by = "listORNULL",
parent = "characterORNULL",
batch_group = "listORNULL",
errors = "listORNULL",
warnings = "listORNULL"),
methods = list(
# initialize = function(execution_status = NULL, ...) {
# if (!is.null(execution_status)) {
# .self$execution_status <<- do.call(EStatus, execution_status)
# }
# callSuper(...)
# },
# PATCH name/description/inputs on the server; with no arguments this is a
# plain GET refresh.  In every case the object's own fields are refreshed
# from the response and a fresh Task is returned.
update = function(name = NULL,
description = NULL,
inputs = NULL, ...) {
if (is.null(name) && is.null(description) && !is.null(inputs)) {
res = auth$api(path = paste0("tasks/", id, "/inputs"),
body = inputs, method = "PATCH", ...)
return(update())
}
body = list(name = name,
description = description,
inputs = inputs)
if (all(sapply(body, is.null))) {
res = auth$api(path = paste0("tasks/", id),
method = "GET", ...)
} else {
res = auth$api(path = paste0("tasks/", id),
body = body, method = "PATCH",
...)
}
# update object
for (nm in .ts) .self$field(nm, res[[nm]])
.asTask(res)
},
# GET the task's current inputs from the server (raw API response).
getInputs = function(...) {
auth$api(path = paste0("tasks/", id, "/inputs"),
method = "GET", ...)
},
# Alias kept for API-style naming consistency.
get_input = function(...) {
getInputs(...)
},
# DELETE this task on the server.
delete = function(...) {
auth$api(path = paste0("tasks/", id),
method = "DELETE", ...)
},
# POST an abort action, then refresh this object from the response.
abort = function(...) {
# turn this into a list
req <- auth$api(path = paste0("tasks/", id, "/actions/abort"),
method = "POST", ...)
# update object
for (nm in .ts) .self$field(nm, req[[nm]])
.asTask(req)
},
# Poll the task every `time` seconds, dispatching the status-specific hook
# (see getTaskHook); polling stops when a hook returns TRUE (or anything
# non-logical).
monitor = function(time = 30, ...) {
# TODO:
# set hook function
# get hook
t0 <- Sys.time()
message("Monitoring ...")
while (TRUE) {
# get status
d <- tolower(update()$status)
.fun <- getTaskHook(d)
res <- .fun(...)
if (!is.logical(res) || isTRUE(res)) {
break
}
Sys.sleep(time)
}
},
# List files in this task's project that originate from this task.
file = function(...) {
auth$file(project = project, origin.task = id, ...)
},
# Download every output file of the task into `destfile`.
# NOTE(review): each output's `path` entry is treated as a file id -- confirm.
download = function(destfile, ..., method = "curl") {
if (is.null(outputs)) update()
fids <- sapply(outputs, function(x) x$path)
p <- auth$project(id = project)
for (fid in fids) {
fl <- p$file(id = fid)
message("downloading: ", fl$name)
fl$download(destfile, ..., method = method)
}
},
# POST a run action (start a draft task), then refresh from the response.
run = function(...) {
# turn this into a list
req <- auth$api(path = paste0("tasks/", id, "/actions/run"),
method = "POST", ...)
# update object
for (nm in .ts) {
.self$field(nm, req[[nm]])
}
.asTask(req)
},
# Pretty-print the fields listed in .ts.
show = function() {
.showFields(.self, "== Task ==", .ts)
}
))
.asTask <- function(x) {
  # Build a Task object from a parsed API response list and attach the raw
  # response for later inspection.
  task <- do.call(Task, x)
  task$response <- response(x)
  task
}
# Typed list container holding Task objects.
TaskList <- setListClass("Task", contains = "Item0")
# Convert a paginated API response (href + items) into a TaskList, keeping
# the pagination link and raw response on the container.
.asTaskList <- function(x) {
obj <- TaskList(lapply(x$items, .asTask))
obj@href <- x$href
obj@response <- response(x)
obj
}
# Hook
# Registry of per-status callbacks used while monitoring a task.  Each
# handler is invoked when the monitored task reaches that status and must
# return TRUE (stop monitoring) or FALSE (keep polling).
TaskHook <- setRefClass("TaskHook",
    fields = list(
        queued = "function",
        draft = "function",
        running = "function",
        completed = "function",
        aborted = "function",
        failed = "function"),
    methods = list(
        initialize = function(queued = NULL,
                              draft = NULL,
                              running = NULL,
                              completed = NULL,
                              aborted = NULL,
                              failed = NULL, ...) {
            # Fill in defaults for any handler not supplied: terminal
            # statuses (completed/aborted/failed) return TRUE so monitoring
            # stops; transient statuses return FALSE so polling continues.
            if (is.null(queued)) {
                queued <- function(...) {
                    cat("\r", "queued")
                    return(FALSE)
                }
            }
            if (is.null(draft)) {
                # Fix: the original default was `function(....)` (a typo for
                # the dots argument).
                draft <- function(...) {
                    # should not happen in a running task
                    message("draft")
                    return(FALSE)
                }
            }
            if (is.null(running)) {
                running <- function(...) {
                    cat("\r", "running ...")
                    return(FALSE)
                }
            }
            if (is.null(completed)) {
                completed <- function(...) {
                    cat("\r", "completed")
                    return(TRUE)
                }
            }
            if (is.null(aborted)) {
                aborted <- function(...) {
                    message("aborted")
                    return(TRUE)
                }
            }
            if (is.null(failed)) {
                failed <- function(...) {
                    cat("\r", "failed")
                    return(TRUE)
                }
            }
            # Assign the chosen handlers to the fields.  Fix: the original
            # only ever assigned the defaults, silently dropping any
            # handler passed by the caller.
            queued <<- queued
            draft <<- draft
            running <<- running
            completed <<- completed
            aborted <<- aborted
            failed <<- failed
            callSuper(...)
        },
        # Replace the handler registered for one status with `fun`.
        setHook = function(status = c("queued", "draft", "running",
            "completed", "aborted", "failed"), fun) {
            stopifnot(is.function(fun))
            status <- match.arg(status)
            .self$field(status, fun)
        },
        # Return the handler currently registered for `status`.
        getHook = function(status = c("queued", "draft", "running",
            "completed", "aborted", "failed")) {
            status <- match.arg(status)
            .self[[status]]
        }
    ))
#' set task function hook
#'
#' Register the hook function run for a given task status.
#'
#' @param status one of "queued", "draft", "running",
#' "completed", "aborted", or "failed".
#' @param fun function; it must return TRUE or FALSE at the end of its
#' body. Returning TRUE terminates the monitoring process after the hook
#' runs; returning FALSE lets monitoring continue.
#'
#' @rdname TaskHook
#' @return object from setHook and getHook.
#' @export setTaskHook
#' @examples
#' getTaskHook("completed")
#' setTaskHook("completed", function() {
#' message("completed")
#' return(TRUE)
#' })
setTaskHook <- function(status = c("queued", "draft", "running",
                                   "completed", "aborted", "failed"), fun) {
    status <- match.arg(status)
    stopifnot(is.function(fun))
    # The hook registry is a TaskHook object stored in the package-wide
    # "sevenbridges" option.
    registry <- options("sevenbridges")$sevenbridges$taskhook
    registry$setHook(status, fun)
}
#' @rdname TaskHook
#' @export getTaskHook
getTaskHook <- function(status = c("queued", "draft", "running",
                                   "completed", "aborted", "failed")) {
    # Look up the handler registered for `status` in the package-wide
    # "sevenbridges" option.
    registry <- options("sevenbridges")$sevenbridges$taskhook
    registry$getHook(match.arg(status))
}
# S4 method so the generic delete() works on Task objects; delegates to the
# reference-class delete() defined on Task.
#' @rdname delete-methods
#' @aliases delete,Task-method
setMethod("delete", "Task", function(obj) {
obj$delete()
})
# Generic: convert an R object into the JSON-ready representation expected in
# a task's `inputs` payload.
setGeneric("asTaskInput", function(object) standardGeneric("asTaskInput"))
# A single Files object becomes a File descriptor (scalars via jsonlite::unbox).
setMethod("asTaskInput", "Files", function(object) {
list(class = unbox("File"),
path = unbox(object$id),
name = unbox(object$name))
})
# A FilesList becomes a plain list of File descriptors.
setMethod("asTaskInput", "FilesList", function(object) {
lapply(object, function(x){
asTaskInput(x)
})
})
# A heterogeneous list: flatten Files and FilesList entries into one
# FilesList and convert that; errors if nothing in the list is file-like.
setMethod("asTaskInput", "list", function(object) {
id.file <- sapply(object, is, "Files")
id.lst <- sapply(object, is, "FilesList")
if (sum(id.file)) {
res.f <- object[id.file]
} else {
res.f <- NULL
}
if (sum(id.lst)) {
# NOTE(review): this first assignment is immediately overwritten below --
# looks like dead code; confirm.
res.l <- object[id.lst]
res.l <- do.call(c, lapply(object[id.lst], function(x) {
# x here is FilesList
lapply(x, function(x) x)
# return a pure list
}))
} else {
res.l <- NULL
}
res <- c(res.f, res.l)
if (length(res)) {
return(asTaskInput(FilesList(res)))
} else {
stop("Not every list entries are Files or FilesList object")
}
})
# Anything else (numbers, strings, logicals, ...) passes through unchanged.
setMethod("asTaskInput", "ANY", function(object) {
object
})
#' batch function for task batch execution
#'
#' batch function for task batch execution
#'
#' @param input character, ID of the input on which you wish to batch on.
#' You would usually batch on the input containing a list of files.
#' If left out, default batching criteria defined in the app is used.
#' @param criteria a character vector, for example.
#' \code{c("metadata.sample_id", "metadata.library_id")}. The meaning of the
#' above batch_by dictionary is - group inputs (usually files) first on sample
#' ID and then on library ID. If NULL, using type "ITEM" by default.
#' @param type Criteria on which to batch on - can be in two formats."ITEM" and
#' "CRITERIA". If you wish to batch per item in the input (usually a file)
#' using "ITEM". If you wish a more complex criteria, specify the "CRITERIA"
#' on which you wish to group inputs on. Please check examples.
#' @return a list of 'batch_input' and 'batch_by' used for task batch
#' @export batch
#' @examples
#' batch(input = "fastq") # by ITEM
#' batch(input = "fastq", c("metadata.sample_id", "metadata.library_id"))
#' # shorthand for this
#' batch(input = "fastq", c("metadata.sample_id", "metadata.library_id"), type = "CRITERIA")
batch <- function(input = NULL,
                  criteria = NULL,
                  type = c("ITEM", "CRITERIA")) {
    if (is.null(input)) stop("Please specify the input id")
    type <- match.arg(type)
    if (is.null(criteria)) {
        # Grouping criteria are mandatory for CRITERIA batching.
        # (Fixed typo in the error message: "cretieria" -> "criteria".)
        if (type == "CRITERIA") {
            stop("Please provide criteria, for example c(\"metadata.sample_id\")")
        }
    } else {
        # Supplying criteria implies CRITERIA batching even when the caller
        # left `type` at its "ITEM" default.
        if (type == "ITEM") {
            message("criteria provided, convert type from ITEM to CRITERIA")
        }
        type <- "CRITERIA"
    }
    # A single criterion is wrapped in a list so it serializes as a JSON array.
    if (length(criteria) == 1) criteria <- list(criteria)
    # Fix: the original switch held a dead empty branch for a NULL-criteria
    # CRITERIA case (unreachable after the validation above), leaving `res`
    # potentially undefined; build batch_by directly instead.
    batch_by <- if (type == "ITEM") {
        list(type = "ITEM")
    } else {
        list(type = "CRITERIA",
             criteria = criteria)
    }
    list(batch_input = input, batch_by = batch_by)
}
|
/R/class-task.R
|
permissive
|
mlrdk/sevenbridges-r
|
R
| false
| false
| 15,403
|
r
|
# Field names of the Task reference class; used to sync a Task object's
# fields from an API response.
.ts <- c("id", "name", "description", "status", "app", "type",
"created_by", "created_time", "executed_by", "start_time", "end_time",
"execution_status", "price", "inputs", "outputs", "project",
"batch", "batch_input", "batch_by", "parent", "batch_group",
"errors", "warnings")
# Reference class modelling a platform Task (one execution of an app).
# Fields mirror the task JSON returned by the API; 'auth' (inherited from
# Item) supplies the authenticated api() caller used by the methods below.
Task <- setRefClass("Task", contains = "Item",
                    fields = list(id = "characterORNULL",
                                  name = "characterORNULL",
                                  description = "characterORNULL",
                                  status = "characterORNULL",
                                  app = "characterORNULL",
                                  type = "characterORNULL",
                                  created_by = "characterORNULL",
                                  created_time = "characterORNULL",
                                  executed_by = "characterORNULL",
                                  start_time = "characterORNULL",
                                  end_time = "characterORNULL",
                                  execution_status = "listORNULL",
                                  price = "listORNULL",
                                  inputs = "listORNULL",
                                  outputs = "listORNULL",
                                  project = "characterORNULL",
                                  batch = "logicalORNULL",
                                  batch_input = "characterORNULL",
                                  batch_by = "listORNULL",
                                  parent = "characterORNULL",
                                  batch_group = "listORNULL",
                                  errors = "listORNULL",
                                  warnings = "listORNULL"),
                    methods = list(
                        # initialize = function(execution_status = NULL, ...) {
                        #     if (!is.null(execution_status)) {
                        #         .self$execution_status <<- do.call(EStatus, execution_status)
                        #     }
                        #     callSuper(...)
                        # },
                        # With no arguments: refresh this object via GET /tasks/{id}.
                        # With name/description/inputs: PATCH those fields, then
                        # refresh local fields from the response. Returns a Task.
                        update = function(name = NULL,
                                          description = NULL,
                                          inputs = NULL, ...) {
                            # inputs-only update uses the dedicated /inputs endpoint,
                            # then re-reads the whole task
                            if (is.null(name) && is.null(description) && !is.null(inputs)) {
                                res = auth$api(path = paste0("tasks/", id, "/inputs"),
                                               body = inputs, method = "PATCH", ...)
                                return(update())
                            }
                            body = list(name = name,
                                        description = description,
                                        inputs = inputs)
                            if (all(sapply(body, is.null))) {
                                # nothing to change: plain refresh
                                res = auth$api(path = paste0("tasks/", id),
                                               method = "GET", ...)
                            } else {
                                res = auth$api(path = paste0("tasks/", id),
                                               body = body, method = "PATCH",
                                               ...)
                            }
                            # update object: copy every known field from the response
                            for (nm in .ts) .self$field(nm, res[[nm]])
                            .asTask(res)
                        },
                        # Raw GET /tasks/{id}/inputs; returns the parsed response list.
                        getInputs = function(...) {
                            auth$api(path = paste0("tasks/", id, "/inputs"),
                                     method = "GET", ...)
                        },
                        # snake_case alias for getInputs()
                        get_input = function(...) {
                            getInputs(...)
                        },
                        # DELETE /tasks/{id}
                        delete = function(...) {
                            auth$api(path = paste0("tasks/", id),
                                     method = "DELETE", ...)
                        },
                        # POST /tasks/{id}/actions/abort, then refresh local fields.
                        abort = function(...) {
                            # turn this into a list
                            req <- auth$api(path = paste0("tasks/", id, "/actions/abort"),
                                            method = "POST", ...)
                            # update object
                            for (nm in .ts) .self$field(nm, req[[nm]])
                            .asTask(req)
                        },
                        # Poll the task every 'time' seconds, dispatching the hook
                        # registered for the current status (see TaskHook). A hook
                        # returning TRUE (or any non-logical value) stops polling.
                        monitor = function(time = 30, ...) {
                            # TODO:
                            # set hook function
                            # get hook
                            t0 <- Sys.time()
                            message("Monitoring ...")
                            while (TRUE) {
                                # get status
                                d <- tolower(update()$status)
                                .fun <- getTaskHook(d)
                                res <- .fun(...)
                                if (!is.logical(res) || isTRUE(res)) {
                                    break
                                }
                                Sys.sleep(time)
                            }
                        },
                        # List files in this task's project that originate from this task.
                        file = function(...) {
                            auth$file(project = project, origin.task = id, ...)
                        },
                        # Download every output file of this task into 'destfile'.
                        download = function(destfile, ..., method = "curl") {
                            # refresh first so 'outputs' is populated
                            if (is.null(outputs)) update()
                            fids <- sapply(outputs, function(x) x$path)
                            p <- auth$project(id = project)
                            for (fid in fids) {
                                fl <- p$file(id = fid)
                                message("downloading: ", fl$name)
                                fl$download(destfile, ..., method = method)
                            }
                        },
                        # POST /tasks/{id}/actions/run (start a draft task),
                        # then refresh local fields.
                        run = function(...) {
                            # turn this into a list
                            req <- auth$api(path = paste0("tasks/", id, "/actions/run"),
                                            method = "POST", ...)
                            # update object
                            for (nm in .ts) {
                                .self$field(nm, req[[nm]])
                            }
                            .asTask(req)
                        },
                        # Pretty-print all Task fields listed in .ts.
                        show = function() {
                            .showFields(.self, "== Task ==", .ts)
                        }
                    ))
# Build a Task reference object from a parsed API response list and
# attach the raw response for later inspection.
.asTask <- function(x) {
    task <- do.call(Task, x)
    task$response <- response(x)
    task
}
# Container class holding a list of Task objects (built by setListClass).
TaskList <- setListClass("Task", contains = "Item0")
# Convert a parsed API listing (with $items and paging $href) into a
# TaskList, keeping the href and the raw response on the result.
.asTaskList <- function(x) {
    obj <- TaskList(lapply(x$items, .asTask))
    obj@href <- x$href
    obj@response <- response(x)
    obj
}
# Hook
TaskHook <- setRefClass("TaskHook",
fields = list(
queued = "function",
draft = "function",
running = "function",
completed = "function",
aborted = "function",
failed = "function"),
methods = list(
initialize = function(queued = NULL,
draft = NULL,
running = NULL,
completed = NULL,
aborted = NULL,
failed = NULL, ...) {
if (is.null(completed)) {
completed <<- function(...) {
cat("\r", "completed")
return(TRUE)
}
}
if (is.null(queued)) {
queued <<- function(...) {
cat("\r", "queued")
return(FALSE)
}
}
if (is.null(draft)) {
draft <<- function(....) {
# should not happen in a running task
message("draft")
return(FALSE)
}
}
if (is.null(running)) {
running <<- function(...) {
cat("\r", "running ...")
return(FALSE)
}
}
if (is.null(aborted)) {
aborted <<- function(...) {
message("aborted")
return(TRUE)
}
}
if (is.null(failed)) {
failed <<- function(...) {
cat("\r", "failed")
return(TRUE)
}
}
},
setHook = function(status = c("queued", "draft", "running",
"completed", "aborted", "failed"), fun) {
stopifnot(is.function(fun))
status <- match.arg(status)
.self$field(status, fun)
},
getHook = function(status = c("queued", "draft", "running",
"completed", "aborted", "failed")) {
status <- match.arg(status)
.self[[status]]
}
))
#' Set task function hook
#'
#' Register a callback that the task monitor runs when a task reaches
#' the given status. The hooks live on the package-level TaskHook object
#' stored in \code{options("sevenbridges")}.
#'
#' @param status one of "queued", "draft", "running",
#' "completed", "aborted", or "failed".
#' @param fun function; it must return TRUE or FALSE at the end of its
#' body. TRUE also terminates the monitoring loop; FALSE means the hook
#' is called but monitoring continues.
#'
#' @rdname TaskHook
#' @return object from setHook and getHook.
#' @export setTaskHook
#' @examples
#' getTaskHook("completed")
#' setTaskHook("completed", function() {
#'     message("completed")
#'     return(TRUE)
#' })
setTaskHook <- function(status = c("queued", "draft", "running",
                                   "completed", "aborted", "failed"), fun) {
    status <- match.arg(status)
    stopifnot(is.function(fun))
    # getOption() is the idiomatic form of options("sevenbridges")$sevenbridges
    getOption("sevenbridges")$taskhook$setHook(status, fun)
}
#' @rdname TaskHook
#' @export getTaskHook
getTaskHook <- function(status = c("queued", "draft", "running",
                                   "completed", "aborted", "failed")) {
    status <- match.arg(status)
    # Look up the hook on the package-level TaskHook object;
    # getOption() is the idiomatic form of options("sevenbridges")$sevenbridges
    getOption("sevenbridges")$taskhook$getHook(status)
}
#' @rdname delete-methods
#' @aliases delete,Task-method
setMethod("delete", "Task", function(obj) {
    # delegate to the reference-class method, which issues DELETE /tasks/{id}
    obj$delete()
})
# Convert R objects into the JSON structure the task API expects for a
# file input value.
setGeneric("asTaskInput", function(object) standardGeneric("asTaskInput"))
# A single file becomes a CWL-style File object; unbox() keeps the
# scalars from being serialized as length-1 JSON arrays.
setMethod("asTaskInput", "Files", function(object) {
    list(class = unbox("File"),
         path = unbox(object$id),
         name = unbox(object$name))
})
# A list of files maps element-wise to a list of File objects.
setMethod("asTaskInput", "FilesList", function(object) {
    lapply(object, function(x){
        asTaskInput(x)
    })
})
# A heterogeneous list may mix Files and FilesList objects: flatten the
# FilesList entries, pool them with the plain Files entries, and convert
# the pooled files. Entries of any other class are silently dropped; we
# only fail when nothing usable is found.
setMethod("asTaskInput", "list", function(object) {
    # vapply (not sapply): type-stable, and returns logical(0) for an
    # empty list instead of a list() that breaks the any()/sum() check
    is.file <- vapply(object, is, logical(1), "Files")
    is.flst <- vapply(object, is, logical(1), "FilesList")
    # plain Files entries, kept as-is
    if (any(is.file)) {
        res.f <- object[is.file]
    } else {
        res.f <- NULL
    }
    # FilesList entries, flattened into a plain list of Files
    # (the dead pre-assignment of res.l in the original is removed)
    if (any(is.flst)) {
        res.l <- do.call(c, lapply(object[is.flst], function(x) {
            lapply(x, identity)
        }))
    } else {
        res.l <- NULL
    }
    res <- c(res.f, res.l)
    if (length(res)) {
        asTaskInput(FilesList(res))
    } else {
        stop("Not all list entries are Files or FilesList objects")
    }
})
# Fallback: non-file values (numbers, strings, etc.) pass through unchanged.
setMethod("asTaskInput", "ANY", function(object) {
    object
})
#' batch function for task batch execution
#'
#' batch function for task batch execution
#'
#' @param input character, ID of the input on which you wish to batch on.
#' You would usually batch on the input containing a list of files.
#' If left out, default batching criteria defined in the app is used.
#' @param criteria a character vector, for example.
#' \code{c("metadata.sample_id", "metadata.library_id")}. The meaning of the
#' above batch_by dictionary is - group inputs (usually files) first on sample
#' ID and then on library ID. If NULL, using type "ITEM" by default.
#' @param type Criteria on which to batch on - can be in two formats."ITEM" and
#' "CRITERIA". If you wish to batch per item in the input (usually a file)
#' using "ITEM". If you wish a more complex criteria, specify the "CRITERIA"
#' on which you wish to group inputs on. Please check examples.
#' @return a list of 'batch_input' and 'batch_by' used for task batch
#' @export batch
#' @examples
#' batch(input = "fastq") # by ITEM
#' batch(input = "fastq", c("metadata.sample_id", "metadata.library_id"))
#' # shorthand for this
#' batch(input = "fastq", c("metadata.sample_id", "metadata.library_id"), type = "CRITERIA")
batch <- function(input = NULL,
                  criteria = NULL,
                  type = c("ITEM", "CRITERIA")) {
    # 'input' is required: it names the task input port to batch over.
    if (is.null(input)) stop("Please specify the input id")
    type <- match.arg(type)

    if (is.null(criteria)) {
        # CRITERIA batching is meaningless without criteria to group on.
        if (type == "CRITERIA") {
            stop("Please provide criteria, for example c(\"metadata.sample_id\")")
        }
    } else {
        # Supplying criteria implies CRITERIA batching, even if type = "ITEM".
        if (type == "ITEM") {
            message("criteria provided, convert type from ITEM to CRITERIA")
        }
        type <- "CRITERIA"
    }

    # The API expects 'criteria' as a list, so wrap a single criterion.
    if (length(criteria) == 1) criteria <- list(criteria)

    # Build the batch_by payload. (The unreachable "criteria is NULL"
    # branch of the original CRITERIA case has been removed: execution
    # can only reach here with non-NULL criteria when type == "CRITERIA"
    # was requested explicitly, which already stopped above.)
    res <- switch(type,
                  ITEM = list(type = "ITEM"),
                  CRITERIA = list(type = "CRITERIA", criteria = criteria))

    c(list(batch_input = input), list(batch_by = res))
}
|
#' Community detection, label propagation
#'
#' An implementation of community detection by label propagation in an undirected weighted graph based on
#' Raghavan, Albert, Kumara. Phys Rev E 76, 036106 (2007)
#'
#' @param unique_edges a data frame with columns a, b, weight representing the connections between nodes.
#' We assume undirected graph, and therefore b < a.
#' @param async_prop proportion of nodes to update before synchronous update
#' @param check_unique whether to check edges data frame for uniqueness
#'
#' @return a data frame with two columns,
#' node--node id (taken from a, b of input) and
#' label--unique cluster/community ID.
#' @import dplyr
#' @import futile.logger
#' @author Yuriy Sverchkov
#' @export
inferCommunitiesLP <- function( unique_edges, async_prop = .5, check_unique = FALSE ){
  # For this algorithm it's more convenient to just have all edges listed twice
  flog.trace( "Converting distinct edges to bidirectional edges..." )
  edges <- union_all( select( unique_edges, src = a, tgt = b, weight ),
                      select( unique_edges, src = b, tgt = a, weight ) )
  if ( check_unique ) {
    flog.trace( "Making sure edges are unique..." )
    # TRUE/FALSE spelled out (T/F are reassignable and non-idiomatic)
    edges <- edges %>% distinct( src, tgt, .keep_all = TRUE )
  }
  # Create node table; every node starts in its own singleton community.
  # (The unused 'nodes_array' copy from the original has been removed.)
  flog.trace( "Making node table..." )
  nodes <- distinct( edges, node = src ) %>% mutate( label = node )
  repeat {
    flog.trace( "Label propagation: Number of communities: %s.", nrow( distinct( nodes, label ) ) )
    # Select first batch of nodes to update (asynchronous step)
    first_batch <- nodes %>% select( node ) %>% sample_frac( async_prop )
    # Propagate votes from first batch; ties are broken by sampling one
    # winning label per node (votes are grouped by node).
    first_batch_votes <- edges %>%
      right_join( first_batch, by = c( "tgt" = "node" ) ) %>%
      voteForLabelPropagation( nodes ) %>%
      sample_n( 1 ) %>% ungroup()
    # Update nodes: only first-batch nodes receive a new_label; others keep theirs
    nodes <- left_join( nodes, first_batch_votes, by = "node" ) %>%
      mutate( label = if_else( is.na( new_label ), label, new_label ) ) %>%
      select( node, label )
    # Get votes from all nodes (synchronous step)
    votes <- voteForLabelPropagation( edges, nodes )
    # Converged when every node's current label is among its winning votes
    checks <- votes %>% ungroup() %>%
      left_join( nodes, by = "node" ) %>%
      group_by( node ) %>%
      summarize( consensus = any( label == new_label ) ) %>%
      ungroup() %>%
      summarize( done = all( consensus ) )
    if ( checks$done ) break
    # Propagate votes from all (again breaking ties by sampling)
    nodes <- votes %>%
      sample_n( 1 ) %>%
      ungroup() %>%
      select( node, label = new_label )
  }
  nodes
}
|
/R/inferCommunitiesLP.R
|
permissive
|
sverchkov/CommunityInference
|
R
| false
| false
| 2,609
|
r
|
#' Community detection, label propagation
#'
#' An implementation of community detection by label propagation in an undirected weighted graph based on
#' Raghavan, Albert, Kumara. Phys Rev E 76, 036106 (2007)
#'
#' @param unique_edges a data frame with columns a, b, weight representing the connections between nodes.
#' We assume undirected graph, and therefore b < a.
#' @param async_prop proportion of nodes to update before synchronous update
#' @param check_unique whether to check edges data frame for uniqueness
#'
#' @return a data frame with two columns,
#' node--node id (taken from a, b of input) and
#' label--unique cluster/community ID.
#' @import dplyr
#' @import futile.logger
#' @author Yuriy Sverchkov
#' @export
inferCommunitiesLP <- function( unique_edges, async_prop = .5, check_unique = FALSE ){
  # For this algorithm it's more convenient to just have all edges listed twice
  flog.trace( "Converting distinct edges to bidirectional edges..." )
  edges <- union_all( select( unique_edges, src = a, tgt = b, weight ),
                      select( unique_edges, src = b, tgt = a, weight ) )
  if ( check_unique ) {
    flog.trace( "Making sure edges are unique..." )
    # TRUE/FALSE spelled out (T/F are reassignable and non-idiomatic)
    edges <- edges %>% distinct( src, tgt, .keep_all = TRUE )
  }
  # Create node table; every node starts in its own singleton community.
  # (The unused 'nodes_array' copy from the original has been removed.)
  flog.trace( "Making node table..." )
  nodes <- distinct( edges, node = src ) %>% mutate( label = node )
  repeat {
    flog.trace( "Label propagation: Number of communities: %s.", nrow( distinct( nodes, label ) ) )
    # Select first batch of nodes to update (asynchronous step)
    first_batch <- nodes %>% select( node ) %>% sample_frac( async_prop )
    # Propagate votes from first batch; ties are broken by sampling one
    # winning label per node (votes are grouped by node).
    first_batch_votes <- edges %>%
      right_join( first_batch, by = c( "tgt" = "node" ) ) %>%
      voteForLabelPropagation( nodes ) %>%
      sample_n( 1 ) %>% ungroup()
    # Update nodes: only first-batch nodes receive a new_label; others keep theirs
    nodes <- left_join( nodes, first_batch_votes, by = "node" ) %>%
      mutate( label = if_else( is.na( new_label ), label, new_label ) ) %>%
      select( node, label )
    # Get votes from all nodes (synchronous step)
    votes <- voteForLabelPropagation( edges, nodes )
    # Converged when every node's current label is among its winning votes
    checks <- votes %>% ungroup() %>%
      left_join( nodes, by = "node" ) %>%
      group_by( node ) %>%
      summarize( consensus = any( label == new_label ) ) %>%
      ungroup() %>%
      summarize( done = all( consensus ) )
    if ( checks$done ) break
    # Propagate votes from all (again breaking ties by sampling)
    nodes <- votes %>%
      sample_n( 1 ) %>%
      ungroup() %>%
      select( node, label = new_label )
  }
  nodes
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arrange_xdf.R
\name{arrange_.RxFileData}
\alias{arrange}
\alias{arrange_}
\alias{arrange_.RxFileData}
\title{Arrange the rows in an Xdf file}
\usage{
\method{arrange_}{RxFileData}(.data, ..., .outFile, .rxArgs, .dots)
}
\arguments{
\item{...}{List of unquoted variable names. Use \code{desc} to sort in descending order.}
\item{.outFile}{Output format for the returned data. If not supplied, create an xdf tbl; if \code{NULL}, return a data frame; if a character string naming a file, save an Xdf file at that location.}
\item{.rxArgs}{A list of RevoScaleR arguments. See \code{\link{rxArgs}} for details.}
\item{.dots}{Used to work around non-standard evaluation. See the dplyr documentation for details.}
\item{.data}{An Xdf data source, tbl, or other RevoScaleR file data source.}
}
\value{
An object representing the sorted data. This depends on the \code{.outFile} argument: if missing, it will be an xdf tbl object; if \code{NULL}, a data frame; and if a filename, an Xdf data source referencing a file saved to that location.
}
\description{
Arrange the rows in an Xdf file
}
\details{
The underlying RevoScaleR function is \code{rxSort}. This has many sorting options, including removing duplicated keys, adding a column of frequency counts, and so on.
}
\seealso{
\code{\link{rxSort}}, \code{\link[dplyr]{arrange}} in package dplyr
}
|
/man/arrange.Rd
|
no_license
|
yueguoguo/dplyrXdf
|
R
| false
| true
| 1,425
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arrange_xdf.R
\name{arrange_.RxFileData}
\alias{arrange}
\alias{arrange_}
\alias{arrange_.RxFileData}
\title{Arrange the rows in an Xdf file}
\usage{
\method{arrange_}{RxFileData}(.data, ..., .outFile, .rxArgs, .dots)
}
\arguments{
\item{...}{List of unquoted variable names. Use \code{desc} to sort in descending order.}
\item{.outFile}{Output format for the returned data. If not supplied, create an xdf tbl; if \code{NULL}, return a data frame; if a character string naming a file, save an Xdf file at that location.}
\item{.rxArgs}{A list of RevoScaleR arguments. See \code{\link{rxArgs}} for details.}
\item{.dots}{Used to work around non-standard evaluation. See the dplyr documentation for details.}
\item{.data}{An Xdf data source, tbl, or other RevoScaleR file data source.}
}
\value{
An object representing the sorted data. This depends on the \code{.outFile} argument: if missing, it will be an xdf tbl object; if \code{NULL}, a data frame; and if a filename, an Xdf data source referencing a file saved to that location.
}
\description{
Arrange the rows in an Xdf file
}
\details{
The underlying RevoScaleR function is \code{rxSort}. This has many sorting options, including removing duplicated keys, adding a column of frequency counts, and so on.
}
\seealso{
\code{\link{rxSort}}, \code{\link[dplyr]{arrange}} in package dplyr
}
|
#### demonstrate finding NA (missing) values introduced by coercion
# (note: these are NA values, not Inf/infinite values)
example.data <- c("1", "2", "3", "Hello", "5")
# as.numeric() turns non-numeric strings into NA (with a coercion warning)
example.data <- as.numeric(example.data)
# logical mask: TRUE where the value is NA
is.na(example.data)
# logical mask: TRUE where the value is not NA
!is.na(example.data)
|
/0_Code_sniplets/Filter/Find_NAs.R
|
no_license
|
DrBuschle/R-knowledge
|
R
| false
| false
| 225
|
r
|
#### demonstrate finding NA (missing) values introduced by coercion
# (note: these are NA values, not Inf/infinite values)
example.data <- c("1", "2", "3", "Hello", "5")
# as.numeric() turns non-numeric strings into NA (with a coercion warning)
example.data <- as.numeric(example.data)
# logical mask: TRUE where the value is NA
is.na(example.data)
# logical mask: TRUE where the value is not NA
!is.na(example.data)
|
####################################################################################################
### The single server simulator system
#
# - to be used with the discrete event simulator
# - analogous to how the montyHall system was used by the monte carlo simulator
#
#
### To use the single server simulator system once in the discrete event simulator (in full verbose mode),
### enter the following function in the console
#
# results <- discreteEventSimulator(new.singleServerSystem(), endTime = 100 , verbose = TRUE)
# print.stats(results)
#
### To use the single server simulator system in the discrete event simulator repeatedly,say 2000 times.
# call the above in a for loop, storing the results in a list. or use the simulation.replications()
# function as in the example below (wich uses reps = 15). In either case the results will be a list
# of stats environments that can be printed with print.stats()).
# Below is an example with a rep = 15.
#
# singleServerSimulator <- function(){
# discreteEventSimulator(new.singleServerSystem(), endTime = 100)
# }
#
# results <- simulation.replications(15, singleServerSimulator)
#
# for(i in 1:length(results)){
# print.stats(results[[i]], header = paste("i=", i, ": ", sep=""), after = "\n")
# }
#
######################################################################################
# The single server system
#
# The simulator system for the simple single-server simulator from Chapter 3 of the text book
# Build the discrete-event system description for the single-server
# queue: the first event is an arrival ("A"), state tracks server (Ls)
# and queue (Lq) occupancy, all statistics start at zero, and each event
# name is mapped to its state-update (and optional stats-update) function.
new.singleServerSystem <- function(){
  new.discreteEventSystem(firstEvent.name = "A", measure = identity,
                          state = new.state(Ls = 0, Lq = 0),
                          stats = new.stats(N = 0, NW = 0, WS = 0, WQ = 0, T0 = 0, Tmax = 0),
                          updateFn.stats.default = base.statsUpdate,
                          updateFns = new.updateFns(A = c(arrival.eventUpdate, arrival.statsUpdate),
                                                    D = departure.eventUpdate,
                                                    E = c(end.eventUpdate, end.statsUpdate)))
}
### The random variables used in the simulator - interArrivalTime and serviceTime
# Random interarrival time: discrete uniform on 1..8 time units.
create.interArrivalTime <- function(){
  sample(1:8, 1, replace = TRUE)
}
# Random service time: discrete uniform on 3..12 time units.
create.serviceTime <- function(){
  sample(3:12, 1, replace = TRUE)
}
### The state used for the single server system and accessor functions
#
# Event State (inside the state environment)
# Ls = number of customers with servers ( Ls == 0 or Ls == 1 for a single server system)
# Lq = number of customers waiting
# --- Queue accessors -------------------------------------------------------
# 'state' is an environment, so the mutators below update it in place and
# the change is visible to the caller.

queueIsEmpty <- function(state){
  # TRUE when nobody is waiting in line
  waiting <- state$Lq
  waiting == 0
}

incQueue <- function(state){
  # one more customer joins the waiting line
  state$Lq <- state$Lq + 1
}

decQueue <- function(state){
  # one customer leaves the waiting line (to be served)
  state$Lq <- state$Lq - 1
}

# --- Server accessors ------------------------------------------------------

serverIsIdle <- function(state){
  # TRUE when the single server has no customer in service
  in_service <- state$Ls
  in_service == 0
}

serverIsBusy <- function(state){
  # complement of serverIsIdle()
  !serverIsIdle(state)
}

serverBecomesBusy <- function(state){
  # the server takes a customer into service
  state$Ls <- state$Ls + 1
}

serverBecomesFree <- function(state){
  # service finished and nobody took the server's place
  state$Ls <- 0
}
#### Scheduling future events: after creation they are given to the scheduler to be placed on the FEL
schedule.arrivalEvent <- function(scheduler){
  # put the next arrival ("A") on the FEL after a random interarrival time
  schedule.futureEvent(scheduler, "A", create.interArrivalTime())
}
schedule.departureEvent <- function(scheduler){
  # put the matching departure ("D") on the FEL after a random service time
  schedule.futureEvent(scheduler, "D", create.serviceTime())
}
### The event update routines -- must return a updateStatsInfo object
### (using a new.singleServer.updateStatsInfo(state) call)
# arrival.eventUpdate with full comments describing when state is updated and when events are generated
# - this has been commented out
# - a more condensed version follows
#
# arrival.eventUpdate <- function(state, scheduler, verbose){
# # process the new customer who just arrived with this arrival event
# if(serverIsBusy(state)){ # if server is busy, have the new customer join the line
# # update states: two states - qLength and serverBusy
# # do not need to update server as server remains busy
# incQueue(state)
#
# # Generate Events
# # - no events to generate
# }else{ # else server can take the new customer immediately
# # update states: two states - qLength and serverBusy
# # do not need to update queue as queue remains empty
# serverBecomesBusy(state)
#
# # Generate Events
# # - new departure event created for when server will finish serving the new customer
# schedule.departureEvent(scheduler)
# }
#
# # Generate next arrival event
# # when one arrival occurs, next arrival generated and scheduled
# schedule.arrivalEvent(scheduler)
#
# print.all.scheduler(scheduler, verbose, after = "\n")
# print.state(state, verbose, header = " ", after = '\n')
# }
# Handle an arrival event: the new customer is queued if the server is
# occupied, otherwise service starts immediately and the matching
# departure is scheduled. Every arrival also books the next arrival,
# keeping the arrival stream alive.
arrival.eventUpdate <- function(state, scheduler, verbose){
  if (serverIsIdle(state)) {
    # server free: begin service now and book this customer's departure
    serverBecomesBusy(state)
    schedule.departureEvent(scheduler)
  } else {
    # server occupied: the new customer waits in the queue
    incQueue(state)
  }
  # schedule the next arrival onto the future event list
  schedule.arrivalEvent(scheduler)
  print.all.scheduler(scheduler, verbose, after = "\n")
  print.state(state, verbose, header = "    ", after = '\n')
}
# departure.eventUpdate with full comments describing when state is updated and when events are generated
# - this has been commented out
# - a more condensed version follows
#
# departure.eventUpdate <- function(state, scheduler, verbose){
# # process the server as the customer has now left
# # (the customer does not have to be considered as the customer has exited the system)
# if(queueIsEmpty(state)){ # if no new customer, server is idle
# # update states: two states - qLength and serverBusy
# # do not need to update queue as queue remains empty
# serverBecomesFree(state)
#
# # Generate Events
# # - no events to generate
# }else{ # else server takes a new customer from queue
# # update states: two states - qLength and serverBusy
# # do not need to update server as server remains busy
# decQueue(state) # queue shrinks by 1 since customer taken from queue
#
# # Generate Events
# # - since new customer taken from queue, new departure event created
# # for when server will finish serving the new customer
# schedule.departureEvent(scheduler)
# }
#
# print.all.scheduler(scheduler, verbose, after = "\n")
# print.state(state, verbose, header = " ", after = '\n')
# }
# Handle a departure event: if customers are waiting, the server takes
# the next one and that customer's departure is scheduled; otherwise the
# server goes idle. The departing customer itself needs no bookkeeping.
departure.eventUpdate <- function(state, scheduler, verbose){
  if (!queueIsEmpty(state)) {
    # pull the next customer from the queue and book their departure
    decQueue(state)
    schedule.departureEvent(scheduler)
  } else {
    # nobody waiting: the server idles
    serverBecomesFree(state)
  }
  print.all.scheduler(scheduler, verbose, after = "\n")
  print.state(state, verbose, header = "    ", after = '\n')
}
# end.eventUpdate with full comments describing when state is updated and when events are generated
# - this has been commented out
# - a more condensed version follows
#
# end.eventUpdate <- function(state, scheduler, verbose){
# # Must be associated with event name 'E'
#
# # Can include state cleanup routines - not needed here
# # Will not schedule any new events as they would not be run - the simulation has ended
#
# # The 'E' event need not be written (not even this stub) if cleanup is not needed. System automatically creates one.
# # In this case the default statsUpdate is used for the 'E' statsUpdate
#
# # If a different 'E' statsUpdate is required, but no cleanup state change is need,
# # - this stub must be "written" and associated with the 'E' event along with the statsUpdate function
# # - this is the case here, see the statsUpdate section for details
#
# print.all.scheduler(scheduler, verbose, after = "\n")
# print.state(state, verbose, header = " ", after = '\n')
# }
# 'E' (end-of-simulation) event: no state cleanup is needed here; this
# stub exists only so the custom end.statsUpdate can be attached to the
# same event (see the commented discussion above).
end.eventUpdate <- function(state, scheduler, verbose){
  print.all.scheduler(scheduler, verbose, after = "\n")
  print.state(state, verbose, header = "    ", after = '\n')
}
### Statistics section
# Inside the stats environment
#
# Basic Data
# N = Total Number of customers
# NW = Total Number of customers who have to queue
# WS = total service time
# WQ = total wait time
# T0 = total idle time (since this is a single server system)
# Tmax = total clock time
#
# Event State (a copy of the current state of the simulation is automatically added to stats)
# Ls = number of customers with servers ( Ls == 0 or Ls == 1 for a single server system)
# Lq = number of customers waiting
#
# Event Duration (i.e. time between the current event and the next event to be processed)
# timeDiff = current event duration
# Default per-event statistics update: accumulate time-weighted totals
# over the interval 'timeDiff' between the current and the next event.
# NOTE(review): this relies on 'stats' being an environment — with()
# then evaluates the `<-` assignments inside that environment, so the
# updates persist. It would silently do nothing for a plain list.
base.statsUpdate <- function(stats, verbose){ # this is the default stats update
  with(stats, {
    WS <- WS + timeDiff * Ls   # customer-time in service
    WQ <- WQ + timeDiff * Lq   # customer-time spent waiting
    if(Ls == 0)
      T0 <- T0 + timeDiff      # server idle time (single server)
    Tmax <- Tmax + timeDiff    # total simulated clock time
  })
  print.stats(stats, verbose, header = "    ", after = "\n\n")
}
# Arrival-event statistics: count the customer, and count them as a
# "had to wait" customer when the queue is non-empty at this point.
# NOTE(review): assumes this runs after arrival.eventUpdate, so Lq
# already reflects this arrival — confirm against the simulator driver.
arrival.statsUpdate <- function(stats, verbose){
  with(stats, {
    N <- N + 1         # total customers seen
    if(Lq > 0) {
      NW <- NW + 1     # customers that joined the queue
    }
  })
  # the base statistics update also needs to be run
  base.statsUpdate(stats, verbose)
}
# The end event changes no statistics, but we still need an explicit
# hook: the default (base.statsUpdate) would accumulate time up to the
# next scheduled event — an event that will never run — and so produce
# incorrect statistics. This function therefore actively does "no updates".
end.statsUpdate <- function(stats, verbose){
  # print the final stats when in 'verbose' mode
  print.stats(stats, verbose, header = "    ", after = "\n")
}
#### Print Section - for debugging - to be used in the update routines created in this file
# Debug printer for the simulation state: "<header>state(LS = x, LQ = y)<after>".
# NOTE(review): despite the dotted name, this is called directly rather
# than via S3 dispatch on a "state" class.
print.state <- function(state, verbose = TRUE, header = "", after = "") {
  # emit nothing unless verbose tracing is on
  if (!verbose) return(invisible(NULL))
  cat(header)
  cat("state(LS = ", state$Ls, ", LQ = ", state$Lq, ")", after, sep = "")
}
# Debug printer for the accumulated statistics (field legend is in the
# comments above the stats-update section). Called directly, not via S3.
print.stats <- function(stats, verbose = TRUE, header = "", after = "") {
  if(verbose){
    cat(header)
    with(stats, {
      cat("stats(N = ", N, ", NW = ", NW, ", WS = ", WS, ", WQ = ", WQ, ", T0 = ", T0, sep = "")
      cat(", Tmax = ", Tmax, sep = "")
    })
    cat(")")
    cat(after)
  }
}
# Debug printer for the scheduler: current clock, next-event timing and
# name, and the future events scheduled during this update.
print.all.scheduler <- function(scheduler, verbose, header = "", after = ""){
  if(verbose){
    cat(header)
    print.clock(scheduler, header = "Clock = ")
    print.nextEvent.clockTime(scheduler, header = " -> ")
    print.time.to.nextEvent(scheduler, header = " (timeDiff = ", after = ")")
    print.currentEvent.name(scheduler, header = "\n  Event = ")
    print.nextEvent.name(scheduler, header = " (Next = ", after = ")\n")
    print.eventsScheduledThisUpdate(scheduler, header = "  Future events = ")
    cat(after)
  }
}
|
/2460/DES/singleServerSystem.R
|
no_license
|
MrRobot245/Cis-2460
|
R
| false
| false
| 11,297
|
r
|
####################################################################################################
### The single server simulator system
#
# - to be used with the discrete event simulator
# - analogous to how the montyHall system was used by the monte carlo simulator
#
#
### To use the single server simulator system once in the discrete event simulator (in full verbose mode),
### enter the following function in the console
#
# results <- discreteEventSimulator(new.singleServerSystem(), endTime = 100 , verbose = TRUE)
# print.stats(results)
#
### To use the single server simulator system in the discrete event simulator repeatedly,say 2000 times.
# call the above in a for loop, storing the results in a list. or use the simulation.replications()
# function as in the example below (wich uses reps = 15). In either case the results will be a list
# of stats environments that can be printed with print.stats()).
# Below is an example with a rep = 15.
#
# singleServerSimulator <- function(){
# discreteEventSimulator(new.singleServerSystem(), endTime = 100)
# }
#
# results <- simulation.replications(15, singleServerSimulator)
#
# for(i in 1:length(results)){
# print.stats(results[[i]], header = paste("i=", i, ": ", sep=""), after = "\n")
# }
#
######################################################################################
# The single server system
#
# The simulator system for the simple single-server simulator from Chapter 3 of the text book
new.singleServerSystem <- function(){
new.discreteEventSystem(firstEvent.name = "A", measure = identity,
state = new.state(Ls = 0, Lq = 0),
stats = new.stats(N = 0, NW = 0, WS = 0, WQ = 0, T0 = 0, Tmax = 0),
updateFn.stats.default = base.statsUpdate,
updateFns = new.updateFns(A = c(arrival.eventUpdate, arrival.statsUpdate),
D = departure.eventUpdate,
E = c(end.eventUpdate, end.statsUpdate)))
}
### The random variables used in the simulator - interArrivalTime and serviceTime
create.interArrivalTime <- function(){
sample(1:8, 1, replace = TRUE)
}
create.serviceTime <- function(){
sample(3:12, 1, replace = TRUE)
}
### The state used for the single server system and accessor functions
#
# Event State (inside the state environment)
# Ls = number of customers with servers ( Ls == 0 or Ls == 1 for a single server system)
# Lq = number of customers waiting
# --- Queue accessors -------------------------------------------------------
# 'state' is an environment, so the mutators below update it in place and
# the change is visible to the caller.

queueIsEmpty <- function(state){
  # TRUE when nobody is waiting in line
  waiting <- state$Lq
  waiting == 0
}

incQueue <- function(state){
  # one more customer joins the waiting line
  state$Lq <- state$Lq + 1
}

decQueue <- function(state){
  # one customer leaves the waiting line (to be served)
  state$Lq <- state$Lq - 1
}

# --- Server accessors ------------------------------------------------------

serverIsIdle <- function(state){
  # TRUE when the single server has no customer in service
  in_service <- state$Ls
  in_service == 0
}

serverIsBusy <- function(state){
  # complement of serverIsIdle()
  !serverIsIdle(state)
}

serverBecomesBusy <- function(state){
  # the server takes a customer into service
  state$Ls <- state$Ls + 1
}

serverBecomesFree <- function(state){
  # service finished and nobody took the server's place
  state$Ls <- 0
}
#### Scheduling future events: after creation they are given to the scheduler to be placed on the FEL
schedule.arrivalEvent <- function(scheduler){
schedule.futureEvent(scheduler, "A", create.interArrivalTime())
}
schedule.departureEvent <- function(scheduler){
schedule.futureEvent(scheduler, "D", create.serviceTime())
}
### The event update routines -- must return a updateStatsInfo object
### (using a new.singleServer.updateStatsInfo(state) call)
# arrival.eventUpdate with full comments describing when state is updated and when events are generated
# - this has been commented out
# - a more condensed version follows
#
# arrival.eventUpdate <- function(state, scheduler, verbose){
# # process the new customer who just arrived with this arrival event
# if(serverIsBusy(state)){ # if server is busy, have the new customer join the line
# # update states: two states - qLength and serverBusy
# # do not need to update server as server remains busy
# incQueue(state)
#
# # Generate Events
# # - no events to generate
# }else{ # else server can take the new customer immediately
# # update states: two states - qLength and serverBusy
# # do not need to update queue as queue remains empty
# serverBecomesBusy(state)
#
# # Generate Events
# # - new departure event created for when server will finish serving the new customer
# schedule.departureEvent(scheduler)
# }
#
# # Generate next arrival event
# # when one arrival occurs, next arrival generated and scheduled
# schedule.arrivalEvent(scheduler)
#
# print.all.scheduler(scheduler, verbose, after = "\n")
# print.state(state, verbose, header = " ", after = '\n')
# }
# Handle an arrival event: the new customer is queued if the server is
# occupied, otherwise service starts immediately and the matching
# departure is scheduled. Every arrival also books the next arrival,
# keeping the arrival stream alive.
arrival.eventUpdate <- function(state, scheduler, verbose){
  if (serverIsIdle(state)) {
    # server free: begin service now and book this customer's departure
    serverBecomesBusy(state)
    schedule.departureEvent(scheduler)
  } else {
    # server occupied: the new customer waits in the queue
    incQueue(state)
  }
  # schedule the next arrival onto the future event list
  schedule.arrivalEvent(scheduler)
  print.all.scheduler(scheduler, verbose, after = "\n")
  print.state(state, verbose, header = "    ", after = '\n')
}
# departure.eventUpdate with full comments describing when state is updated and when events are generated
# - this has been commented out
# - a more condensed version follows
#
# departure.eventUpdate <- function(state, scheduler, verbose){
# # process the server as the customer has now left
# # (the customer does not have to be considered as the customer has exited the system)
# if(queueIsEmpty(state)){ # if no new customer, server is idle
# # update states: two states - qLength and serverBusy
# # do not need to update queue as queue remains empty
# serverBecomesFree(state)
#
# # Generate Events
# # - no events to generate
# }else{ # else server takes a new customer from queue
# # update states: two states - qLength and serverBusy
# # do not need to update server as server remains busy
# decQueue(state) # queue shrinks by 1 since customer taken from queue
#
# # Generate Events
# # - since new customer taken from queue, new departure event created
# # for when server will finish serving the new customer
# schedule.departureEvent(scheduler)
# }
#
# print.all.scheduler(scheduler, verbose, after = "\n")
# print.state(state, verbose, header = " ", after = '\n')
# }
departure.eventUpdate <- function(state, scheduler, verbose){
  # Event update for a service completion.
  # The departing customer has left the system: either idle the server
  # (empty queue) or begin serving the next queued customer and schedule
  # that customer's departure.
  if (queueIsEmpty(state)) {
    serverBecomesFree(state)
  } else {
    decQueue(state)                      # next customer leaves the queue
    schedule.departureEvent(scheduler)   # departure for this new service
  }
  print.all.scheduler(scheduler, verbose, after = "\n")
  print.state(state, verbose, header = "    ", after = '\n')
}
# end.eventUpdate with full comments describing when state is updated and when events are generated
# - this has been commented out
# - a more condensed version follows
#
# end.eventUpdate <- function(state, scheduler, verbose){
# # Must be associated with event name 'E'
#
# # Can include state cleanup routines - not needed here
# # Will not schedule any new events as they would not be run - the simulation has ended
#
# # The 'E' event need not be written (not even this stub) if cleanup is not needed. System automatically creates one.
# # In this case the default statsUpdate is used for the 'E' statsUpdate
#
# # If a different 'E' statsUpdate is required, but no cleanup state change is need,
# # - this stub must be "written" and associated with the 'E' event along with the statsUpdate function
# # - this is the case here, see the statsUpdate section for details
#
# print.all.scheduler(scheduler, verbose, after = "\n")
# print.state(state, verbose, header = " ", after = '\n')
# }
end.eventUpdate <- function(state, scheduler, verbose){
  # Event update for the simulation 'E' (end) event: deliberately a no-op.
  # No state changes and no new events (the simulation is over); this stub
  # exists so a custom end.statsUpdate can be associated with the 'E' event.
  print.all.scheduler(scheduler, verbose, after = "\n")
  print.state(state, verbose, header = "    ", after = '\n')
}
### Statistics section
# Inside the stats environment
#
# Basic Data
# N = Total Number of customers
# NW = Total Number of customers who have to queue
# WS = total service time
# WQ = total wait time
# T0 = total idle time (since this is a single server system)
# Tmax = total clock time
#
# Event State (a copy of the current state of the simulation is automatically added to stats)
# Ls = number of customers with servers ( Ls == 0 or Ls == 1 for a single server system)
# Lq = number of customers waiting
#
# Event Duration (i.e. time between the current event and the next event to be processed)
# timeDiff = current event duration
base.statsUpdate <- function(stats, verbose){
  # Default per-event statistics update: accumulate the time-weighted totals
  # over the interval ('timeDiff') between this event and the next one.
  # 'stats' is an environment (see the stats section comments above), so the
  # assignments below persist for the caller.
  stats$WS <- stats$WS + stats$timeDiff * stats$Ls   # total service time
  stats$WQ <- stats$WQ + stats$timeDiff * stats$Lq   # total waiting time
  if (stats$Ls == 0) {
    stats$T0 <- stats$T0 + stats$timeDiff            # total server idle time
  }
  stats$Tmax <- stats$Tmax + stats$timeDiff          # total clock time
  print.stats(stats, verbose, header = "    ", after = "\n\n")
}
arrival.statsUpdate <- function(stats, verbose){
  # Statistics update for an arrival event: count the new customer, and count
  # a "had to wait" customer when the queue is non-empty on arrival.
  # 'stats' is an environment, so the assignments persist for the caller.
  stats$N <- stats$N + 1
  if (stats$Lq > 0) {
    stats$NW <- stats$NW + 1
  }
  # The base (time-weighted) statistics update must also run for this event.
  base.statsUpdate(stats, verbose)
}
end.statsUpdate <- function(stats, verbose){
  # There are no updates as an 'end' event has no effect.
  # However we can't use the updateFn.stats.default as that runs the base.statsUpdate,
  # which would compute stats based on a duration from the end event to the next scheduled event
  # that will never happen and so produce incorrect statistics.
  # Consequently we need an explicit end.statsUpdate that actively does "no updates".
  # We will print the final stats if in 'verbose' mode.
  print.stats(stats, verbose, header = "    ", after = "\n")
}
#### Print Section - for debugging - to be used in the update routines created in this file
print.state <- function(state, verbose = TRUE, header = "", after = "") {
  # Debug printer for the simulation state: number in service (Ls) and
  # number queued (Lq). Prints nothing unless 'verbose' is TRUE.
  if (!verbose) {
    return(invisible(NULL))
  }
  cat(header)
  cat("state(LS = ", state$Ls, ", LQ = ", state$Lq, ")", sep = "")
  cat(after)
}
print.stats <- function(stats, verbose = TRUE, header = "", after = "") {
  # Debug printer for the accumulated statistics counters (see the stats
  # section comments above for field meanings). No output unless 'verbose'.
  if (!verbose) {
    return(invisible(NULL))
  }
  cat(header)
  cat("stats(N = ", stats$N, ", NW = ", stats$NW, ", WS = ", stats$WS,
      ", WQ = ", stats$WQ, ", T0 = ", stats$T0, ", Tmax = ", stats$Tmax,
      ")", sep = "")
  cat(after)
}
print.all.scheduler <- function(scheduler, verbose, header = "", after = ""){
  # Debug dump of the scheduler: current clock, the next event's clock time
  # and the gap to it, the current and next event names, and the events
  # scheduled during this update. Prints nothing unless 'verbose' is TRUE.
  if(verbose){
    cat(header)
    print.clock(scheduler, header = "Clock = ")
    print.nextEvent.clockTime(scheduler, header = " -> ")
    print.time.to.nextEvent(scheduler, header = " (timeDiff = ", after = ")")
    print.currentEvent.name(scheduler, header = "\n  Event = ")
    print.nextEvent.name(scheduler, header = " (Next = ", after = ")\n")
    print.eventsScheduledThisUpdate(scheduler, header = "  Future events = ")
    cat(after)
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sesh.R
\name{read_sesh}
\alias{read_sesh}
\title{Read a saved CSV to see critical package info.}
\usage{
read_sesh(path)
}
\arguments{
\item{path}{Valid path to a sesh saved CSV.}
}
\description{
Read a saved CSV to see critical package info.
}
|
/man/read_sesh.Rd
|
no_license
|
nathancday/sesh
|
R
| false
| true
| 323
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sesh.R
\name{read_sesh}
\alias{read_sesh}
\title{Read a saved CSV to see critical package info.}
\usage{
read_sesh(path)
}
\arguments{
\item{path}{Valid path to a sesh saved CSV.}
}
\description{
Read a saved CSV to see critical package info.
}
|
# Simulation design: true parameter values for a general two-group model of
# treatment compliance (delta) and outcome progression (Y).
# NOTE(review): 'type' is not defined in this script -- it must exist in the
# environment that sources this file (type == 2 selects the mixture model);
# confirm against the caller.
p <- 2 # Number of baseline covariates
# Set true values of coefficients
b0_or <- rep(0,p+1) # E(X,Y_base)
b1_or <- rep(0,2*p+4) # log(delta) | int, X, lagY, log(A), X*log(A), lagY*log(A)
b2_or <- rep(0,2*p+4) # Y | int, X, lagY, D-6, X*(D-6), lagY*(D-6)
b1_or[5] <- 0.9 # Effect of A on D
b1_or[6] <- 0.1 # Effect of X[1]*A on D
b2_or[1] <- 0.1 # Intercept
b2_or[3] <- 0.2 # Effect of X[2] on Y
b2_or[5] <- 0.2 # Effect of D on Y
b2_or[4] <- 0.9 # Effect of Y[t-1] on Y
b2_or[8] <- 0.02 # Effect of D*Y[t-1] on Y
# Stack a second coefficient row so rows index the two groups
b0_or <- rbind(b0_or,b0_or)
b1_or <- rbind(b1_or,b1_or)
b2_or <- rbind(b2_or,b2_or)
# Group-2 overrides of selected coefficients
b0_or[2,1] <- 1
b1_or[2,] <- 0
b1_or[2,1] <- log(5.3)
b2_or[2,2] <- 0.3
b2_or[2,3] <- 0
b2_or[2,5] <- -0.2
b2_or[2,8] <- 0
s0_or <- 0.5+0.5*diag(p+1) # Covariance matrix of (X,Y_base): 1 on diagonal, 0.5 off
tc_or <- t(chol(s0_or)) # Lower-triangular Cholesky factor of s0_or
# Standard deviation of the error term in the compliance model (s1)
# and the progression model (s2)
s1_or <- .1
s2_or <- .5
# Group assignment probabilities
prob_or <- c(1,0) # Group assignment probability (single group)
if(type==2){
  prob_or <- c(0.8,0.2) # Group assignment probability (mixture groups)
}
n <- 1000 # Sample size
# True model parameters bundled for downstream simulation code
params_or <- list(b0=b0_or,b1=b1_or,b2=b2_or,
                  tchols0=tc_or,s1=s1_or,s2=s2_or,
                  prob=prob_or)
|
/binary_covariate/SimDesign.R
|
no_license
|
qianguan/BayesianPolicySearch
|
R
| false
| false
| 1,451
|
r
|
# Simulation design: true parameter values for a general two-group model of
# treatment compliance (delta) and outcome progression (Y).
# NOTE(review): 'type' is not defined in this script -- it must exist in the
# environment that sources this file (type == 2 selects the mixture model);
# confirm against the caller.
p <- 2 # Number of baseline covariates
# Set true values of coefficients
b0_or <- rep(0,p+1) # E(X,Y_base)
b1_or <- rep(0,2*p+4) # log(delta) | int, X, lagY, log(A), X*log(A), lagY*log(A)
b2_or <- rep(0,2*p+4) # Y | int, X, lagY, D-6, X*(D-6), lagY*(D-6)
b1_or[5] <- 0.9 # Effect of A on D
b1_or[6] <- 0.1 # Effect of X[1]*A on D
b2_or[1] <- 0.1 # Intercept
b2_or[3] <- 0.2 # Effect of X[2] on Y
b2_or[5] <- 0.2 # Effect of D on Y
b2_or[4] <- 0.9 # Effect of Y[t-1] on Y
b2_or[8] <- 0.02 # Effect of D*Y[t-1] on Y
# Stack a second coefficient row so rows index the two groups
b0_or <- rbind(b0_or,b0_or)
b1_or <- rbind(b1_or,b1_or)
b2_or <- rbind(b2_or,b2_or)
# Group-2 overrides of selected coefficients
b0_or[2,1] <- 1
b1_or[2,] <- 0
b1_or[2,1] <- log(5.3)
b2_or[2,2] <- 0.3
b2_or[2,3] <- 0
b2_or[2,5] <- -0.2
b2_or[2,8] <- 0
s0_or <- 0.5+0.5*diag(p+1) # Covariance matrix of (X,Y_base): 1 on diagonal, 0.5 off
tc_or <- t(chol(s0_or)) # Lower-triangular Cholesky factor of s0_or
# Standard deviation of the error term in the compliance model (s1)
# and the progression model (s2)
s1_or <- .1
s2_or <- .5
# Group assignment probabilities
prob_or <- c(1,0) # Group assignment probability (single group)
if(type==2){
  prob_or <- c(0.8,0.2) # Group assignment probability (mixture groups)
}
n <- 1000 # Sample size
# True model parameters bundled for downstream simulation code
params_or <- list(b0=b0_or,b1=b1_or,b2=b2_or,
                  tchols0=tc_or,s1=s1_or,s2=s2_or,
                  prob=prob_or)
|
# Fuzzer-generated regression input for gjam's internal tnormRcpp sampler
# (args: hi, lo, mu, sig). 'lo' is a subnormal double with lo > hi and
# sig = 0 -- a degenerate boundary case; str() displays whatever is returned.
testlist <- list(hi = 0, lo = 9.83190635224081e-322, mu = 0, sig = 0)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result)
|
/gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610044728-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 126
|
r
|
# Fuzzer-generated regression input for gjam's internal tnormRcpp sampler
# (args: hi, lo, mu, sig). 'lo' is a subnormal double with lo > hi and
# sig = 0 -- a degenerate boundary case; str() displays whatever is returned.
testlist <- list(hi = 0, lo = 9.83190635224081e-322, mu = 0, sig = 0)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat_echo.R
\name{stat_echo}
\alias{stat_echo}
\title{Replicate copies of the original data for a blur/echo effect}
\usage{
stat_echo(mapping = NULL, data = NULL, geom = "point",
position = "identity", ..., na.rm = FALSE, n = 3,
alpha_factor = 0.5, size_increment = 1, x_offset = 0,
y_offset = 0, show.legend = NA, inherit.aes = TRUE)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[=aes]{aes()}} or
\code{\link[=aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link[=ggplot]{ggplot()}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link[=fortify]{fortify()}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data.}
\item{geom}{The geometric object to use to display the data}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{...}{Other arguments passed on to \code{\link[=layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{colour = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
\item{na.rm}{If \code{FALSE}, the default, missing values are removed with
a warning. If \code{TRUE}, missing values are silently removed.}
\item{n}{number of echoes}
\item{alpha_factor}{multiplication factor for 'alpha' with each echo}
\item{size_increment}{size change with each echo}
\item{x_offset, y_offset}{position offset for each echo}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link[=borders]{borders()}}.}
}
\description{
Replicate copies of the original data for a blur/echo effect
}
|
/man/stat_echo.Rd
|
permissive
|
coolbutuseless/ggecho
|
R
| false
| true
| 2,720
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat_echo.R
\name{stat_echo}
\alias{stat_echo}
\title{Replicate copies of the original data for a blur/echo effect}
\usage{
stat_echo(mapping = NULL, data = NULL, geom = "point",
position = "identity", ..., na.rm = FALSE, n = 3,
alpha_factor = 0.5, size_increment = 1, x_offset = 0,
y_offset = 0, show.legend = NA, inherit.aes = TRUE)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[=aes]{aes()}} or
\code{\link[=aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link[=ggplot]{ggplot()}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link[=fortify]{fortify()}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data.}
\item{geom}{The geometric object to use to display the data}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{...}{Other arguments passed on to \code{\link[=layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{colour = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
\item{na.rm}{If \code{FALSE}, the default, missing values are removed with
a warning. If \code{TRUE}, missing values are silently removed.}
\item{n}{number of echoes}
\item{alpha_factor}{multiplication factor for 'alpha' with each echo}
\item{size_increment}{size change with each echo}
\item{x_offset, y_offset}{position offset for each echo}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link[=borders]{borders()}}.}
}
\description{
Replicate copies of the original data for a blur/echo effect
}
|
## Entry point for the Argentina fixed-income valuation process: sets up the
## environment, then sources the parameter, curve-generation and valuation
## steps in order.
## Working directory:
setwd("D:/OneDrive - Inversiones Internacionales Grupo Sura S.A/Argentina/valoración/");options(warn=-1, scipen=100)
rm(list=lsf.str());rm(list=ls(all=TRUE))
## Bloomberg input directory:
bb_dir <- "X:/SIM/SOLUCIONES/ARGENTINA/input/"
curr_date <- as.Date("31082018", "%d%m%Y")
# curr_date <- Sys.Date() # current date
#1. INITIAL PARAMETERS (libraries, parameters): -------------------------------------------------------------------------------
source("source/params_inter.R", echo=FALSE)
#------------------------------------------------------------------------------------------------------------------------------
#2. GENERATE REFERENCE ZERO-COUPON CURVES: ------------------------------------------------------------------------------------
source("source/ref_curves.R", echo=FALSE)
#------------------------------------------------------------------------------------------------------------------------------
#3. PUBLISH INTERNATIONAL FIXED INCOME: ---------------------------------------------------------------------------------------
update_curves <- TRUE # If FALSE, the valuation curves are those generated the previous day.
source("source/valuation.R", echo=FALSE)
#------------------------------------------------------------------------------------------------------------------------------
|
/__main__.R
|
no_license
|
veldanie/ProcesoValoracionAR
|
R
| false
| false
| 1,569
|
r
|
## Entry point for the Argentina fixed-income valuation process: sets up the
## environment, then sources the parameter, curve-generation and valuation
## steps in order.
## Working directory:
setwd("D:/OneDrive - Inversiones Internacionales Grupo Sura S.A/Argentina/valoración/");options(warn=-1, scipen=100)
rm(list=lsf.str());rm(list=ls(all=TRUE))
## Bloomberg input directory:
bb_dir <- "X:/SIM/SOLUCIONES/ARGENTINA/input/"
curr_date <- as.Date("31082018", "%d%m%Y")
# curr_date <- Sys.Date() # current date
#1. INITIAL PARAMETERS (libraries, parameters): -------------------------------------------------------------------------------
source("source/params_inter.R", echo=FALSE)
#------------------------------------------------------------------------------------------------------------------------------
#2. GENERATE REFERENCE ZERO-COUPON CURVES: ------------------------------------------------------------------------------------
source("source/ref_curves.R", echo=FALSE)
#------------------------------------------------------------------------------------------------------------------------------
#3. PUBLISH INTERNATIONAL FIXED INCOME: ---------------------------------------------------------------------------------------
update_curves <- TRUE # If FALSE, the valuation curves are those generated the previous day.
source("source/valuation.R", echo=FALSE)
#------------------------------------------------------------------------------------------------------------------------------
|
frss_apikey <- readLines("resources/frss_apikey.txt") # API key for the Full-Text RSS service
# Fetch an article's full text through a Full-Text RSS service and return its
# title, body, creator, publication date and all hyperlinks found in the body.
maintext <- function(inputURL, host="http://frss.schloegl.net/", parsed=TRUE){
  library(XML)
  library(rjson)
  # Build the service request and parse the JSON feed it returns.
  request_url <- paste0(host, "makefulltextfeed.php?key=", frss_apikey,
                        "&format=json&url=", inputURL)
  response <- fromJSON(file=request_url, method='C')
  channel <- response$rss$channel
  body_html <- channel$item$description
  # Parse the article body and collect every link target in it.
  body_doc <- htmlParse(body_html, asText=TRUE, encoding="UTF-8")
  hrefs <- xpathSApply(body_doc, "//a/@href")
  if (parsed) {
    body_html <- body_doc   # return the parsed document instead of raw HTML
  }
  list(Title=as.character(channel$title),
       Text=body_html,
       Creator=channel$item$dc_creator,
       Date=channel$item$pubDate,
       Links=as.vector(hrefs))
}
|
/frss_function.R
|
no_license
|
supersambo/r_functions
|
R
| false
| false
| 812
|
r
|
frss_apikey <- readLines("resources/frss_apikey.txt") # API key for the Full-Text RSS service
# Fetch an article's full text through a Full-Text RSS service and return its
# title, body, creator, publication date and all hyperlinks found in the body.
maintext <- function(inputURL, host="http://frss.schloegl.net/", parsed=TRUE){
  library(XML)
  library(rjson)
  # Build the service request and parse the JSON feed it returns.
  request_url <- paste0(host, "makefulltextfeed.php?key=", frss_apikey,
                        "&format=json&url=", inputURL)
  response <- fromJSON(file=request_url, method='C')
  channel <- response$rss$channel
  body_html <- channel$item$description
  # Parse the article body and collect every link target in it.
  body_doc <- htmlParse(body_html, asText=TRUE, encoding="UTF-8")
  hrefs <- xpathSApply(body_doc, "//a/@href")
  if (parsed) {
    body_html <- body_doc   # return the parsed document instead of raw HTML
  }
  list(Title=as.character(channel$title),
       Text=body_html,
       Creator=channel$item$dc_creator,
       Date=channel$item$pubDate,
       Links=as.vector(hrefs))
}
|
library(rvest)
scrape_sample_annot <- function(gse_id){
  # Fetch per-sample (GSM) annotation for a GEO series (GSE) via Entrez and
  # attach the sample characteristics scraped from each GSM page.
  # Returns a data frame with one row per sample.
  gds_search <- rentrez::entrez_search(db="gds", term=paste0(gse_id, "[ACCN] AND gsm[ETYP]"))
  search_res <- rentrez::entrez_summary(db="gds", id=gds_search$ids)
  res <- plyr::ldply(lapply(search_res, unlist))
  gsm_ids <- res$accession
  # Bug fix: the scraped characteristics were stored in a throwaway variable
  # ('bla') while subset() referenced an undefined bare name
  # SAMPLE_CHARACTERISTICS, so the function errored. Attach the
  # characteristics as a real column, collapsing multiple entries per sample.
  res$sample_characteristics <- vapply(
    lapply(gsm_ids, scrape_sample_char),
    function(ch) paste(ch, collapse = "; "),
    character(1)
  )
  res <- subset(res, select = c("gse", "accession", "gpl", "ftplink",
                                "title", "summary", "sample_characteristics"))
  res  # explicitly return the annotated data frame
}
scrape_sample_char <- function(gsm_id){
  # Scrape the "Characteristics" field from a GEO sample (GSM) accession page.
  # Bug fix: the parameter is 'gsm_id' but the body referenced an undefined
  # 'sample_id', so every call failed with "object 'sample_id' not found".
  url <- paste0("https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=", gsm_id)
  webpage <- read_html(url)
  # The characteristics live in the table cell following the label cell.
  chars <- html_nodes(webpage, xpath="//td[.='Characteristics']/following-sibling::td[1]")
  chars <- gsub("<br>|\n", " ", chars)   # flatten line breaks inside the cell
  chars <- html_text(xml2::as_xml_document(chars))
  chars <- trimws(chars)
  return(chars)
}
|
/cemitooldb/R/deprecate.R
|
no_license
|
pedrostrusso/cemitooldb
|
R
| false
| false
| 881
|
r
|
library(rvest)
scrape_sample_annot <- function(gse_id){
  # Fetch per-sample (GSM) annotation for a GEO series (GSE) via Entrez and
  # attach the sample characteristics scraped from each GSM page.
  # Returns a data frame with one row per sample.
  gds_search <- rentrez::entrez_search(db="gds", term=paste0(gse_id, "[ACCN] AND gsm[ETYP]"))
  search_res <- rentrez::entrez_summary(db="gds", id=gds_search$ids)
  res <- plyr::ldply(lapply(search_res, unlist))
  gsm_ids <- res$accession
  # Bug fix: the scraped characteristics were stored in a throwaway variable
  # ('bla') while subset() referenced an undefined bare name
  # SAMPLE_CHARACTERISTICS, so the function errored. Attach the
  # characteristics as a real column, collapsing multiple entries per sample.
  res$sample_characteristics <- vapply(
    lapply(gsm_ids, scrape_sample_char),
    function(ch) paste(ch, collapse = "; "),
    character(1)
  )
  res <- subset(res, select = c("gse", "accession", "gpl", "ftplink",
                                "title", "summary", "sample_characteristics"))
  res  # explicitly return the annotated data frame
}
scrape_sample_char <- function(gsm_id){
  # Scrape the "Characteristics" field from a GEO sample (GSM) accession page.
  # Bug fix: the parameter is 'gsm_id' but the body referenced an undefined
  # 'sample_id', so every call failed with "object 'sample_id' not found".
  url <- paste0("https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=", gsm_id)
  webpage <- read_html(url)
  # The characteristics live in the table cell following the label cell.
  chars <- html_nodes(webpage, xpath="//td[.='Characteristics']/following-sibling::td[1]")
  chars <- gsub("<br>|\n", " ", chars)   # flatten line breaks inside the cell
  chars <- html_text(xml2::as_xml_document(chars))
  chars <- trimws(chars)
  return(chars)
}
|
# ==============================================================================
# Functions for working with FILTERS for the selection of nodes and edges in
# networks, including operations to import and export filters. In the Cytoscape
# user interface, filters are managed in the Select tab of the Control Panel.
#
# ==============================================================================
#' @title Apply Filter
#'
#' @description Run an existing filter by supplying the filter name.
#' @param filter.name Name of filter to apply. Default is "Default filter".
#' @param hide Whether to hide filtered out nodes and edges. Default is FALSE.
#' Ignored if all nodes or edges are filtered out. This is an alternative to
#' filtering for node and edge selection.
#' @param network (optional) Name or SUID of the network. Default is the
#' "current" network active in Cytoscape.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return List of selected nodes and edges.
#' @details Known bug: selection (or hiding) of edges using edge-based column
#' filters does not work. As a workaround, simply repeat the createColumnFilter
#' operation to perform selection (or hiding) of edges.
#' @examples \donttest{
#' applyFilter('myFilter')
#' applyFilter('myFilter', hide = TRUE)
#' }
#' @seealso unhideAll
#' @importFrom RJSONIO toJSON
#' @export
applyFilter<-function(filter.name="Default filter", hide=FALSE, network=NULL,
                      base.url = .defaultBaseUrl){
    # Fail fast if the named filter has not been defined in Cytoscape.
    if(!filter.name %in% getFilterList(base.url))
        stop (sprintf("Filter %s does not exist.",filter.name))
    # Resolve the network and make it current so the filter acts on it.
    net.SUID <- getNetworkSuid(network,base.url)
    setCurrentNetwork(net.SUID, base.url)
    # Assemble the 'filter apply' command string for the Commands API.
    cmd.container <- paste('container', 'filter', sep='=')
    cmd.name <- paste('name',filter.name,sep='=')
    cmd.network <- paste('network=SUID',net.SUID, sep=':')
    commandsPOST(paste('filter apply',
                       cmd.container,
                       cmd.name,
                       cmd.network,
                       sep=' '), base.url)
    # Collect the resulting selection (optionally hiding the complement).
    .checkSelected(hide, net.SUID, base.url)
}
# ------------------------------------------------------------------------------
#' @title Create Column Filter
#'
#' @description Creates a filter to control node or edge selection. Works on
#' columns of boolean, string, numeric and lists. Note the unique restrictions
#' for criterion and predicate depending on the type of column being filtered.
#' @param filter.name Name for filter.
#' @param column Table column to base filter upon.
#' @param criterion For boolean columns: TRUE or FALSE. For string columns: a
#' string value, e.g., "hello". If the predicate is REGEX then this can be a
#' regular expression as accepted by the Java Pattern class
#' (https://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html). For
#' numeric columns: If the predicate is BETWEEN or IS_NOT_BETWEEN then this is
#' a two-element vector of numbers, example: c(1,5), otherwise a single number.
#' @param predicate For boolean columns: IS, IS_NOT. For string columns: IS,
#' IS_NOT, CONTAINS, DOES_NOT_CONTAIN, REGEX. For numeric columns: IS, IS_NOT,
#' GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, BETWEEN,
#' IS_NOT_BETWEEN
#' @param caseSensitive (optional) If string matching should be case sensitive.
#' Default is FALSE.
#' @param anyMatch (optional) Only applies to List columns. If true then at least
#' one element in the list must pass the filter, if false then all the elements
#' in the list must pass the filter. Default is TRUE.
#' @param type (optional) Apply filter to "nodes" (default) or "edges".
#' @param hide Whether to hide filtered out nodes and edges. Default is FALSE.
#' Ignored if all nodes or edges are filtered out. This is an alternative to
#' filtering for node and edge selection.
#' @param network (optional) Name or SUID of the network. Default is the
#' "current" network active in Cytoscape.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @param apply (bool) True to execute filter immediately (default); False to
#' define filter but not execute it (available in Cytoscape 3.9+).
#' @return List of selected nodes and edges.
#' @examples \donttest{
#' createColumnFilter('myFilter', 'log2FC', c(-1,1), "IS_NOT_BETWEEN")
#' createColumnFilter('myFilter', 'pValue', 0.05, "LESS_THAN")
#' createColumnFilter('myFilter', 'function', "kinase", "CONTAINS", FALSE)
#' createColumnFilter('myFilter', 'name', "^Y.*C$", "REGEX")
#' createColumnFilter('myFilter', 'isTarget', TRUE , "IS", apply=FALSE)
#' createColumnFilter('myFilter', 'isTarget', TRUE , "IS", hide=TRUE)
#' }
#' @importFrom RJSONIO toJSON
#' @export
createColumnFilter<-function(filter.name, column, criterion, predicate,
                             caseSensitive=FALSE, anyMatch=TRUE,
                             type="nodes", hide = FALSE, network = NULL,
                             base.url = .defaultBaseUrl,
                             apply = TRUE){
    setCurrentNetwork(network,base.url)
    # Fail fast if the target column is missing from the node/edge table.
    if(!column %in% getTableColumnNames(substr(type,1,4), base.url = base.url))
        stop (sprintf("Column %s does not exist in the %s table", column, substr(type,1,4)))
    # Normalize the criterion/predicate combination into the forms accepted by
    # the Cytoscape ColumnFilter transformer.
    # Note: '&&' (not '&') is used below -- these are scalar conditions.
    if(predicate %in% c("BETWEEN","IS_NOT_BETWEEN")){
        if(length(criterion) != 2)
            stop ("criterion must be a list of two numeric values, e.g., c(0.5,2.0)")
    } else if (predicate %in% c("GREATER_THAN", "GREATER_THAN_OR_EQUAL")){
        # manually feed max bound so that UI is also correct
        col.vals <- getTableColumns(substr(type,1,4), column, base.url = base.url)
        crit.max <- max(na.omit(col.vals))
        criterion <- c(criterion[1], crit.max)
        # same trick to fix UI does not work for LESS_THAN cases
    } else if (is.numeric(criterion[1]) && predicate == "IS"){
        # numeric equality is expressed as a zero-width BETWEEN range
        criterion <- c(criterion[1],criterion[1])
        predicate <- "BETWEEN"
    } else if (is.numeric(criterion[1]) && predicate == "IS_NOT"){
        criterion <- c(criterion[1],criterion[1])
        predicate <- "IS_NOT_BETWEEN"
    } else if (is.logical(criterion[1]) && predicate == "IS_NOT"){
        # NOTE(review): the criterion is negated but predicate is left as
        # IS_NOT -- confirm the backend accepts IS_NOT for boolean columns;
        # otherwise predicate should be reset to "IS" here.
        criterion <- !criterion
    }
    # Build the filter definition and POST it to Cytoscape.
    cmd.json <- list(id="ColumnFilter", parameters=list(criterion=criterion,
                                                        columnName=column,
                                                        predicate=predicate,
                                                        caseSensitive=caseSensitive,
                                                        anyMatch=anyMatch,
                                                        type=type))
    cmd.body <- toJSON(list(name=filter.name, json=cmd.json))
    if(apply==FALSE){
        # Deferred (define-only) filters require Cytoscape 3.9 or later.
        .verifySupportedVersions(cytoscape=3.9, base.url=base.url)
        cmd.body <- toJSON(list(name=filter.name,apply=apply, json=cmd.json))
    }
    .postCreateFilter(cmd.body, base.url)
    # Collect the resulting selection (optionally hiding the complement).
    .checkSelected(hide, network, base.url)
}
# ------------------------------------------------------------------------------
#' @title Create Composite Filter
#'
#' @description Combines filters to control node and edge selection based on
#' previously created filters.
#' @param filter.name Name for filter.
#' @param filter.list List of filters to combine.
#' @param type (optional) Type of composition, requiring ALL (default) or ANY
#' filters to pass for final node and edge selection.
#' @param hide Whether to hide filtered out nodes and edges. Default is FALSE.
#' Ignored if all nodes or edges are filtered out. This is an alternative to
#' filtering for node and edge selection.
#' @param network (optional) Name or SUID of the network. Default is the
#' "current" network active in Cytoscape.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @param apply (bool) True to execute filter immediately (default); False to
#' define filter but not execute it (available in Cytoscape 3.9+).
#' @return List of selected nodes and edges.
#' @examples \donttest{
#' createCompositeFilter("comp1", c("filter1", "filter2"))
#' createCompositeFilter("comp2", c("filter1", "filter2"), "ANY")
#' createCompositeFilter("comp3", c("comp1", "filter3"), apply=FALSE)
#' }
#' @importFrom RJSONIO toJSON
#' @export
createCompositeFilter<-function(filter.name, filter.list, type="ALL",
                                hide = FALSE, network = NULL,
                                base.url = .defaultBaseUrl,
                                apply = TRUE){
    setCurrentNetwork(network,base.url)
    # A composite filter only makes sense over two or more existing filters.
    if(!length(filter.list)>1)
        stop ('Must provide a list of two or more filter names, e.g., c("filter1", "filter2")')
    # Pull each named filter's transformer definition from Cytoscape.
    trans.list <- lapply(filter.list, function(x) .getFilterJson(x,base.url)[[1]]$transformers[[1]])
    # Build the CompositeFilter definition and POST it to Cytoscape.
    cmd.json <- list(id="CompositeFilter", parameters=list(type=type), transformers=trans.list)
    cmd.body <- toJSON(list(name=filter.name, json=cmd.json))
    if(apply==FALSE){
        # Deferred (define-only) filters require Cytoscape 3.9 or later.
        .verifySupportedVersions(cytoscape=3.9, base.url=base.url)
        cmd.body <- toJSON(list(name=filter.name,apply=apply, json=cmd.json))
    }
    .postCreateFilter(cmd.body, base.url)
    # Collect the resulting selection (optionally hiding the complement).
    .checkSelected(hide, network, base.url)
}
# ------------------------------------------------------------------------------
#' @title Create Degree Filter
#'
#' @description Creates a filter to control node selection based on in/out degree.
#' @param filter.name Name for filter.
#' @param criterion A two-element vector of numbers, example: c(1,5).
#' @param predicate BETWEEN (default) or IS_NOT_BETWEEN
#' @param edgeType (optional) Type of edges to consider in degree count:
#' ANY (default), UNDIRECTED, INCOMING, OUTGOING, DIRECTED
#' @param hide Whether to hide filtered out nodes and edges. Default is FALSE.
#' Ignored if all nodes or edges are filtered out. This is an alternative to
#' filtering for node and edge selection.
#' @param network (optional) Name or SUID of the network. Default is the
#' "current" network active in Cytoscape.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @param apply (bool) True to execute filter immediately (default); False to
#' define filter but not execute it (available in Cytoscape 3.9+).
#' @return List of selected nodes and edges.
#' @examples \donttest{
#' createDegreeFilter('myFilter', c(4,5))
#' createDegreeFilter('myFilter', c(2,5), apply=FALSE)
#' }
#' @importFrom RJSONIO fromJSON
#' @export
createDegreeFilter<-function(filter.name, criterion, predicate="BETWEEN",
                             edgeType="ANY", hide = FALSE, network = NULL,
                             base.url = .defaultBaseUrl,
                             apply = TRUE){
    setCurrentNetwork(network, base.url)
    # DegreeFilter always takes a closed range, even for IS_NOT_BETWEEN.
    if (length(criterion) != 2)
        stop ("criterion must be a list of two numeric values, e.g., c(2,5)")
    # NOTE: the unused cmd.name local from the original has been removed.
    cmd.json <- list(id="DegreeFilter", parameters=list(criterion=criterion,
                                                        predicate=predicate,
                                                        edgeType=edgeType))
    cmd.body <- toJSON(list(name=filter.name, json=cmd.json))
    if (apply == FALSE) {
        # Deferred execution (apply=FALSE) requires Cytoscape 3.9+.
        .verifySupportedVersions(cytoscape=3.9, base.url=base.url)
        cmd.body <- toJSON(list(name=filter.name, apply=apply, json=cmd.json))
    }
    .postCreateFilter(cmd.body, base.url)
    .checkSelected(hide, network, base.url)
}
# ------------------------------------------------------------------------------
#' @title Get Filter List
#'
#' @description Retrieve list of named filters in current session.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return List of filter names
#' @examples \donttest{
#' getFilterList()
#' }
#' @export
getFilterList <- function(base.url = .defaultBaseUrl) {
    # Delegate directly to the CyREST "filter list" command.
    filter.cmd <- "filter list"
    commandsPOST(filter.cmd, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Export Filters
#'
#' @description Saves filters to file in JSON format.
#' @param filename (char) Full path or path relative to current
#' working directory, in addition to the name of the file. Default is
#' "filters.json"
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @details Unlike other export functions, Cytoscape will automatically
#' overwrite files with the same name. You will not be prompted to confirm
#' or reject overwrite. Use carefully!
#' @examples \donttest{
#' exportFilters()
#' }
#' @importFrom R.utils isAbsolutePath
#' @export
exportFilters<-function(filename = "filters.json", base.url = .defaultBaseUrl){
    # Escape the dot: the original pattern ".json$" let "." match any
    # character, so names like "ajson" wrongly skipped the extension append.
    ext <- "\\.json$"
    if (!grepl(ext, filename))
        filename <- paste0(filename, ".json")
    # Cytoscape needs an absolute path; resolve relative paths against getwd().
    if (!isAbsolutePath(filename))
        filename <- paste(getwd(), filename, sep="/")
    # Cytoscape overwrites without prompting, so at least warn the caller.
    if (file.exists(filename))
        warning("This file has been overwritten.",
                call. = FALSE,
                immediate. = TRUE)
    commandsGET(paste0('filter export file="',
                       filename, '"'),
                base.url)
}
# ------------------------------------------------------------------------------
#' @title Import Filters
#'
#' @description Loads filters from a file in JSON format.
#' @param filename (char) Path and name of the filters file to load.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' importFilters()
#' }
#' @export
importFilters<-function(filename, base.url = .defaultBaseUrl){
    # Cytoscape needs an absolute path; resolve relative paths against getwd().
    # (Also: use <- for assignment, consistent with the rest of the file.)
    if (!isAbsolutePath(filename))
        filename <- paste(getwd(), filename, sep='/')
    res <- commandsGET(paste0('filter import file="', filename, '"'), base.url)
    # Give Cytoscape time to register the imported filters before returning.
    Sys.sleep(get(".CATCHUP_FILTER_SECS", envir = RCy3env)) ## NOTE: TEMPORARY SLEEP "FIX"
    return(res)
}
# ------------------------------------------------------------------------------
# Internal function to process special json list syntax with pesky single quotes. This
# is an alternative to commandsPOST.
#' @importFrom httr POST
#' @importFrom httr content_type_json
.postCreateFilter<-function(cmd.body, base.url){
    cmd.url <- paste0(base.url, '/commands/filter/create')
    # The CyREST filter endpoint expects the nested "json" element wrapped in
    # single quotes; patch the serialized JSON accordingly.
    cmd.body <- gsub("json\": {\n", "json\": \\'{\n", cmd.body, perl = TRUE)
    cmd.body <- gsub("\n} \n} \n}", "\n} \n}\\' \n}", cmd.body, perl = TRUE)
    cmd.body <- gsub("\n] \n} \n}", "\n] \n}\\' \n}", cmd.body, perl = TRUE) #for createCompositeFilter
    # Pre-initialize res: if POST() itself fails, the error handler and the
    # finally clause previously referenced an undefined object ("object 'res'
    # not found"), masking the real error.
    res <- NULL
    tryCatch(
        res <- POST(url=cmd.url, body=cmd.body, encode="json", content_type_json()),
        error=function(c) .cyError(c, res),
        warnings=function(c) .cyWarnings(c, res),
        finally=.cyFinally(res)
    )
    # Guard against res still being NULL before inspecting the status code.
    if(!is.null(res) && res$status_code > 299){
        write(sprintf("RCy3::.postCreateFilter, HTTP Error Code: %d\n url=%s\n body=%s",
                      res$status_code, URLencode(cmd.url), cmd.body), stderr())
        stop(fromJSON(rawToChar(res$content))$errors[[1]]$message)
    }
}
# ------------------------------------------------------------------------------
# Internal function to return (or hide) filter-selected nodes and edges.
# Returns the current node/edge selection; optionally hides the complement.
# Args: hide - if TRUE, hide everything NOT selected; network/base.url - passed
# through to the selection helpers.
.checkSelected<-function(hide, network, base.url){
    # Wait for the filter's selection to propagate through the Cytoscape model
    # before querying it.
    Sys.sleep(get(".MODEL_PROPAGATION_SECS",envir = RCy3env)) ## NOTE: TEMPORARY SLEEP "FIX"
    sel.nodes<-getSelectedNodes(network=network, base.url=base.url)
    sel.edges<-getSelectedEdges(network=network, base.url=base.url)
    if(hide) {
        # Order matters: first reveal everything, then hide the inverse of the
        # current selection. The is.na guards skip hiding when nothing is
        # selected (getSelected* returns NA in that case).
        unhideAll(network, base.url)
        if(!is.na(sel.nodes[1]))
            hideNodes(invertNodeSelection(network, base.url)$nodes, network, base.url)
        if(!is.na(sel.edges[1]))
            hideEdges(invertEdgeSelection(network, base.url)$edges, network, base.url)
    }
    return(list(nodes=sel.nodes, edges=sel.edges))
}
# ------------------------------------------------------------------------------
# Internal function to get filters as JSON for constructing composite filters.
# Fetch the JSON definition of a named filter from Cytoscape.
.getFilterJson <- function(filter.name, base.url) {
    cmd <- sprintf('filter get name="%s"', filter.name)
    commandsPOST(cmd, base.url = base.url)
}
|
/R/Filters.R
|
permissive
|
kumonismo/RCy3
|
R
| false
| false
| 17,697
|
r
|
# ==============================================================================
# Functions for working with FILTERS for the selection of nodes and edges in
# networks, including operations to import and export filters. In the Cytoscape
# user interface, filters are managed in the Select tab of the Control Panel.
#
# ==============================================================================
#' @title Apply Filter
#'
#' @description Run an existing filter by supplying the filter name.
#' @param filter.name Name of filter to apply. Default is "Default filter".
#' @param hide Whether to hide filtered out nodes and edges. Default is FALSE.
#' Ignored if all nodes or edges are filtered out. This is an alternative to
#' filtering for node and edge selection.
#' @param network (optional) Name or SUID of the network. Default is the
#' "current" network active in Cytoscape.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return List of selected nodes and edges.
#' @details Known bug: selection (or hiding) of edges using edge-based column
#' filters does not work. As a workaround, simply repeat the createColumnFilter
#' operation to perform selection (or hiding) of edges.
#' @examples \donttest{
#' applyFilter('myFilter')
#' applyFilter('myFilter', hide = TRUE)
#' }
#' @seealso unhideAll
#' @importFrom RJSONIO toJSON
#' @export
applyFilter <- function(filter.name = "Default filter", hide = FALSE, network = NULL,
                      base.url = .defaultBaseUrl) {
    # Refuse to run a filter that has not been defined in this session.
    if (!filter.name %in% getFilterList(base.url))
        stop(sprintf("Filter %s does not exist.", filter.name))
    suid <- getNetworkSuid(network, base.url)
    setCurrentNetwork(suid, base.url)
    # Assemble the CyREST command in one shot, e.g.:
    #   filter apply container=filter name=myFilter network=SUID:52
    cmd <- sprintf("filter apply container=filter name=%s network=SUID:%s",
                   filter.name, suid)
    commandsPOST(cmd, base.url)
    .checkSelected(hide, suid, base.url)
}
# ------------------------------------------------------------------------------
#' @title Create Column Filter
#'
#' @description Creates a filter to control node or edge selection. Works on
#' columns of boolean, string, numeric and lists. Note the unique restrictions
#' for criterion and predicate depending on the type of column being filtered.
#' @param filter.name Name for filter.
#' @param column Table column to base filter upon.
#' @param criterion For boolean columns: TRUE or FALSE. For string columns: a
#' string value, e.g., "hello". If the predicate is REGEX then this can be a
#' regular expression as accepted by the Java Pattern class
#' (https://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html). For
#' numeric columns: If the predicate is BETWEEN or IS_NOT_BETWEEN then this is
#' a two-element vector of numbers, example: c(1,5), otherwise a single number.
#' @param predicate For boolean columns: IS, IS_NOT. For string columns: IS,
#' IS_NOT, CONTAINS, DOES_NOT_CONTAIN, REGEX. For numeric columns: IS, IS_NOT,
#' GREATER_THAN, GREATER_THAN_OR_EQUAL, LESS_THAN, LESS_THAN_OR_EQUAL, BETWEEN,
#' IS_NOT_BETWEEN
#' @param caseSensitive (optional) If string matching should be case sensitive.
#' Default is FALSE.
#' @param anyMatch (optional) Only applies to List columns. If true then at least
#' one element in the list must pass the filter, if false then all the elements
#' in the list must pass the filter. Default is TRUE.
#' @param type (optional) Apply filter to "nodes" (default) or "edges".
#' @param hide Whether to hide filtered out nodes and edges. Default is FALSE.
#' Ignored if all nodes or edges are filtered out. This is an alternative to
#' filtering for node and edge selection.
#' @param network (optional) Name or SUID of the network. Default is the
#' "current" network active in Cytoscape.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @param apply (bool) True to execute filter immediately (default); False to
#' define filter but not execute it (available in Cytoscape 3.9+).
#' @return List of selected nodes and edges.
#' @examples \donttest{
#' createColumnFilter('myFilter', 'log2FC', c(-1,1), "IS_NOT_BETWEEN")
#' createColumnFilter('myFilter', 'pValue', 0.05, "LESS_THAN")
#' createColumnFilter('myFilter', 'function', "kinase", "CONTAINS", FALSE)
#' createColumnFilter('myFilter', 'name', "^Y.*C$", "REGEX")
#' createColumnFilter('myFilter', 'isTarget', TRUE , "IS", apply=FALSE)
#' createColumnFilter('myFilter', 'isTarget', TRUE , "IS", hide=TRUE)
#' }
#' @importFrom RJSONIO fromJSON
#' @export
createColumnFilter<-function(filter.name, column, criterion, predicate,
                             caseSensitive=FALSE, anyMatch=TRUE,
                             type="nodes", hide = FALSE, network = NULL,
                             base.url = .defaultBaseUrl,
                             apply = TRUE){
    setCurrentNetwork(network, base.url)
    # substr(type,1,4) maps "nodes"/"edges" to the "node"/"edge" table names.
    if (!column %in% getTableColumnNames(substr(type,1,4), base.url = base.url))
        stop (sprintf("Column %s does not exist in the %s table", column, substr(type,1,4)))
    if (predicate %in% c("BETWEEN","IS_NOT_BETWEEN")) {
        if (length(criterion) != 2)
            stop ("criterion must be a list of two numeric values, e.g., c(0.5,2.0)")
    } else if (predicate %in% c("GREATER_THAN", "GREATER_THAN_OR_EQUAL")) {
        # manually feed max bound so that UI is also correct
        col.vals <- getTableColumns(substr(type,1,4), column, base.url = base.url)
        crit.max <- max(na.omit(col.vals))
        criterion <- c(criterion[1], crit.max)
        # same trick to fix UI does not work for LESS_THAN cases
        # } else if (predicate %in% c("LESS_THAN", "LESS_THAN_OR_EQUAL")){
        #     col.vals <- getTableColumns(substr(type,1,4), column, base.url = base.url)
        #     crit.max <- min(na.omit(col.vals))
        #     criterion <- c(crit.max,criterion[1])
    } else if (is.numeric(criterion[1]) && predicate == "IS") {
        # Numeric equality is expressed as a degenerate BETWEEN range.
        criterion <- c(criterion[1], criterion[1])
        predicate <- "BETWEEN"
    } else if (is.numeric(criterion[1]) && predicate == "IS_NOT") {
        criterion <- c(criterion[1], criterion[1])
        predicate <- "IS_NOT_BETWEEN"
    } else if (is.logical(criterion[1]) && predicate == "IS_NOT") {
        # Boolean "IS_NOT x" is equivalent to "IS !x".
        criterion <- !criterion
    }
    # NOTE: the unused cmd.name local from the original has been removed.
    cmd.json <- list(id="ColumnFilter", parameters=list(criterion=criterion,
                                                        columnName=column,
                                                        predicate=predicate,
                                                        caseSensitive=caseSensitive,
                                                        anyMatch=anyMatch,
                                                        type=type))
    cmd.body <- toJSON(list(name=filter.name, json=cmd.json))
    if (apply == FALSE) {
        # Deferred execution (apply=FALSE) requires Cytoscape 3.9+.
        .verifySupportedVersions(cytoscape=3.9, base.url=base.url)
        cmd.body <- toJSON(list(name=filter.name, apply=apply, json=cmd.json))
    }
    .postCreateFilter(cmd.body, base.url)
    .checkSelected(hide, network, base.url)
}
# ------------------------------------------------------------------------------
#' @title Create Composite Filter
#'
#' @description Combines filters to control node and edge selection based on
#' previously created filters.
#' @param filter.name Name for filter.
#' @param filter.list List of filters to combine.
#' @param type (optional) Type of composition, requiring ALL (default) or ANY
#' filters to pass for final node and edge selection.
#' @param hide Whether to hide filtered out nodes and edges. Default is FALSE.
#' Ignored if all nodes or edges are filtered out. This is an alternative to
#' filtering for node and edge selection.
#' @param network (optional) Name or SUID of the network. Default is the
#' "current" network active in Cytoscape.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @param apply (bool) True to execute filter immediately (default); False to
#' define filter but not execute it (available in Cytoscape 3.9+).
#' @return List of selected nodes and edges.
#' @examples \donttest{
#' createCompositeFilter("comp1", c("filter1", "filter2"))
#' createCompositeFilter("comp2", c("filter1", "filter2"), "ANY")
#' createCompositeFilter("comp3", c("comp1", "filter3"), apply=FALSE)
#' }
#' @importFrom RJSONIO fromJSON
#' @export
createCompositeFilter<-function(filter.name, filter.list, type="ALL",
                               hide = FALSE, network = NULL,
                               base.url = .defaultBaseUrl,
                               apply = TRUE){
    setCurrentNetwork(network, base.url)
    # A composite filter only makes sense for two or more component filters.
    # (Original test relied on `!` binding weaker than `>`; spell it out.)
    if (length(filter.list) < 2)
        stop ('Must provide a list of two or more filter names, e.g., c("filter1", "filter2")')
    # Pull each named filter's transformer definition so they can be embedded
    # in a single CompositeFilter specification.
    trans.list <- lapply(filter.list,
                         function(x) .getFilterJson(x, base.url)[[1]]$transformers[[1]])
    cmd.json <- list(id="CompositeFilter", parameters=list(type=type), transformers=trans.list)
    cmd.body <- toJSON(list(name=filter.name, json=cmd.json))
    if (apply == FALSE) {
        # Deferred execution (apply=FALSE) requires Cytoscape 3.9+.
        .verifySupportedVersions(cytoscape=3.9, base.url=base.url)
        cmd.body <- toJSON(list(name=filter.name, apply=apply, json=cmd.json))
    }
    .postCreateFilter(cmd.body, base.url)
    .checkSelected(hide, network, base.url)
}
# ------------------------------------------------------------------------------
#' @title Create Degree Filter
#'
#' @description Creates a filter to control node selection based on in/out degree.
#' @param filter.name Name for filter.
#' @param criterion A two-element vector of numbers, example: c(1,5).
#' @param predicate BETWEEN (default) or IS_NOT_BETWEEN
#' @param edgeType (optional) Type of edges to consider in degree count:
#' ANY (default), UNDIRECTED, INCOMING, OUTGOING, DIRECTED
#' @param hide Whether to hide filtered out nodes and edges. Default is FALSE.
#' Ignored if all nodes or edges are filtered out. This is an alternative to
#' filtering for node and edge selection.
#' @param network (optional) Name or SUID of the network. Default is the
#' "current" network active in Cytoscape.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @param apply (bool) True to execute filter immediately (default); False to
#' define filter but not execute it (available in Cytoscape 3.9+).
#' @return List of selected nodes and edges.
#' @examples \donttest{
#' createDegreeFilter('myFilter', c(4,5))
#' createDegreeFilter('myFilter', c(2,5), apply=FALSE)
#' }
#' @importFrom RJSONIO fromJSON
#' @export
createDegreeFilter<-function(filter.name, criterion, predicate="BETWEEN",
                             edgeType="ANY", hide = FALSE, network = NULL,
                             base.url = .defaultBaseUrl,
                             apply = TRUE){
    setCurrentNetwork(network, base.url)
    # DegreeFilter always takes a closed range, even for IS_NOT_BETWEEN.
    if (length(criterion) != 2)
        stop ("criterion must be a list of two numeric values, e.g., c(2,5)")
    # NOTE: the unused cmd.name local from the original has been removed.
    cmd.json <- list(id="DegreeFilter", parameters=list(criterion=criterion,
                                                        predicate=predicate,
                                                        edgeType=edgeType))
    cmd.body <- toJSON(list(name=filter.name, json=cmd.json))
    if (apply == FALSE) {
        # Deferred execution (apply=FALSE) requires Cytoscape 3.9+.
        .verifySupportedVersions(cytoscape=3.9, base.url=base.url)
        cmd.body <- toJSON(list(name=filter.name, apply=apply, json=cmd.json))
    }
    .postCreateFilter(cmd.body, base.url)
    .checkSelected(hide, network, base.url)
}
# ------------------------------------------------------------------------------
#' @title Get Filter List
#'
#' @description Retrieve list of named filters in current session.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return List of filter names
#' @examples \donttest{
#' getFilterList()
#' }
#' @export
getFilterList <- function(base.url = .defaultBaseUrl) {
    # Delegate directly to the CyREST "filter list" command.
    filter.cmd <- "filter list"
    commandsPOST(filter.cmd, base.url = base.url)
}
# ------------------------------------------------------------------------------
#' @title Export Filters
#'
#' @description Saves filters to file in JSON format.
#' @param filename (char) Full path or path relative to current
#' working directory, in addition to the name of the file. Default is
#' "filters.json"
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @details Unlike other export functions, Cytoscape will automatically
#' overwrite files with the same name. You will not be prompted to confirm
#' or reject overwrite. Use carefully!
#' @examples \donttest{
#' exportFilters()
#' }
#' @importFrom R.utils isAbsolutePath
#' @export
exportFilters<-function(filename = "filters.json", base.url = .defaultBaseUrl){
    # Escape the dot: the original pattern ".json$" let "." match any
    # character, so names like "ajson" wrongly skipped the extension append.
    ext <- "\\.json$"
    if (!grepl(ext, filename))
        filename <- paste0(filename, ".json")
    # Cytoscape needs an absolute path; resolve relative paths against getwd().
    if (!isAbsolutePath(filename))
        filename <- paste(getwd(), filename, sep="/")
    # Cytoscape overwrites without prompting, so at least warn the caller.
    if (file.exists(filename))
        warning("This file has been overwritten.",
                call. = FALSE,
                immediate. = TRUE)
    commandsGET(paste0('filter export file="',
                       filename, '"'),
                base.url)
}
# ------------------------------------------------------------------------------
#' @title Import Filters
#'
#' @description Loads filters from a file in JSON format.
#' @param filename (char) Path and name of the filters file to load.
#' @param base.url (optional) Ignore unless you need to specify a custom domain,
#' port or version to connect to the CyREST API. Default is http://localhost:1234
#' and the latest version of the CyREST API supported by this version of RCy3.
#' @return None
#' @examples \donttest{
#' importFilters()
#' }
#' @export
importFilters<-function(filename, base.url = .defaultBaseUrl){
    # Cytoscape needs an absolute path; resolve relative paths against getwd().
    # (Also: use <- for assignment, consistent with the rest of the file.)
    if (!isAbsolutePath(filename))
        filename <- paste(getwd(), filename, sep='/')
    res <- commandsGET(paste0('filter import file="', filename, '"'), base.url)
    # Give Cytoscape time to register the imported filters before returning.
    Sys.sleep(get(".CATCHUP_FILTER_SECS", envir = RCy3env)) ## NOTE: TEMPORARY SLEEP "FIX"
    return(res)
}
# ------------------------------------------------------------------------------
# Internal function to process special json list syntax with pesky single quotes. This
# is an alternative to commandsPOST.
#' @importFrom httr POST
#' @importFrom httr content_type_json
.postCreateFilter<-function(cmd.body, base.url){
    cmd.url <- paste0(base.url, '/commands/filter/create')
    # The CyREST filter endpoint expects the nested "json" element wrapped in
    # single quotes; patch the serialized JSON accordingly.
    cmd.body <- gsub("json\": {\n", "json\": \\'{\n", cmd.body, perl = TRUE)
    cmd.body <- gsub("\n} \n} \n}", "\n} \n}\\' \n}", cmd.body, perl = TRUE)
    cmd.body <- gsub("\n] \n} \n}", "\n] \n}\\' \n}", cmd.body, perl = TRUE) #for createCompositeFilter
    # Pre-initialize res: if POST() itself fails, the error handler and the
    # finally clause previously referenced an undefined object ("object 'res'
    # not found"), masking the real error.
    res <- NULL
    tryCatch(
        res <- POST(url=cmd.url, body=cmd.body, encode="json", content_type_json()),
        error=function(c) .cyError(c, res),
        warnings=function(c) .cyWarnings(c, res),
        finally=.cyFinally(res)
    )
    # Guard against res still being NULL before inspecting the status code.
    if(!is.null(res) && res$status_code > 299){
        write(sprintf("RCy3::.postCreateFilter, HTTP Error Code: %d\n url=%s\n body=%s",
                      res$status_code, URLencode(cmd.url), cmd.body), stderr())
        stop(fromJSON(rawToChar(res$content))$errors[[1]]$message)
    }
}
# ------------------------------------------------------------------------------
# Internal function to return (or hide) filter-selected nodes and edges.
# Returns the current node/edge selection; optionally hides the complement.
# Args: hide - if TRUE, hide everything NOT selected; network/base.url - passed
# through to the selection helpers.
.checkSelected<-function(hide, network, base.url){
    # Wait for the filter's selection to propagate through the Cytoscape model
    # before querying it.
    Sys.sleep(get(".MODEL_PROPAGATION_SECS",envir = RCy3env)) ## NOTE: TEMPORARY SLEEP "FIX"
    sel.nodes<-getSelectedNodes(network=network, base.url=base.url)
    sel.edges<-getSelectedEdges(network=network, base.url=base.url)
    if(hide) {
        # Order matters: first reveal everything, then hide the inverse of the
        # current selection. The is.na guards skip hiding when nothing is
        # selected (getSelected* returns NA in that case).
        unhideAll(network, base.url)
        if(!is.na(sel.nodes[1]))
            hideNodes(invertNodeSelection(network, base.url)$nodes, network, base.url)
        if(!is.na(sel.edges[1]))
            hideEdges(invertEdgeSelection(network, base.url)$edges, network, base.url)
    }
    return(list(nodes=sel.nodes, edges=sel.edges))
}
# ------------------------------------------------------------------------------
# Internal function to get filters as JSON for constructing composite filters.
# Fetch the JSON definition of a named filter from Cytoscape.
.getFilterJson <- function(filter.name, base.url) {
    cmd <- sprintf('filter get name="%s"', filter.name)
    commandsPOST(cmd, base.url = base.url)
}
|
# Fuzzer-generated regression input: exercise metafolio's internal
# est_beta_params() with an extreme mu and zero variance, and print the result.
testlist <- list(mu = -5.31401037247976e+303, var = 0)
result <- do.call(metafolio:::est_beta_params,testlist)
str(result)
|
/metafolio/inst/testfiles/est_beta_params/libFuzzer_est_beta_params/est_beta_params_valgrind_files/1612989111-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 122
|
r
|
# Fuzzer-generated regression input: exercise metafolio's internal
# est_beta_params() with an extreme mu and zero variance, and print the result.
testlist <- list(mu = -5.31401037247976e+303, var = 0)
result <- do.call(metafolio:::est_beta_params,testlist)
str(result)
|
#' Print function
#'
#' @param x An object of class diversityEstimates
#' @param ... other arguments to be passed to print
#' @return NULL
#'
#' @export
print.diversityEstimates <- function(x, ...) {
  cat("An object of class `diversityEstimates` with the following elements:\n")
  # The original sapply over 1:length(names(dv)) printed "NA" for an empty
  # object (1:0 yields c(1, 0)) and discarded its result; a plain loop over
  # the names is safe for zero elements and clearer.
  for (nm in names(x)) {
    cat("  - ", nm, "\n")
  }
  cat("Access individual components with, e.g., object$shannon and object$`shannon-variance`\n")
  cat("Use function testDiversity() to test hypotheses about diversity")
  # Print methods conventionally return their argument invisibly.
  invisible(x)
}
#' Plot function
#'
#' TODO make more like the phyloseq plot richness
#'
#' @param x An object of class diversityEstimates
#' @param ... other arguments to be passed to plot
#' @return An object of class ggplot
#' @export
plot.diversityEstimates <- function(x, ...) {
  dv <- x
  # Optional `xx` (x-axis label) and `h0` (diversity index) are picked out of
  # `...` via match.call. NOTE(review): match.call returns *unevaluated*
  # arguments, so non-literal values (e.g. a variable) would arrive as
  # language objects here -- confirm callers only pass string literals.
  args <- match.call(expand.dots = TRUE)
  if (is.null(args$xx)) {
    args$xx <- "samples"
  }
  if (is.null(args$h0)) {
    args$h0 <- "shannon"
  }
  xx <- args$xx
  h0 <- args$h0
  if (h0 %in% c("shannon", "simpson")) {
    # shannon/simpson entries are objects carrying $estimate and a
    # two-element $interval (lower, upper confidence bound).
    ests <- sapply(dv[[h0]], function(x) x$estimate)
    # vars <- sapply(dv[[h0]], function(x) x$error)
    lci <- sapply(dv[[h0]], function(x) x$interval[1])
    uci <- sapply(dv[[h0]], function(x) x$interval[2])
    df <- data.frame("names" = names(ests),
                     "h0" = ests, lci, uci, dv$X)
  } else {
    # Other indices are plain vectors with a parallel "<h0>-variance" element;
    # build approximate +/- 2*SD intervals from it.
    lci <- dv[[h0]] - 2*sqrt(dv[[paste(h0, "-variance", sep = "")]])
    uci <- dv[[h0]] + 2*sqrt(dv[[paste(h0, "-variance", sep = "")]])
    df <- data.frame("names" = names(dv[[h0]]),
                     "h0" = dv[[h0]], lci, uci, dv$X)
  }
  # Freeze the factor levels so samples plot in their original order.
  df$names <- factor(df$names, levels = df$names)
  # Point estimate per sample with a vertical interval segment.
  ggplot2::ggplot(df, ggplot2::aes(x = names, xend = names)) +
    ggplot2::geom_point(ggplot2::aes(x = names, y = h0)) +
    ggplot2::geom_segment(ggplot2::aes(y = lci, yend = uci)) +
    ggplot2::ylab(paste(h0, "estimate")) +
    ggplot2::xlab(xx) +
    ggplot2::theme_bw() +
    ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45, hjust = 1))
}
#' Test diversity
#'
#' Hypothesis testing for alpha-diversity.
#'
#'
#' @references Willis, A., Bunge, J., and Whitman, T. (2017). Improved detection of changes in species richness in high diversity microbial communities. \emph{JRSS-C.}
#'
#' @param dv An object of class diversityEstimates. The variable `X` used for the construction
#' @param h0 The alpha-diversity index to be tested for equality
#' @return A data frame similar to the output of `lm`
#'
#' @export
testDiversity <- function(dv, h0 = "shannon") {
  cat("Hypothesis testing:\n")
  if (h0 %in% c("shannon", "simpson")) {
    # shannon/simpson entries are objects with $estimate and $error; feed
    # those into breakaway::betta along with the design matrix X.
    bt <- breakaway::betta(sapply(dv[[h0]], function(x) x$estimate),
                           sapply(dv[[h0]], function(x) x$error),
                           X = dv[["X"]])
  } else {
    # Other indices are plain vectors with a parallel "<h0>-variance" element.
    bt <- breakaway::betta(dv[[h0]], dv[[paste(h0, "-variance", sep="")]], X = dv[["X"]])
  }
  cat(paste("  p-value for global test:", bt$global[2], "\n"))
  # Return the coefficient table (similar to the output of lm).
  bt$table
}
#' Test beta diversity
#'
#' Hypothesis testing for beta-diversity.
#'
#' This function uses output from DivNet() to estimate community centroids
#' within groups defined by the groups argument and test a null hypothesis
#' of equality of all group centroids against a general alternative. This test
#' is conducted using a pseudo-F statistic with null distribution approximated
#' via a nonparametric bootstrap.
#'
#' For more details and suggested workflow see the beta diversity vignette:
#' \code{vignette("beta_diversity", package = "DivNet")}
#'
#' @param dv An object of class diversityEstimates. The variable `X` used for the construction
#' @param h0 The beta-diversity index to be tested for equality
#' @param groups A numeric vector giving group membership of each specimen
#' @param sample_specimen_matrix A matrix with ik-th entry 1 if the i-th sequenced sample is taken from specimen k, 0 otherwise.
#' The columns of this matrix should correspond to unique specimens and must be named.
#' @param n_boot Number of (cluster) bootstrap resamples to use
#' @return A list containing the observed pseudo-F statistic, the beta diversity used, the
#' p-value returned by the bootstrapped pseudo-F test of equality of (measured) centroids,
#' a vector of computed bootstrapped test statistics, a matrix of estimated group centroids,
#' and a list of group centroids estimated from each bootstrap resample
#' #'
#' @export
testBetaDiversity <- function(dv,
h0,
groups,
sample_specimen_matrix,
n_boot = 1000){
if(length(colnames(sample_specimen_matrix)) != ncol(sample_specimen_matrix)){
stop("Columns of argument sample_specimen_matrix must be named.
Recommended column names are names of unique specimens in your data.")
}
n_groups <- length(unique(groups))
unique_groups <- unique(groups)
unique_specimens <- colnames(sample_specimen_matrix)
n_specimens <- ncol(sample_specimen_matrix)
group_specimens <- sapply(unique_groups,
function(x) apply(sample_specimen_matrix[groups == x,,drop = F],2,max) %>%
(function(y) names(y)[y==1]))
if(h0 == "bray-curtis"){
bc_matrix <- dv$`bray-curtis`
observed_test_statistic <- get_bc_test_statistic(bc_mat = bc_matrix, groups, unique_groups,
n_groups,
n_specimens)
boot_test_statistics <- numeric(n_boot)
np_boot_pulls <-replicate(n_boot,
sample(1:ncol(sample_specimen_matrix),
ncol(sample_specimen_matrix), replace = T))
group_centroids <- lapply(unique_groups,
function(gr){
samples <- sapply(group_specimens[[gr]],
function(specname)
which(sample_specimen_matrix[,specname] ==1)[1])
return(apply(dv$fitted_z[samples,],2,median))}
)
names(group_centroids) <- unique_groups
boot_test_statistics <- numeric(n_boot)
centroid_matrix <- do.call(rbind, lapply(groups,
function(k) group_centroids[[k]]))
boot_centroids <- vector(n_boot, mode = "list")
for(k in 1:n_boot){
which_samples <- do.call(c,lapply(np_boot_pulls[,k],
function(x) which(sample_specimen_matrix[,x] ==1)))
comps <- dv$fitted_z[which_samples,]
boot_group_specimens <-sapply(unique_groups,
function(x) apply(sample_specimen_matrix[groups == x,np_boot_pulls[,k]],2,max) %>%
(function(y) names(y)[y==1]))
boot_centroids[[k]] <- lapply(unique_groups,
function(gr){
samples <- unlist(sapply(boot_group_specimens[[gr]],
function(specname)
which(sample_specimen_matrix[,specname] ==1)))
return(apply(dv$fitted_z[samples,,drop= F],2,median))}
)
names(boot_centroids[[k]]) <- unique_groups
centered_comps <- comps - centroid_matrix[which_samples,]
boot_mat <- matrix(0,
ncol = nrow(centered_comps),
nrow = nrow(centered_comps))
for(i in 1:(nrow(centered_comps) - 1)){
for(j in (i + 1):nrow(centered_comps)){
boot_mat[i,j] <- boot_mat[j,i] <- 0.5*sum(abs(centered_comps[i,] - centered_comps[j,]))
}
}
boot_test_statistics[k] <- get_bc_test_statistic(bc_mat = boot_mat,groups = groups[which_samples],
unique_groups = unique_groups, n_groups = n_groups,
n_specimens = n_specimens)
}
}
if(h0 == "euclidean"){
euc_matrix <- dv$'euclidean'
observed_test_statistic <- get_euc_test_statistic(euc_mat = euc_matrix, groups, unique_groups,
n_groups,
n_specimens)
boot_test_statistics <- numeric(n_boot)
np_boot_pulls <-replicate(n_boot,
sample(1:ncol(sample_specimen_matrix),
ncol(sample_specimen_matrix), replace = T))
group_centroids <- lapply(unique_groups,
function(gr){
samples <- sapply(group_specimens[[gr]],
function(specname)
which(sample_specimen_matrix[,specname] ==1)[1])
return(apply(dv$fitted_z[samples,,drop= F],2,mean))}
)
names(group_centroids) <- unique_groups
boot_test_statistics <- numeric(n_boot)
centroid_matrix <- do.call(rbind, lapply(groups,
function(k) group_centroids[[k]]))
boot_centroids <- vector(n_boot, mode = "list")
for(k in 1:n_boot){
which_samples <- do.call(c,lapply(np_boot_pulls[,k],
function(x) which(sample_specimen_matrix[,x] ==1)))
comps <- dv$fitted_z[which_samples,]
boot_group_specimens <-sapply(unique_groups,
function(x) apply(sample_specimen_matrix[groups == x,np_boot_pulls[,k]],2,max) %>%
(function(y) names(y)[y==1]))
boot_centroids[[k]] <- lapply(unique_groups,
function(gr){
samples <- unlist(sapply(boot_group_specimens[[gr]],
function(specname)
which(sample_specimen_matrix[,specname] ==1)))
return(apply(dv$fitted_z[samples,,drop = F],2,mean))})
names(boot_centroids[[k]]) <- unique_groups
centered_comps <- comps - centroid_matrix[which_samples,]
boot_mat <- matrix(0,
ncol = nrow(centered_comps),
nrow = nrow(centered_comps))
for(i in 1:(nrow(centered_comps) - 1)){
for(j in (i + 1):nrow(centered_comps)){
boot_mat[i,j] <- boot_mat[j,i] <- sqrt(sum((centered_comps[i,] - centered_comps[j,])^2))
}
}
boot_test_statistics[k] <- get_euc_test_statistic(euc_mat = boot_mat,groups = groups[which_samples],
unique_groups = unique_groups, n_groups = n_groups,
n_specimens = n_specimens)
}
}
if(h0 == "aitchison"){
aitch_matrix <- get_aitchison_distance(dv$fitted_z)
observed_test_statistic <- get_euc_test_statistic(aitch_matrix, groups, unique_groups,
n_groups,
n_specimens)
group_centroids <- lapply(unique_groups,
function(gr){
samples <- sapply(group_specimens[[gr]],
function(specname)
which(sample_specimen_matrix[,specname] ==1)[1])
return(apply(log_ratio(dv$fitted_z[samples,]),2,mean))}
)
names(group_centroids) <- unique_groups
boot_test_statistics <- numeric(n_boot)
np_boot_pulls <-replicate(n_boot,
sample(1:ncol(sample_specimen_matrix),
ncol(sample_specimen_matrix), replace = T))
centroid_matrix <- do.call(rbind, lapply(groups,
function(k) group_centroids[[k]]))
boot_groups <- groups
boot_centroids <- vector(n_boot,mode = "list")
for(k in 1:n_boot){
which_samples <- do.call(c,lapply(np_boot_pulls[,k],
function(x) which(sample_specimen_matrix[,x] ==1)))
comps <- log_ratio(dv$fitted_z[which_samples,])
boot_group_specimens <-sapply(unique_groups,
function(x) apply(sample_specimen_matrix[groups == x,np_boot_pulls[,k]],2,max) %>%
(function(y) names(y)[y==1]))
boot_centroids[[k]] <- lapply(unique_groups,
function(gr){
samples <- unlist(sapply(boot_group_specimens[[gr]],
function(specname)
which(sample_specimen_matrix[,specname] ==1)))
return(apply(log_ratio(dv$fitted_z[samples,,drop = F]),2,mean))})
names(boot_centroids[[k]]) <- unique_groups
centered_comps <- comps - centroid_matrix[which_samples,]
boot_mat <- matrix(0,
ncol = nrow(centered_comps),
nrow = nrow(centered_comps))
for(i in 1:(nrow(centered_comps) - 1)){
for(j in (i + 1):nrow(centered_comps)){
boot_mat[i,j] <- boot_mat[j,i] <- sqrt(sum((centered_comps[i,] - centered_comps[j,])^2))
}
}
boot_test_statistics[k] <- get_euc_test_statistic(euc_mat = boot_mat,groups = groups[which_samples],
unique_groups = unique_groups , n_groups = n_groups,
n_specimens = n_specimens)
}
}
p.val <- mean(boot_test_statistics >= observed_test_statistic)
if(p.val == 0){
p.val <- paste(" < ", signif(1/n_boot,2),sep = "", collapse = "")
}
centroids <- do.call(rbind,group_centroids)
rownames(centroids) <- unique_groups
return(list("Test statistic" = observed_test_statistic,
"h0" = h0,
"p_value" = p.val,
"bootstrapped_statistics" = boot_test_statistics,
"centroids" = centroids,
"boot_centroids" = boot_centroids
))
}
# Pseudo-F style test statistic from a Bray-Curtis distance matrix.
#
# Numerator: total between-group distance, scaled by 1/(n_groups - 1).
# Denominator: total within-group distance (each unordered pair counted
# once via the upper triangle), scaled by 1/(n_specimens - n_groups - 1).
# Each between-group pair is counted from both sides of the loop, exactly
# as in the bootstrap replicates, so the statistic is internally consistent.
get_bc_test_statistic <- function(bc_mat, groups, unique_groups,
                                  n_groups,
                                  n_specimens){
  within_total <- 0
  between_total <- 0
  for(g in unique_groups){
    in_group <- groups == g
    within_block <- bc_mat[in_group, in_group]
    within_total <- within_total + sum(within_block[upper.tri(within_block)])
    between_total <- between_total + sum(bc_mat[in_group, !in_group])
  }
  (between_total/(n_groups - 1))/(within_total/(n_specimens - n_groups - 1))
}
# Pseudo-F style test statistic from a Euclidean (or Aitchison) distance
# matrix. Distances are squared first, matching the usual PERMANOVA-type
# decomposition; otherwise the computation mirrors get_bc_test_statistic.
get_euc_test_statistic <- function(euc_mat, groups, unique_groups,
                                   n_groups,
                                   n_specimens){
  sq_mat <- euc_mat^2  # work on squared distances; input is left untouched
  within_total <- 0
  between_total <- 0
  for(g in unique_groups){
    in_group <- groups == g
    within_block <- sq_mat[in_group, in_group]
    within_total <- within_total + sum(within_block[upper.tri(within_block)])
    between_total <- between_total + sum(sq_mat[in_group, !in_group])
  }
  (between_total/(n_groups - 1))/(within_total/(n_specimens - n_groups - 1))
}
# Pairwise Aitchison distances between the rows of a compositional matrix:
# the Euclidean distance between centred log-ratio (clr) transformed rows.
# The clr transform is inlined here: log each entry, then centre every row
# by subtracting its mean log value.
get_aitchison_distance <- function(comp_matrix){
  clr <- log(comp_matrix)
  clr <- sweep(clr, 1, rowMeans(clr))
  as.matrix(dist(clr))
}
# Centred log-ratio (clr) transform of a compositional matrix: log each
# entry, then centre every row by subtracting that row's mean log value,
# so each transformed row sums to zero.
log_ratio <- function(comp_matrix){
  logged <- log(comp_matrix)
  sweep(logged, 1, rowMeans(logged))
}
|
/R/s3functions.R
|
no_license
|
paulinetrinh/DivNet
|
R
| false
| false
| 16,026
|
r
|
#' Print function
#'
#' @param x An object of class diversityEstimates
#' @param ... other arguments to be passed to print
#' @return NULL
#'
#' @export
print.diversityEstimates <- function(x, ...) {
  dv <- x
  cat("An object of class `diversityEstimates` with the following elements:\n")
  # Iterate directly over the element names. The previous
  # sapply(1:length(names(dv)), ...) form would misbehave for an object
  # with zero elements, because 1:0 yields c(1, 0).
  for (nm in names(dv)) {
    cat("  - ", nm, "\n")
  }
  cat("Access individual components with, e.g., object$shannon and object$`shannon-variance`\n")
  cat("Use function testDiversity() to test hypotheses about diversity")
  # Print methods conventionally return their argument invisibly so the
  # object can be piped/assigned without re-printing.
  invisible(x)
}
#' Plot function
#'
#' TODO make more like the phyloseq plot richness
#'
#' @param x An object of class diversityEstimates
#' @param ... other arguments to be passed to plot
#' @return An object of class ggplot
#' @export
plot.diversityEstimates <- function(x, ...) {
  dv <- x
  # Pull the optional arguments `xx` (x-axis label) and `h0` (diversity
  # index name) out of the call; defaults are "samples" and "shannon".
  # NOTE(review): match.call() captures arguments unevaluated, so `xx` and
  # `h0` behave as expected only when callers pass literal strings --
  # confirm no caller passes a variable here.
  args <- match.call(expand.dots = TRUE)
  if (is.null(args$xx)) {
    args$xx <- "samples"
  }
  if (is.null(args$h0)) {
    args$h0 <- "shannon"
  }
  xx <- args$xx
  h0 <- args$h0
  if (h0 %in% c("shannon", "simpson")) {
    # Shannon/Simpson are stored as lists of estimate objects: pull out the
    # point estimate and the lower/upper interval endpoints per sample.
    ests <- sapply(dv[[h0]], function(x) x$estimate)
    # vars <- sapply(dv[[h0]], function(x) x$error)
    lci <- sapply(dv[[h0]], function(x) x$interval[1])
    uci <- sapply(dv[[h0]], function(x) x$interval[2])
    df <- data.frame("names" = names(ests),
                     "h0" = ests, lci, uci, dv$X)
  } else {
    # Other indices are stored as plain vectors with a companion
    # "<index>-variance" element; plot estimate +/- 2 standard deviations.
    lci <- dv[[h0]] - 2*sqrt(dv[[paste(h0, "-variance", sep = "")]])
    uci <- dv[[h0]] + 2*sqrt(dv[[paste(h0, "-variance", sep = "")]])
    df <- data.frame("names" = names(dv[[h0]]),
                     "h0" = dv[[h0]], lci, uci, dv$X)
  }
  # Freeze the factor levels in data order so samples are not re-sorted
  # alphabetically on the x axis.
  df$names <- factor(df$names, levels = df$names)
  # One point (estimate) plus one vertical segment (interval) per sample.
  ggplot2::ggplot(df, ggplot2::aes(x = names, xend = names)) +
    ggplot2::geom_point(ggplot2::aes(x = names, y = h0)) +
    ggplot2::geom_segment(ggplot2::aes(y = lci, yend = uci)) +
    ggplot2::ylab(paste(h0, "estimate")) +
    ggplot2::xlab(xx) +
    ggplot2::theme_bw() +
    ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45, hjust = 1))
}
#' Test diversity
#'
#' Hypothesis testing for alpha-diversity.
#'
#'
#' @references Willis, A., Bunge, J., and Whitman, T. (2017). Improved detection of changes in species richness in high diversity microbial communities. \emph{JRSS-C.}
#'
#' @param dv An object of class diversityEstimates. The variable `X` used for the construction
#' @param h0 The alpha-diversity index to be tested for equality
#' @return A data frame similar to the output of `lm`
#'
#' @export
testDiversity <- function(dv, h0 = "shannon") {
  cat("Hypothesis testing:\n")
  # Shannon/Simpson are stored as lists of estimate objects; other indices
  # are plain vectors paired with a "<index>-variance" element. Either way
  # we end up with an estimate vector and an error vector for betta().
  if (h0 %in% c("shannon", "simpson")) {
    estimates <- sapply(dv[[h0]], function(est) est$estimate)
    errors <- sapply(dv[[h0]], function(est) est$error)
  } else {
    estimates <- dv[[h0]]
    errors <- dv[[paste0(h0, "-variance")]]
  }
  bt <- breakaway::betta(estimates, errors, X = dv[["X"]])
  cat(paste("  p-value for global test:", bt$global[2], "\n"))
  bt$table
}
#' Test beta diversity
#'
#' Hypothesis testing for beta-diversity.
#'
#' This function uses output from DivNet() to estimate community centroids
#' within groups defined by the groups argument and test a null hypothesis
#' of equality of all group centroids against a general alternative. This test
#' is conducted using a pseudo-F statistic with null distribution approximated
#' via a nonparametric bootstrap.
#'
#' For more details and suggested workflow see the beta diversity vignette:
#' \code{vignette("beta_diversity", package = "DivNet")}
#'
#' @param dv An object of class diversityEstimates. The variable `X` used for the construction
#' @param h0 The beta-diversity index to be tested for equality
#' @param groups A numeric vector giving group membership of each specimen
#' @param sample_specimen_matrix A matrix with ik-th entry 1 if the i-th sequenced sample is taken from specimen k, 0 otherwise.
#' The columns of this matrix should correspond to unique specimens and must be named.
#' @param n_boot Number of (cluster) bootstrap resamples to use
#' @return A list containing the observed pseudo-F statistic, the beta diversity used, the
#' p-value returned by the bootstrapped pseudo-F test of equality of (measured) centroids,
#' a vector of computed bootstrapped test statistics, a matrix of estimated group centroids,
#' and a list of group centroids estimated from each bootstrap resample
#' #'
#' @export
testBetaDiversity <- function(dv,
                              h0,
                              groups,
                              sample_specimen_matrix,
                              n_boot = 1000){
  # Specimen names (column names) are required: they are used below to map
  # bootstrap-resampled specimens back to their sequenced samples.
  if(length(colnames(sample_specimen_matrix)) != ncol(sample_specimen_matrix)){
    stop("Columns of argument sample_specimen_matrix must be named.
         Recommended column names are names of unique specimens in your data.")
  }
  n_groups <- length(unique(groups))
  unique_groups <- unique(groups)
  unique_specimens <- colnames(sample_specimen_matrix)
  n_specimens <- ncol(sample_specimen_matrix)
  # For each group: the names of the specimens with at least one sample in
  # that group (column-wise max of the incidence rows equals 1).
  group_specimens <- sapply(unique_groups,
                            function(x) apply(sample_specimen_matrix[groups == x,,drop = F],2,max) %>%
                              (function(y) names(y)[y==1]))
  # --- Bray-Curtis branch: observed statistic from the precomputed BC
  # --- distance matrix; null distribution via a cluster (specimen-level)
  # --- bootstrap of samples centred at their group centroids.
  if(h0 == "bray-curtis"){
    bc_matrix <- dv$`bray-curtis`
    observed_test_statistic <- get_bc_test_statistic(bc_mat = bc_matrix, groups, unique_groups,
                                                     n_groups,
                                                     n_specimens)
    boot_test_statistics <- numeric(n_boot)
    # Each column of np_boot_pulls is one bootstrap resample of specimen
    # indices (with replacement).
    np_boot_pulls <-replicate(n_boot,
                              sample(1:ncol(sample_specimen_matrix),
                                     ncol(sample_specimen_matrix), replace = T))
    # Group centroids from one representative sample per specimen
    # (which(...)[1] picks the first matching sample).
    # NOTE(review): this branch uses the componentwise *median* as the
    # centroid, whereas the euclidean/aitchison branches use the mean --
    # confirm this asymmetry is intentional.
    group_centroids <- lapply(unique_groups,
                              function(gr){
                                samples <- sapply(group_specimens[[gr]],
                                                  function(specname)
                                                    which(sample_specimen_matrix[,specname] ==1)[1])
                                return(apply(dv$fitted_z[samples,],2,median))}
    )
    names(group_centroids) <- unique_groups
    # (Redundant re-initialisation; boot_test_statistics was set above.)
    boot_test_statistics <- numeric(n_boot)
    # One centroid row per sample, in sample order.
    centroid_matrix <- do.call(rbind, lapply(groups,
                                             function(k) group_centroids[[k]]))
    boot_centroids <- vector(n_boot, mode = "list")
    for(k in 1:n_boot){
      # Expand the k-th specimen resample to the indices of its samples.
      which_samples <- do.call(c,lapply(np_boot_pulls[,k],
                                        function(x) which(sample_specimen_matrix[,x] ==1)))
      comps <- dv$fitted_z[which_samples,]
      boot_group_specimens <-sapply(unique_groups,
                                    function(x) apply(sample_specimen_matrix[groups == x,np_boot_pulls[,k]],2,max) %>%
                                      (function(y) names(y)[y==1]))
      boot_centroids[[k]] <- lapply(unique_groups,
                                    function(gr){
                                      samples <- unlist(sapply(boot_group_specimens[[gr]],
                                                               function(specname)
                                                                 which(sample_specimen_matrix[,specname] ==1)))
                                      return(apply(dv$fitted_z[samples,,drop= F],2,median))}
      )
      names(boot_centroids[[k]]) <- unique_groups
      # Centre every resampled composition at its group centroid: this
      # imposes the null hypothesis of equal group centroids.
      centered_comps <- comps - centroid_matrix[which_samples,]
      boot_mat <- matrix(0,
                         ncol = nrow(centered_comps),
                         nrow = nrow(centered_comps))
      # Pairwise Bray-Curtis distances of the centred compositions:
      # 0.5 * L1 distance.
      for(i in 1:(nrow(centered_comps) - 1)){
        for(j in (i + 1):nrow(centered_comps)){
          boot_mat[i,j] <- boot_mat[j,i] <- 0.5*sum(abs(centered_comps[i,] - centered_comps[j,]))
        }
      }
      # NOTE(review): n_specimens here is the original specimen count even
      # though the resample may repeat specimens -- confirm intended.
      boot_test_statistics[k] <- get_bc_test_statistic(bc_mat = boot_mat,groups = groups[which_samples],
                                                       unique_groups = unique_groups, n_groups = n_groups,
                                                       n_specimens = n_specimens)
    }
  }
  # --- Euclidean branch: same bootstrap scheme, but with componentwise
  # --- mean centroids and plain Euclidean distances.
  if(h0 == "euclidean"){
    euc_matrix <- dv$'euclidean'
    observed_test_statistic <- get_euc_test_statistic(euc_mat = euc_matrix, groups, unique_groups,
                                                      n_groups,
                                                      n_specimens)
    boot_test_statistics <- numeric(n_boot)
    np_boot_pulls <-replicate(n_boot,
                              sample(1:ncol(sample_specimen_matrix),
                                     ncol(sample_specimen_matrix), replace = T))
    group_centroids <- lapply(unique_groups,
                              function(gr){
                                samples <- sapply(group_specimens[[gr]],
                                                  function(specname)
                                                    which(sample_specimen_matrix[,specname] ==1)[1])
                                return(apply(dv$fitted_z[samples,,drop= F],2,mean))}
    )
    names(group_centroids) <- unique_groups
    # (Redundant re-initialisation; boot_test_statistics was set above.)
    boot_test_statistics <- numeric(n_boot)
    centroid_matrix <- do.call(rbind, lapply(groups,
                                             function(k) group_centroids[[k]]))
    boot_centroids <- vector(n_boot, mode = "list")
    for(k in 1:n_boot){
      which_samples <- do.call(c,lapply(np_boot_pulls[,k],
                                        function(x) which(sample_specimen_matrix[,x] ==1)))
      comps <- dv$fitted_z[which_samples,]
      boot_group_specimens <-sapply(unique_groups,
                                    function(x) apply(sample_specimen_matrix[groups == x,np_boot_pulls[,k]],2,max) %>%
                                      (function(y) names(y)[y==1]))
      boot_centroids[[k]] <- lapply(unique_groups,
                                    function(gr){
                                      samples <- unlist(sapply(boot_group_specimens[[gr]],
                                                               function(specname)
                                                                 which(sample_specimen_matrix[,specname] ==1)))
                                      return(apply(dv$fitted_z[samples,,drop = F],2,mean))})
      names(boot_centroids[[k]]) <- unique_groups
      centered_comps <- comps - centroid_matrix[which_samples,]
      boot_mat <- matrix(0,
                         ncol = nrow(centered_comps),
                         nrow = nrow(centered_comps))
      # Pairwise Euclidean distances of the centred compositions.
      for(i in 1:(nrow(centered_comps) - 1)){
        for(j in (i + 1):nrow(centered_comps)){
          boot_mat[i,j] <- boot_mat[j,i] <- sqrt(sum((centered_comps[i,] - centered_comps[j,])^2))
        }
      }
      boot_test_statistics[k] <- get_euc_test_statistic(euc_mat = boot_mat,groups = groups[which_samples],
                                                        unique_groups = unique_groups, n_groups = n_groups,
                                                        n_specimens = n_specimens)
    }
  }
  # --- Aitchison branch: compositions are clr-transformed (log_ratio) and
  # --- the Euclidean machinery is reused on the transformed coordinates.
  if(h0 == "aitchison"){
    aitch_matrix <- get_aitchison_distance(dv$fitted_z)
    observed_test_statistic <- get_euc_test_statistic(aitch_matrix, groups, unique_groups,
                                                      n_groups,
                                                      n_specimens)
    group_centroids <- lapply(unique_groups,
                              function(gr){
                                samples <- sapply(group_specimens[[gr]],
                                                  function(specname)
                                                    which(sample_specimen_matrix[,specname] ==1)[1])
                                return(apply(log_ratio(dv$fitted_z[samples,]),2,mean))}
    )
    names(group_centroids) <- unique_groups
    boot_test_statistics <- numeric(n_boot)
    np_boot_pulls <-replicate(n_boot,
                              sample(1:ncol(sample_specimen_matrix),
                                     ncol(sample_specimen_matrix), replace = T))
    centroid_matrix <- do.call(rbind, lapply(groups,
                                             function(k) group_centroids[[k]]))
    # NOTE(review): boot_groups appears unused below (groups[which_samples]
    # is used instead) -- candidate for removal.
    boot_groups <- groups
    boot_centroids <- vector(n_boot,mode = "list")
    for(k in 1:n_boot){
      which_samples <- do.call(c,lapply(np_boot_pulls[,k],
                                        function(x) which(sample_specimen_matrix[,x] ==1)))
      comps <- log_ratio(dv$fitted_z[which_samples,])
      boot_group_specimens <-sapply(unique_groups,
                                    function(x) apply(sample_specimen_matrix[groups == x,np_boot_pulls[,k]],2,max) %>%
                                      (function(y) names(y)[y==1]))
      boot_centroids[[k]] <- lapply(unique_groups,
                                    function(gr){
                                      samples <- unlist(sapply(boot_group_specimens[[gr]],
                                                               function(specname)
                                                                 which(sample_specimen_matrix[,specname] ==1)))
                                      return(apply(log_ratio(dv$fitted_z[samples,,drop = F]),2,mean))})
      names(boot_centroids[[k]]) <- unique_groups
      # Centring happens in clr coordinates here.
      centered_comps <- comps - centroid_matrix[which_samples,]
      boot_mat <- matrix(0,
                         ncol = nrow(centered_comps),
                         nrow = nrow(centered_comps))
      for(i in 1:(nrow(centered_comps) - 1)){
        for(j in (i + 1):nrow(centered_comps)){
          boot_mat[i,j] <- boot_mat[j,i] <- sqrt(sum((centered_comps[i,] - centered_comps[j,])^2))
        }
      }
      boot_test_statistics[k] <- get_euc_test_statistic(euc_mat = boot_mat,groups = groups[which_samples],
                                                        unique_groups = unique_groups , n_groups = n_groups,
                                                        n_specimens = n_specimens)
    }
  }
  # Bootstrap p-value: fraction of null statistics at least as large as the
  # observed one. A value of exactly 0 is reported as "< 1/n_boot".
  # NOTE(review): if h0 matches none of the three supported strings, the
  # variables used below are undefined and this errors -- consider an
  # explicit match.arg() guard.
  p.val <- mean(boot_test_statistics >= observed_test_statistic)
  if(p.val == 0){
    p.val <- paste(" < ", signif(1/n_boot,2),sep = "", collapse = "")
  }
  centroids <- do.call(rbind,group_centroids)
  rownames(centroids) <- unique_groups
  return(list("Test statistic" = observed_test_statistic,
              "h0" = h0,
              "p_value" = p.val,
              "bootstrapped_statistics" = boot_test_statistics,
              "centroids" = centroids,
              "boot_centroids" = boot_centroids
  ))
}
# Pseudo-F style test statistic from a Bray-Curtis distance matrix.
#
# Numerator: total between-group distance, scaled by 1/(n_groups - 1).
# Denominator: total within-group distance (each unordered pair counted
# once via the upper triangle), scaled by 1/(n_specimens - n_groups - 1).
# Each between-group pair is counted from both sides of the loop, exactly
# as in the bootstrap replicates, so the statistic is internally consistent.
get_bc_test_statistic <- function(bc_mat, groups, unique_groups,
                                  n_groups,
                                  n_specimens){
  within_total <- 0
  between_total <- 0
  for(g in unique_groups){
    in_group <- groups == g
    within_block <- bc_mat[in_group, in_group]
    within_total <- within_total + sum(within_block[upper.tri(within_block)])
    between_total <- between_total + sum(bc_mat[in_group, !in_group])
  }
  (between_total/(n_groups - 1))/(within_total/(n_specimens - n_groups - 1))
}
# Pseudo-F style test statistic from a Euclidean (or Aitchison) distance
# matrix. Distances are squared first, matching the usual PERMANOVA-type
# decomposition; otherwise the computation mirrors get_bc_test_statistic.
get_euc_test_statistic <- function(euc_mat, groups, unique_groups,
                                   n_groups,
                                   n_specimens){
  sq_mat <- euc_mat^2  # work on squared distances; input is left untouched
  within_total <- 0
  between_total <- 0
  for(g in unique_groups){
    in_group <- groups == g
    within_block <- sq_mat[in_group, in_group]
    within_total <- within_total + sum(within_block[upper.tri(within_block)])
    between_total <- between_total + sum(sq_mat[in_group, !in_group])
  }
  (between_total/(n_groups - 1))/(within_total/(n_specimens - n_groups - 1))
}
# Pairwise Aitchison distances between the rows of a compositional matrix:
# the Euclidean distance between centred log-ratio (clr) transformed rows.
# The clr transform is inlined here: log each entry, then centre every row
# by subtracting its mean log value.
get_aitchison_distance <- function(comp_matrix){
  clr <- log(comp_matrix)
  clr <- sweep(clr, 1, rowMeans(clr))
  as.matrix(dist(clr))
}
# Centred log-ratio (clr) transform of a compositional matrix: log each
# entry, then centre every row by subtracting that row's mean log value,
# so each transformed row sums to zero.
log_ratio <- function(comp_matrix){
  logged <- log(comp_matrix)
  sweep(logged, 1, rowMeans(logged))
}
|
#' Ordered bar plot
#'
#' @description
#' p.col_ord makes an ordered bar plot.
#'
#' @param data a dataframe
#' @param xaxis x axis data
#' @param yaxis y axis data
#' @param ybreaks number of y axis breaks (default=10)
#' @param percent If TRUE y axis in percent (default=F)
#' @param dec If TRUE the series is ordered in decreasing order; if FALSE, in increasing order (default=FALSE)
#' @param yaccuracy a round for y axis (default=0.01)
#' @param ydecimalmark y decimal mark (default=".")
#' @param title title of plot
#' @param xlab x axis label
#' @param ylab y axis label
#' @param stitle subtitle
#' @param note note
#' @param ctitles color of titles (title,xlab,ylab)
#' @param cscales color of the scales (default= same ctitles)
#' @param cbgrid color of grid background
#' @param clgrid color of grid lines
#' @param cplot color of plot background
#' @param cserie color of serie
#' @param cbserie color of serie border (default= same cserie)
#' @param cticks color of axis ticks
#' @param lwdserie size of serie
#' @param pnote position of note (default=1) (only numbers)
#' @param cbord color of plot border (default= same cplot)
#' @param titlesize size of title (default=20) (only numbers)
#' @param wordssize size of words (default=12) (only numbers)
#' @param snote size of note (default=11) (only numbers)
#' @param xlim limit of x axis (default=NULL)
#'
#'
#' @return Return a graphic.
#' @export
#'
#' @examples
#' v=data.frame("x"=1:5,"y"=c(10,4,8,5,2))
#' p.col_ord(v,xaxis= v$x,yaxis=v$y)
#' #or
#' p.col_ord(v,xaxis= v[[1]],yaxis=v[[2]])
#'
#' p.col_ord(v,xaxis= v$x,yaxis=v$y,dec=TRUE,percent=FALSE)
#' p.col_ord(v,xaxis= v$x,yaxis=v$y,dec=TRUE,percent=TRUE)
#' p.col_ord(v,xaxis= v$x,yaxis=v$y,dec=FALSE,percent=FALSE)
#' p.col_ord(v,xaxis= v$x,yaxis=v$y,dec=FALSE,percent=TRUE)
#'
p.col_ord <- function(data, xaxis, yaxis, ybreaks = 10, dec = FALSE, percent = FALSE,
                      yaccuracy = 0.01, ydecimalmark = '.',
                      title = 'Title', xlab = 'X axis', ylab = 'Y axis', stitle = NULL, note = NULL,
                      ctitles = 'black', cscales = ctitles, cbgrid = 'white', clgrid = cbgrid,
                      cplot = 'white', cserie = 'black', cbserie = cserie, cticks = 'black',
                      lwdserie = 1, pnote = 1, cbord = cplot, titlesize = 20, wordssize = 12,
                      snote = 11, xlim = NULL) {
  # The four original percent/dec branches were identical except for (a)
  # the sign of the sort key passed to reorder() and (b) the y-axis label
  # formatter, so both choices are computed up front and a single plot is
  # built. Output is unchanged for every percent/dec combination.
  # NOTE(review): `xlim` is accepted but never used (as in the original
  # implementation); kept for backward compatibility.
  sort_key <- if (dec) -yaxis else yaxis
  y_labels <- if (percent) {
    scales::label_percent(accuracy = yaccuracy, decimal.mark = ydecimalmark)
  } else {
    ggplot2::waiver()  # ggplot2's default labelling
  }
  g <- ggplot2::ggplot(stats::na.exclude(data),
                       ggplot2::aes(x = stats::reorder(xaxis, sort_key), y = yaxis)) +
    ggplot2::geom_col(fill = cserie, color = cbserie, lwd = lwdserie) +
    ggplot2::scale_y_continuous(labels = y_labels,
                                breaks = scales::breaks_extended(ybreaks)) +
    ggplot2::labs(title = title, y = ylab, x = xlab, subtitle = stitle, caption = note) +
    ggplot2::theme(
      axis.text.x = ggplot2::element_text(angle = 90, vjust = 0.5, hjust = 1,
                                          size = wordssize, face = 'bold', color = cscales),
      title = ggplot2::element_text(angle = 0, hjust = 0.5, size = wordssize,
                                    face = 'bold', color = ctitles),
      axis.text.y = ggplot2::element_text(angle = 0, vjust = 0.5, hjust = 1,
                                          size = wordssize, face = 'bold', color = cscales),
      plot.title = ggplot2::element_text(angle = 0, vjust = 0.5, hjust = 0.5,
                                         size = titlesize, face = 'bold', color = ctitles),
      plot.subtitle = ggplot2::element_text(angle = 0, vjust = 0.5, hjust = 0.5,
                                            size = wordssize, face = 'bold', color = ctitles),
      plot.caption = ggplot2::element_text(angle = 0, vjust = 0.5, hjust = pnote,
                                           size = snote, face = 'bold', color = ctitles),
      plot.background = ggplot2::element_rect(fill = cbgrid, colour = cbord, color = cbord),
      panel.background = ggplot2::element_rect(fill = cplot),
      panel.grid = ggplot2::element_line(colour = clgrid),
      axis.ticks = ggplot2::element_line(color = cticks),
      axis.line = ggplot2::element_line(colour = cticks)
    )
  return(g)
}
|
/R/pcolord.R
|
no_license
|
jvg0mes/metools
|
R
| false
| false
| 8,983
|
r
|
#' Ordered bar plot
#'
#' @description
#' p.col_ord makes an ordered bar plot.
#'
#' @param data a dataframe
#' @param xaxis x axis data
#' @param yaxis y axis data
#' @param ybreaks number of y axis breaks (default=10)
#' @param percent If TRUE y axis in percent (default=F)
#' @param dec If TRUE the series is ordered in decreasing order; if FALSE, in increasing order (default=FALSE)
#' @param yaccuracy a round for y axis (default=0.01)
#' @param ydecimalmark y decimal mark (default=".")
#' @param title title of plot
#' @param xlab x axis label
#' @param ylab y axis label
#' @param stitle subtitle
#' @param note note
#' @param ctitles color of titles (title,xlab,ylab)
#' @param cscales color of the scales (default= same ctitles)
#' @param cbgrid color of grid background
#' @param clgrid color of grid lines
#' @param cplot color of plot background
#' @param cserie color of serie
#' @param cbserie color of serie border (default= same cserie)
#' @param cticks color of axis ticks
#' @param lwdserie size of serie
#' @param pnote position of note (default=1) (only numbers)
#' @param cbord color of plot border (default= same cplot)
#' @param titlesize size of title (default=20) (only numbers)
#' @param wordssize size of words (default=12) (only numbers)
#' @param snote size of note (default=11) (only numbers)
#' @param xlim limit of x axis (default=NULL)
#'
#'
#' @return Return a graphic.
#' @export
#'
#' @examples
#' v=data.frame("x"=1:5,"y"=c(10,4,8,5,2))
#' p.col_ord(v,xaxis= v$x,yaxis=v$y)
#' #or
#' p.col_ord(v,xaxis= v[[1]],yaxis=v[[2]])
#'
#' p.col_ord(v,xaxis= v$x,yaxis=v$y,dec=TRUE,percent=FALSE)
#' p.col_ord(v,xaxis= v$x,yaxis=v$y,dec=TRUE,percent=TRUE)
#' p.col_ord(v,xaxis= v$x,yaxis=v$y,dec=FALSE,percent=FALSE)
#' p.col_ord(v,xaxis= v$x,yaxis=v$y,dec=FALSE,percent=TRUE)
#'
p.col_ord <- function(data, xaxis, yaxis, ybreaks = 10, dec = FALSE, percent = FALSE,
                      yaccuracy = 0.01, ydecimalmark = '.',
                      title = 'Title', xlab = 'X axis', ylab = 'Y axis', stitle = NULL, note = NULL,
                      ctitles = 'black', cscales = ctitles, cbgrid = 'white', clgrid = cbgrid,
                      cplot = 'white', cserie = 'black', cbserie = cserie, cticks = 'black',
                      lwdserie = 1, pnote = 1, cbord = cplot, titlesize = 20, wordssize = 12,
                      snote = 11, xlim = NULL) {
  # The four original percent/dec branches were identical except for (a)
  # the sign of the sort key passed to reorder() and (b) the y-axis label
  # formatter, so both choices are computed up front and a single plot is
  # built. Output is unchanged for every percent/dec combination.
  # NOTE(review): `xlim` is accepted but never used (as in the original
  # implementation); kept for backward compatibility.
  sort_key <- if (dec) -yaxis else yaxis
  y_labels <- if (percent) {
    scales::label_percent(accuracy = yaccuracy, decimal.mark = ydecimalmark)
  } else {
    ggplot2::waiver()  # ggplot2's default labelling
  }
  g <- ggplot2::ggplot(stats::na.exclude(data),
                       ggplot2::aes(x = stats::reorder(xaxis, sort_key), y = yaxis)) +
    ggplot2::geom_col(fill = cserie, color = cbserie, lwd = lwdserie) +
    ggplot2::scale_y_continuous(labels = y_labels,
                                breaks = scales::breaks_extended(ybreaks)) +
    ggplot2::labs(title = title, y = ylab, x = xlab, subtitle = stitle, caption = note) +
    ggplot2::theme(
      axis.text.x = ggplot2::element_text(angle = 90, vjust = 0.5, hjust = 1,
                                          size = wordssize, face = 'bold', color = cscales),
      title = ggplot2::element_text(angle = 0, hjust = 0.5, size = wordssize,
                                    face = 'bold', color = ctitles),
      axis.text.y = ggplot2::element_text(angle = 0, vjust = 0.5, hjust = 1,
                                          size = wordssize, face = 'bold', color = cscales),
      plot.title = ggplot2::element_text(angle = 0, vjust = 0.5, hjust = 0.5,
                                         size = titlesize, face = 'bold', color = ctitles),
      plot.subtitle = ggplot2::element_text(angle = 0, vjust = 0.5, hjust = 0.5,
                                            size = wordssize, face = 'bold', color = ctitles),
      plot.caption = ggplot2::element_text(angle = 0, vjust = 0.5, hjust = pnote,
                                           size = snote, face = 'bold', color = ctitles),
      plot.background = ggplot2::element_rect(fill = cbgrid, colour = cbord, color = cbord),
      panel.background = ggplot2::element_rect(fill = cplot),
      panel.grid = ggplot2::element_line(colour = clgrid),
      axis.ticks = ggplot2::element_line(color = cticks),
      axis.line = ggplot2::element_line(colour = cticks)
    )
  return(g)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Gbeta1.R
\name{fitMcGBB}
\alias{fitMcGBB}
\title{Fitting the McDonald Generalized Beta Binomial distribution when binomial
random variable, frequency and shape parameters are given}
\usage{
fitMcGBB(x,obs.freq,a,b,c)
}
\arguments{
\item{x}{vector of binomial random variables.}
\item{obs.freq}{vector of frequencies.}
\item{a}{single value for shape parameter alpha representing a.}
\item{b}{single value for shape parameter beta representing b.}
\item{c}{single value for shape parameter gamma representing c.}
}
\value{
The output of \code{fitMcGBB} gives the class format \code{fitMB} and \code{fit}, consisting of a list
\code{bin.ran.var} binomial random variables.
\code{obs.freq} corresponding observed frequencies.
\code{exp.freq} corresponding expected frequencies.
\code{statistic} chi-squared test statistics.
\code{df} degrees of freedom.
\code{p.value} probability value by chi-squared test statistic.
\code{fitMB} fitted values of \code{dMcGBB}.
\code{NegLL} Negative Log Likelihood value.
\code{a} estimated value for alpha parameter as a.
\code{b} estimated value for beta parameter as b.
\code{c} estimated value for gamma parameter as c.
\code{AIC} AIC value.
\code{over.dis.para} over dispersion value.
\code{call} the inputs of the function.
Methods \code{summary}, \code{print}, \code{AIC}, \code{residuals} and \code{fitted} can be used to
extract specific outputs.
}
\description{
The function will fit the McDonald Generalized Beta Binomial Distribution
when random variables, corresponding frequencies and shape parameters are given. It will provide
the expected frequencies, chi-squared test statistic value, p value, degrees of freedom
and over dispersion value so that it can be seen if this distribution fits the data.
}
\details{
\deqn{0 < a,b,c}
\deqn{x = 0,1,2,...}
\deqn{obs.freq \ge 0}
\strong{NOTE} : If the input parameters are outside the stated domain conditions, appropriate
error messages will be provided.
}
\examples{
No.D.D <- 0:7 #assigning the random variables
Obs.fre.1 <- c(47,54,43,40,40,41,39,95) #assigning the corresponding frequencies
\dontrun{
#estimating the parameters using maximum log likelihood value and assigning it
parameters <- EstMLEMcGBB(x=No.D.D,freq=Obs.fre.1,a=0.1,b=0.1,c=3.2)
aMcGBB <- bbmle::coef(parameters)[1] #assigning the estimated a
bMcGBB <- bbmle::coef(parameters)[2] #assigning the estimated b
cMcGBB <- bbmle::coef(parameters)[3] #assigning the estimated c
#fitting when the random variable,frequencies,shape parameter values are given.
results <- fitMcGBB(No.D.D,Obs.fre.1,aMcGBB,bMcGBB,cMcGBB)
results
#extracting the expected frequencies
fitted(results)
#extracting the residuals
residuals(results)
}
}
\references{
Manoj, C., Wijekoon, P. & Yapa, R.D., 2013. The McDonald Generalized Beta-Binomial Distribution: A New
Binomial Mixture Distribution and Simulation Based Comparison with Its Nested Distributions in Handling
Overdispersion. International Journal of Statistics and Probability, 2(2), pp.24-41.
Available at: \doi{10.5539/ijsp.v2n2p24}.
Janiffer, N.M., Islam, A. & Luke, O., 2014. Estimating Equations for Estimation of Mcdonald Generalized
Beta - Binomial Parameters. , (October), pp.702-709.
Roozegar, R., Tahmasebi, S. & Jafari, A.A., 2015. The McDonald Gompertz Distribution: Properties and Applications.
Communications in Statistics - Simulation and Computation, (May), pp.0-0.
Available at: \doi{10.1080/03610918.2015.1088024}.
}
\seealso{
\code{\link[bbmle]{mle2}}
}
|
/man/fitMcGBB.Rd
|
no_license
|
cran/fitODBOD
|
R
| false
| true
| 3,615
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Gbeta1.R
\name{fitMcGBB}
\alias{fitMcGBB}
\title{Fitting the McDonald Generalized Beta Binomial distribution when binomial
random variable, frequency and shape parameters are given}
\usage{
fitMcGBB(x,obs.freq,a,b,c)
}
\arguments{
\item{x}{vector of binomial random variables.}
\item{obs.freq}{vector of frequencies.}
\item{a}{single value for shape parameter alpha representing a.}
\item{b}{single value for shape parameter beta representing b.}
\item{c}{single value for shape parameter gamma representing c.}
}
\value{
The output of \code{fitMcGBB} gives the class format \code{fitMB} and \code{fit} consisting of a list
\code{bin.ran.var} binomial random variables.
\code{obs.freq} corresponding observed frequencies.
\code{exp.freq} corresponding expected frequencies.
\code{statistic} chi-squared test statistics.
\code{df} degree of freedom.
\code{p.value} probability value by chi-squared test statistic.
\code{fitMB} fitted values of \code{dMcGBB}.
\code{NegLL} Negative Log Likelihood value.
\code{a} estimated value for alpha parameter as a.
\code{b} estimated value for beta parameter as b.
\code{c} estimated value for gamma parameter as c.
\code{AIC} AIC value.
\code{over.dis.para} over dispersion value.
\code{call} the inputs of the function.
Methods \code{summary}, \code{print}, \code{AIC}, \code{residuals} and \code{fitted} can be used to
extract specific outputs.
}
\description{
The function will fit the McDonald Generalized Beta Binomial Distribution
when random variables, corresponding frequencies and shape parameters are given. It will provide
the expected frequencies, chi-squared test statistics value, p value, degree of freedom
and over dispersion value so that it can be seen if this distribution fits the data.
}
\details{
\deqn{0 < a,b,c}
\deqn{x = 0,1,2,...}
\deqn{obs.freq \ge 0}
\strong{NOTE} : If input parameters are not in given domain conditions necessary
error messages will be provided to go further.
}
\examples{
No.D.D <- 0:7 #assigning the random variables
Obs.fre.1 <- c(47,54,43,40,40,41,39,95) #assigning the corresponding frequencies
\dontrun{
#estimating the parameters using maximum log likelihood value and assigning it
parameters <- EstMLEMcGBB(x=No.D.D,freq=Obs.fre.1,a=0.1,b=0.1,c=3.2)
aMcGBB <- bbmle::coef(parameters)[1] #assigning the estimated a
bMcGBB <- bbmle::coef(parameters)[2] #assigning the estimated b
cMcGBB <- bbmle::coef(parameters)[3] #assigning the estimated c
#fitting when the random variable,frequencies,shape parameter values are given.
results <- fitMcGBB(No.D.D,Obs.fre.1,aMcGBB,bMcGBB,cMcGBB)
results
#extracting the expected frequencies
fitted(results)
#extracting the residuals
residuals(results)
}
}
\references{
Manoj, C., Wijekoon, P. & Yapa, R.D., 2013. The McDonald Generalized Beta-Binomial Distribution: A New
Binomial Mixture Distribution and Simulation Based Comparison with Its Nested Distributions in Handling
Overdispersion. International Journal of Statistics and Probability, 2(2), pp.24-41.
Available at: \doi{10.5539/ijsp.v2n2p24}.
Janiffer, N.M., Islam, A. & Luke, O., 2014. Estimating Equations for Estimation of Mcdonald Generalized
Beta - Binomial Parameters. , (October), pp.702-709.
Roozegar, R., Tahmasebi, S. & Jafari, A.A., 2015. The McDonald Gompertz Distribution: Properties and Applications.
Communications in Statistics - Simulation and Computation, (May), pp.0-0.
Available at: \doi{10.1080/03610918.2015.1088024}.
}
\seealso{
\code{\link[bbmle]{mle2}}
}
|
library(Biostrings)
library(systemPipeR)
# Load the Arabidopsis chloroplast genome and predict all non-overlapping
# ORFs on both strands.
dna_object <- readDNAStringSet(file.path(getwd(), "datasets","ch2", "arabidopsis_chloroplast.fa"))
predicted_orfs <- predORF(dna_object, n = 'all', type = 'gr', mode='ORF', strand = 'both', longest_disjoint = TRUE)
predicted_orfs
# Empirical base composition of the (single) input sequence.
bases <- c("A", "C", "T", "G")
raw_seq_string <- strsplit(as.character(dna_object), "")
seq_length <- width(dna_object[1])
# BUG FIX: strsplit() returns a *list*; grepl() on that list deparses it to a
# single string, so every base "count" collapsed to 0/1. Count the individual
# characters of the first (only) sequence instead.
counts <- lapply(bases, function(x) {sum(raw_seq_string[[1]] == x)} )
probs <- unlist(lapply(counts, function(base_count){signif(base_count / seq_length, 2) }))
# Length of the longest ORF found in one randomly generated genome with the
# given base composition. `x` is an unused replicate index so the function can
# be driven by lapply(); `length` intentionally shadows base::length to keep
# the original call signature.
get_longest_orf_in_random_genome <- function(x,
                                             length = 1000,
                                             probs = c(0.25, 0.25, 0.25, 0.25),
                                             bases = c("A","C","T","G")){
  # sample() normalizes probs internally, so they need not sum to exactly 1.
  random_genome <- paste0(sample(bases, size = length, replace = TRUE, prob = probs), collapse = "")
  random_dna_object <- DNAStringSet(random_genome)
  names(random_dna_object) <- c("random_dna_string")
  orfs <- predORF(random_dna_object, n = 1, type = 'gr', mode='ORF', strand = 'both', longest_disjoint = TRUE)
  return(max(width(orfs)))
}
# Null distribution: longest ORF in 10 random genomes of matched length/composition.
random_lengths <- unlist(lapply(1:10, get_longest_orf_in_random_genome, length = seq_length, probs = probs, bases = bases))
longest_random_orf <- max(random_lengths)
# Keep only real ORFs longer than anything seen by chance.
keep <- width(predicted_orfs) > longest_random_orf
orfs_to_keep <- predicted_orfs[keep]
orfs_to_keep
## writing to file
extracted_orfs <- BSgenome::getSeq(dna_object, orfs_to_keep)
names(extracted_orfs) <- paste0("orf_", seq_along(orfs_to_keep))
writeXStringSet(extracted_orfs, "saved_orfs.fa")
|
/Chapter02/recipe3.R
|
permissive
|
PacktPublishing/R-Bioinformatics-Cookbook
|
R
| false
| false
| 1,635
|
r
|
library(Biostrings)
library(systemPipeR)
# Load the Arabidopsis chloroplast genome and predict all non-overlapping
# ORFs on both strands.
dna_object <- readDNAStringSet(file.path(getwd(), "datasets","ch2", "arabidopsis_chloroplast.fa"))
predicted_orfs <- predORF(dna_object, n = 'all', type = 'gr', mode='ORF', strand = 'both', longest_disjoint = TRUE)
predicted_orfs
# Empirical base composition of the (single) input sequence.
bases <- c("A", "C", "T", "G")
raw_seq_string <- strsplit(as.character(dna_object), "")
seq_length <- width(dna_object[1])
# BUG FIX: strsplit() returns a *list*; grepl() on that list deparses it to a
# single string, so every base "count" collapsed to 0/1. Count the individual
# characters of the first (only) sequence instead.
counts <- lapply(bases, function(x) {sum(raw_seq_string[[1]] == x)} )
probs <- unlist(lapply(counts, function(base_count){signif(base_count / seq_length, 2) }))
# Length of the longest ORF found in one randomly generated genome with the
# given base composition. `x` is an unused replicate index so the function can
# be driven by lapply(); `length` intentionally shadows base::length to keep
# the original call signature.
get_longest_orf_in_random_genome <- function(x,
                                             length = 1000,
                                             probs = c(0.25, 0.25, 0.25, 0.25),
                                             bases = c("A","C","T","G")){
  # sample() normalizes probs internally, so they need not sum to exactly 1.
  random_genome <- paste0(sample(bases, size = length, replace = TRUE, prob = probs), collapse = "")
  random_dna_object <- DNAStringSet(random_genome)
  names(random_dna_object) <- c("random_dna_string")
  orfs <- predORF(random_dna_object, n = 1, type = 'gr', mode='ORF', strand = 'both', longest_disjoint = TRUE)
  return(max(width(orfs)))
}
# Null distribution: longest ORF in 10 random genomes of matched length/composition.
random_lengths <- unlist(lapply(1:10, get_longest_orf_in_random_genome, length = seq_length, probs = probs, bases = bases))
longest_random_orf <- max(random_lengths)
# Keep only real ORFs longer than anything seen by chance.
keep <- width(predicted_orfs) > longest_random_orf
orfs_to_keep <- predicted_orfs[keep]
orfs_to_keep
## writing to file
extracted_orfs <- BSgenome::getSeq(dna_object, orfs_to_keep)
names(extracted_orfs) <- paste0("orf_", seq_along(orfs_to_keep))
writeXStringSet(extracted_orfs, "saved_orfs.fa")
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{convert_to_unit}
\alias{convert_to_unit}
\title{Convert timings to different units.}
\usage{
convert_to_unit(x, unit = c("ns", "us", "ms", "s", "t", "hz", "khz", "mhz",
"eps", "f"))
}
\arguments{
\item{x}{A \code{microbenchmark} object.}
\item{unit}{A unit of time. See details.}
}
\value{
A matrix containing the converted time values with an
attribute \code{unit} which is a printable name of the unit of
time.
}
\description{
The following units of time are supported \describe{
\item{\dQuote{ns}}{Nanoseconds.}
\item{\dQuote{us}}{Microseconds.}
\item{\dQuote{ms}}{Milliseconds.}
\item{\dQuote{s}}{Seconds.}
\item{\dQuote{t}}{Appropriately prefixed time unit.}
\item{\dQuote{hz}}{Hertz / evaluations per second.}
\item{\dQuote{eps}}{Evaluations per second / Hertz.}
\item{\dQuote{khz}}{Kilohertz / 1000s of evaluations per second.}
\item{\dQuote{mhz}}{Megahertz / 1000000s of evaluations per second.}
\item{\dQuote{f}}{Appropriately prefixed frequency unit.}
}
}
\author{
Olaf Mersmann
}
|
/man/convert_to_unit.Rd
|
no_license
|
rgrannell1/microbenchmark
|
R
| false
| false
| 1,075
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{convert_to_unit}
\alias{convert_to_unit}
\title{Convert timings to different units.}
\usage{
convert_to_unit(x, unit = c("ns", "us", "ms", "s", "t", "hz", "khz", "mhz",
"eps", "f"))
}
\arguments{
\item{x}{A \code{microbenchmark} object.}
\item{unit}{A unit of time. See details.}
}
\value{
A matrix containing the converted time values with an
attribute \code{unit} which is a printable name of the unit of
time.
}
\description{
The following units of time are supported \describe{
\item{\dQuote{ns}}{Nanoseconds.}
\item{\dQuote{us}}{Microseconds.}
\item{\dQuote{ms}}{Milliseconds.}
\item{\dQuote{s}}{Seconds.}
\item{\dQuote{t}}{Appropriately prefixed time unit.}
\item{\dQuote{hz}}{Hertz / evaluations per second.}
\item{\dQuote{eps}}{Evaluations per second / Hertz.}
\item{\dQuote{khz}}{Kilohertz / 1000s of evaluations per second.}
\item{\dQuote{mhz}}{Megahertz / 1000000s of evaluations per second.}
\item{\dQuote{f}}{Appropriately prefixed frequency unit.}
}
}
\author{
Olaf Mersmann
}
|
# Example: estimate an EEG "mindfulness" metric from BrainFlow's synthetic board.
library(brainflow)
# Use the synthetic board so no physical hardware is required.
board_id <- brainflow_python$BoardIds$SYNTHETIC_BOARD$value
sampling_rate <- brainflow_python$BoardShim$get_sampling_rate(board_id)
# NOTE(review): nfft is computed but never used below -- presumably left over
# from a PSD example; confirm before removing.
nfft <- brainflow_python$DataFilter$get_nearest_power_of_two(sampling_rate)
params <- brainflow_python$BrainFlowInputParams()
board_shim <- brainflow_python$BoardShim(board_id, params)
# Acquire roughly 10 seconds of streamed data, then release the session.
board_shim$prepare_session()
board_shim$start_stream()
Sys.sleep(time = 10)
board_shim$stop_stream()
data <- board_shim$get_board_data()
board_shim$release_session()
# Compute average band powers over the board's EEG channels; the arrays are
# made contiguous because the underlying Python/NumPy API requires it.
eeg_channels <- brainflow_python$BoardShim$get_eeg_channels(board_id)
data <- np$ascontiguousarray(data)
eeg_channels <- np$ascontiguousarray(c(eeg_channels))
bands <- brainflow_python$DataFilter$get_avg_band_powers(data, eeg_channels, sampling_rate, TRUE)
# bands[[1]] holds the average band powers used as the feature vector.
feature_vector <- np$array(bands[[1]])
# Score the feature vector with the built-in mindfulness classifier.
model_params <- brainflow_python$BrainFlowModelParams(brainflow_python$BrainFlowMetrics$MINDFULNESS$value, brainflow_python$BrainFlowClassifiers$DEFAULT_CLASSIFIER$value)
model <- brainflow_python$MLModel(model_params)
model$prepare()
score <- model$predict(feature_vector)
model$release()
|
/r_package/examples/eeg_metrics.R
|
permissive
|
neuroidss/brainflow
|
R
| false
| false
| 1,090
|
r
|
# Example: estimate an EEG "mindfulness" metric from BrainFlow's synthetic board.
library(brainflow)
# Use the synthetic board so no physical hardware is required.
board_id <- brainflow_python$BoardIds$SYNTHETIC_BOARD$value
sampling_rate <- brainflow_python$BoardShim$get_sampling_rate(board_id)
# NOTE(review): nfft is computed but never used below -- presumably left over
# from a PSD example; confirm before removing.
nfft <- brainflow_python$DataFilter$get_nearest_power_of_two(sampling_rate)
params <- brainflow_python$BrainFlowInputParams()
board_shim <- brainflow_python$BoardShim(board_id, params)
# Acquire roughly 10 seconds of streamed data, then release the session.
board_shim$prepare_session()
board_shim$start_stream()
Sys.sleep(time = 10)
board_shim$stop_stream()
data <- board_shim$get_board_data()
board_shim$release_session()
# Compute average band powers over the board's EEG channels; the arrays are
# made contiguous because the underlying Python/NumPy API requires it.
eeg_channels <- brainflow_python$BoardShim$get_eeg_channels(board_id)
data <- np$ascontiguousarray(data)
eeg_channels <- np$ascontiguousarray(c(eeg_channels))
bands <- brainflow_python$DataFilter$get_avg_band_powers(data, eeg_channels, sampling_rate, TRUE)
# bands[[1]] holds the average band powers used as the feature vector.
feature_vector <- np$array(bands[[1]])
# Score the feature vector with the built-in mindfulness classifier.
model_params <- brainflow_python$BrainFlowModelParams(brainflow_python$BrainFlowMetrics$MINDFULNESS$value, brainflow_python$BrainFlowClassifiers$DEFAULT_CLASSIFIER$value)
model <- brainflow_python$MLModel(model_params)
model$prepare()
score <- model$predict(feature_vector)
model$release()
|
# Adhesion-tracking analysis: rebuild reference-class objects saved in an
# .Rdata file, mark valid frames, smooth velocities, and plot the fraction of
# adhered cells over time. Depends on Tracking.R / OOTracking.R for the
# TrackList/Track/MaximaList/Maxima classes.
rm(list=ls())
source('~/.Rprofile')
source('~/Public/DropBox/GitHub/R-Adhesion/Tracking.R')
source('~/Public/DropBox/GitHub/R-Adhesion/OOTracking.R')
setwd('/Users/jaywarrick/Documents/MMB/Projects/Adhesion/R/Testing')
# Loads maximaList, trackList and bestFit produced by an earlier session.
load(file="20140911.Rdata")
# Re-wrap the loaded objects so they pick up the current class definitions
# (reference-class instances saved to disk keep their old method tables).
trackList <- TrackList$new(trackList)
for(track in trackList$tracks)
{
trackList$setTrack(Track$new(track))
}
maximaList <- MaximaList$new(maximaList)
for(.maxima in maximaList$maxima)
{
maximaList$setMaxima(Maxima$new(.maxima))
}
# Keep only frames inside the 15%-90% window between flow reversals, then
# smooth the velocity traces using the fitted frequency sweep.
trackList$setValidFrames(fit=bestFit, validStart=0.15, validEnd=0.9)
trackList$smoothVelocities(fit=bestFit, dist=10, maxWidth=25)
trackList$plotTrackList(validOnly=TRUE, slot='vxs', fun=abs, ylim=c(0,50), xlim=c(400,500))
# tracks x frames matrix of smoothed x-velocities (NA where invalid/absent).
trackMatrix <- trackList$getMatrix(slot='vxs', validOnly=TRUE)
sum(!is.na(trackMatrix))
# Per frame, fraction of cells with |v| < 3 (treated as "adhered").
ret <- list()
for(frame in colnames(trackMatrix))
{
velocities <- trackMatrix[,frame]
velocities <- abs(velocities[!is.na(velocities)])
if(!isempty(velocities))
{
adhered <- sum(velocities < 3)/length(velocities)
if(adhered == 1)
{
# Debug hook: drop into the browser when *every* cell appears adhered;
# note this frame is then NOT recorded in ret.
browser()
}
else
{
ret[[frame]] <- adhered
}
}
else
{
# Frames with no valid velocities are just reported, not recorded.
print(frame)
}
}
# Frame names are 0-based indices into tAll, hence the +1.
times <- trackList$tAll[as.numeric(names(ret))+1]
plot(times, as.numeric(ret), xlab='Time [s]', ylab='Percent Adhered [%]')
# NOTE(review): aTrack is not defined anywhere above -- this assumes a value
# left over from an interactive session; verify before running as a script.
aTrack <- Track$new(aTrack)
trackList$setTrack(aTrack)
# Overlay raw (blue line / black dots) and valid-only (red dots) velocities.
aTrack$plotTrack(slotY='vx', validOnly=FALSE, type='l', col='blue', pch=20, cex=0.25)
aTrack$plotTrack(slotY='vx', validOnly=FALSE, type='p', col='black', add=TRUE, pch=20, cex=0.5)
aTrack$plotTrack(slotY='vx', validOnly=TRUE, type='p', col='red', add=TRUE, pch=20, cex=0.5)
# length(aTrack$getSlot(slot='x', validOnly=T))
# length(aTrack$getSlot(slot='x', validOnly=F))
# aTrack$length()
# sum(aTrack$getSlot(slot='x', validOnly=T) %in% aTrack$getSlot(slot='x', validOnly=F))
# fit <- bestFit
# validStart = 0.25
# validEnd = 0.75
#
# sin <- trackList$sin
# fi <- trackList$fi
# ff <- trackList$ff
# tAll <- trackList$tAll
#
# sweep <- getSweep(amplitude=fit$par['amplitude'], phaseShift=fit$par['phaseShift'], offset=0, sin=sin, fi=fi, ff=ff, tAll=tAll, frames=-1, guess=NULL)
#
# sin <- FALSE
# fi <- 0.1
# ff <- 0.05
# tAll <- seq(0,1000,1)/10
# validStart = 0.1
# validEnd = 0.95
# sweep <- getSweep(amplitude=1, phaseShift=0, offset=0, sin=sin, fi=fi, ff=ff, tAll=tAll, frames=-1, guess=NULL)
# inflectionsToAddress <- sweep$inflectionNums %in% c(1,3) # These are times at which flow switches directions
# validFrames <- numeric(0)
# for(tIndex in 1:base::length(tAll))
# {
# # Get the nearest inflection at or beyond this time
#
# temp <- which((sweep$inflections >= tAll[tIndex]) & inflectionsToAddress)
# infIndex <- temp[1]
# if(is.na(infIndex)) next
#
# # Get the bounding inflections that represent changes in fluid direction
# infT2 <- sweep$inflections[infIndex] # take the inflection we found
# if((infIndex-2) < 1)
# {
# infT1 <- 0
# }
# else
# {
# infT1 <- sweep$inflections[infIndex-2] # also take two inflections prior because each inflection represents pi/2 and we want to go back to the last change in direction which is pi ago.
# }
# dInfT <- infT2-infT1 # define the time interval size between these two inflections
#
# # Within the if statement calculate the fractional location of this time index in the interval between the two inflections.
# if( (tAll[tIndex] >= (infT1 + validStart*dInfT)) & (tAll[tIndex] <= (infT1 + validEnd*dInfT)) )
# {
# # If it is within the startValid and endValid bounds, add it to the list of the valid frames
# validFrames <- c(validFrames, tIndex-1) # (tIndex-1) = frame because frames are indicies that start at 0
# }
# }
# plot(sweep$t, sweep$v, type='l')
# points(sweep$t[validFrames+1], sweep$v[validFrames + 1], type='p', pch=20, cex=1, col='red')
# sweep$inflectionNums
#
# ni <- 0
# ti <- first(tAll)
# tf <- last(tAll)
# phi <- 0
# inflections <- (log((log(ff/fi)*phi)/(2*fi*pi*tf)+(log(ff/fi)*ni)/(4*fi*tf)+1)*tf)/log(ff/fi)
##### Testing 1 #####
# maxima1 <- new('Maxima')
# maxima2 <- maxima1$copy()
# maxima3 <- maxima1$copy()
# maxima1$initializeWithROI(frame=0, polygon='1,1,1;2,2,2;3,3,3;4,4,4;51,61,5')
# maxima2$initializeWithROI(frame=1, polygon='2,1,5;3,2,4;4,3,3;5,4,2;50,60,1')
# maxima3$initializeWithROI(frame=2, polygon='1,1,1;2,2,2;3,3,3;4,4,4;5,5,5')
#
# maximaList <- new('MaximaList')
# maximaList$setMaxima(maxima1)
# maximaList$setMaxima(maxima2)
# maximaList$setMaxima(maxima3)
# maximaList$trackBack(startFrame=2, endFrame=0)
#
# trackList <- maximaList$getTrackList(sin=FALSE, fi=2, ff=0.1, tAll=0:1)
# maximaList$generateMaximaPlots(path='~/Documents/MMB/Projects/Adhesion/R/Testing/Plots1')
##### Testing 2 #####
# maximaList <- new('MaximaList')
# maximaList$initializeWithFile(path=path2)
# mListCopy <- maximaList$copy()
# mListCopy$trackBack(startFrame=501)
# # trackList <- mListCopy$getTrackList(sin=FALSE, fi=2, ff=0.1, tAll=0:515)
# # ##### Testing 3 #####
# # 50 ms exposure. 2361 images in 500 s = 4.722 frames / sec
# path3 <- '/Volumes/BeebeBig/Jay/JEX Databases/Adhesion FACS/RPMI P-Sel 5Hz-100mHz/Cell_x0_y0/Roi-Tracks Roi/x0_y0.jxd'
# path4 <- '/Volumes/BeebeBig/Jay/JEX Databases/Adhesion FACS/RPMI P-Sel 5Hz-100mHz/Cell_x0_y0/Roi-Maxima/x0_y0.jxd'
# path5 <- '~/Documents/MMB/Projects/Adhesion/R/Testing/SparseMaxima.txt'
# if(!('maximaList' %in% ls()))
# {
# maximaList <- new('MaximaList')
# maximaList$initializeWithFile(path=path5)
# } else
# {
# maximaList <- MaximaList$new(maximaList)
# }
# mListCopy <- maximaList$copy()
# mListCopy$trackBack(startFrame=9994, endFrame=0, maxDist=150, direction=c(1,0,0), directionality=10, uniformityDistThresh=2, digits=1)
# mListCopy$generateMaximaPlots(path='~/Documents/MMB/Projects/Adhesion/R/Testing/Plots1')
# trackList <- mListCopy$getTrackList(sin=FALSE, fi=2, ff=0.01, tAll=seq(0,500,length.out=maximaList$length()))
# trackList$filterTracks(fun = trackLengthFilter, min=500, max=1000000)
# # trackList$filterTracks(fun = trackFrameFilter, startMin=0, startMax=1000000, endMin=maximaList$length()-1, endMax=1000000)
# trackList$plotTrackList()
# bestFit <- getBulkPhaseShift(trackList)
# duh <- getSweep(amplitude=bestFit$par['amplitude'], phaseShift=bestFit$par['phaseShift'], offset=0, sin=trackList$sin, fi=trackList$fi, ff=trackList$ff, tAll=trackList$tAll, frames=-1, guess=NULL)
# lines(duh$t, duh$v, col='blue')
# # aTrack <- trackList$getTrack(0)
# widths <- getWindowWidths(fit=bestFit, trackList=trackList, dist=10, maxWidth=1000)
# # aTrack$smoothVelocities(widths)
#
# trackList$smoothVelocities(fit=bestFit, dist=10, maxWidth=25)
# trackList$plotTrackList(slot='vxs', xlim=c(50,100))
# lines(duh$t, duh$v, col='blue')
#
# setwd('/Users/jaywarrick/Documents/MMB/Projects/Adhesion/R/Testing')
# save(list = c('maximaList','trackList', 'bestFit'), file="20140911.Rdata")
##### Testing Sweep #####
# duh <- getSweep(amplitude=100, phaseShift=0, offset=0, sin=FALSE, fi=2, ff=0.01, tAll=seq(0,500,length.out=10001), frames=-1, guess=NULL)
# plot(duh$t, duh$x, col='red', type='l', xlim=c(230, 250))
# ##### Testing 4 #####
#
# path3 <- '/Volumes/BeebeBig/Jay/JEX Databases/Adhesion FACS/RPMI P-Sel 5Hz-100mHz/Cell_x0_y0/Roi-Tracks Roi/x0_y0.jxd'
# trackList <- new('TrackList')
# trackList$initializeWithFile(file=path3, sin=TRUE, fi=5, ff=0.1, tAll=seq(0,500,length.out=2361))
# trackList$filterTracks(fun = trackLengthFilter, min=50, max=1000000)
# trackList$filterTracks(fun = trackFrameFilter, startMin=0, startMax=1000000, endMin=2360, endMax=1000000)
# bestFit <- getBulkPhaseShift(trackList)
#
# ##### Testing 5 #####
#
# path <- "/Volumes/BeebeBig/Jay/JEX Databases/Adhesion FACS/Test/Cell_x0_y0/Roi-Tracks Roi/x0_y0.jxd"
# path2 <- '/Users/jaywarrick/Documents/JEX/Raw Data/LNCaP.arff'
# path3 <- '/Users/jaywarrick/Documents/JEX/LocalTest/PC3 vs LNCaP/Cell_x0_y0/Roi-Tracks Upper/x0_y0.jxd'
# trackList <- new('TrackList')
# trackList$initializeWithFile(file=path3, sin=TRUE, fi=1, ff=0.1, tAll=seq(0, 515, 1))
# trackList$filterTracks(fun = trackLengthFilter, min=50, max=1000000)
# trackList$filterTracks(fun = trackFrameFilter, startMin=0, startMax=1000000, endMin=515, endMax=1000000)
# bestFit <- getBulkPhaseShift(trackList)
|
/OOTrackingAnalysis.R
|
no_license
|
jaywarrick/R-Adhesion
|
R
| false
| false
| 8,425
|
r
|
# Adhesion-tracking analysis: rebuild reference-class objects saved in an
# .Rdata file, mark valid frames, smooth velocities, and plot the fraction of
# adhered cells over time. Depends on Tracking.R / OOTracking.R for the
# TrackList/Track/MaximaList/Maxima classes.
rm(list=ls())
source('~/.Rprofile')
source('~/Public/DropBox/GitHub/R-Adhesion/Tracking.R')
source('~/Public/DropBox/GitHub/R-Adhesion/OOTracking.R')
setwd('/Users/jaywarrick/Documents/MMB/Projects/Adhesion/R/Testing')
# Loads maximaList, trackList and bestFit produced by an earlier session.
load(file="20140911.Rdata")
# Re-wrap the loaded objects so they pick up the current class definitions
# (reference-class instances saved to disk keep their old method tables).
trackList <- TrackList$new(trackList)
for(track in trackList$tracks)
{
trackList$setTrack(Track$new(track))
}
maximaList <- MaximaList$new(maximaList)
for(.maxima in maximaList$maxima)
{
maximaList$setMaxima(Maxima$new(.maxima))
}
# Keep only frames inside the 15%-90% window between flow reversals, then
# smooth the velocity traces using the fitted frequency sweep.
trackList$setValidFrames(fit=bestFit, validStart=0.15, validEnd=0.9)
trackList$smoothVelocities(fit=bestFit, dist=10, maxWidth=25)
trackList$plotTrackList(validOnly=TRUE, slot='vxs', fun=abs, ylim=c(0,50), xlim=c(400,500))
# tracks x frames matrix of smoothed x-velocities (NA where invalid/absent).
trackMatrix <- trackList$getMatrix(slot='vxs', validOnly=TRUE)
sum(!is.na(trackMatrix))
# Per frame, fraction of cells with |v| < 3 (treated as "adhered").
ret <- list()
for(frame in colnames(trackMatrix))
{
velocities <- trackMatrix[,frame]
velocities <- abs(velocities[!is.na(velocities)])
if(!isempty(velocities))
{
adhered <- sum(velocities < 3)/length(velocities)
if(adhered == 1)
{
# Debug hook: drop into the browser when *every* cell appears adhered;
# note this frame is then NOT recorded in ret.
browser()
}
else
{
ret[[frame]] <- adhered
}
}
else
{
# Frames with no valid velocities are just reported, not recorded.
print(frame)
}
}
# Frame names are 0-based indices into tAll, hence the +1.
times <- trackList$tAll[as.numeric(names(ret))+1]
plot(times, as.numeric(ret), xlab='Time [s]', ylab='Percent Adhered [%]')
# NOTE(review): aTrack is not defined anywhere above -- this assumes a value
# left over from an interactive session; verify before running as a script.
aTrack <- Track$new(aTrack)
trackList$setTrack(aTrack)
# Overlay raw (blue line / black dots) and valid-only (red dots) velocities.
aTrack$plotTrack(slotY='vx', validOnly=FALSE, type='l', col='blue', pch=20, cex=0.25)
aTrack$plotTrack(slotY='vx', validOnly=FALSE, type='p', col='black', add=TRUE, pch=20, cex=0.5)
aTrack$plotTrack(slotY='vx', validOnly=TRUE, type='p', col='red', add=TRUE, pch=20, cex=0.5)
# length(aTrack$getSlot(slot='x', validOnly=T))
# length(aTrack$getSlot(slot='x', validOnly=F))
# aTrack$length()
# sum(aTrack$getSlot(slot='x', validOnly=T) %in% aTrack$getSlot(slot='x', validOnly=F))
# fit <- bestFit
# validStart = 0.25
# validEnd = 0.75
#
# sin <- trackList$sin
# fi <- trackList$fi
# ff <- trackList$ff
# tAll <- trackList$tAll
#
# sweep <- getSweep(amplitude=fit$par['amplitude'], phaseShift=fit$par['phaseShift'], offset=0, sin=sin, fi=fi, ff=ff, tAll=tAll, frames=-1, guess=NULL)
#
# sin <- FALSE
# fi <- 0.1
# ff <- 0.05
# tAll <- seq(0,1000,1)/10
# validStart = 0.1
# validEnd = 0.95
# sweep <- getSweep(amplitude=1, phaseShift=0, offset=0, sin=sin, fi=fi, ff=ff, tAll=tAll, frames=-1, guess=NULL)
# inflectionsToAddress <- sweep$inflectionNums %in% c(1,3) # These are times at which flow switches directions
# validFrames <- numeric(0)
# for(tIndex in 1:base::length(tAll))
# {
# # Get the nearest inflection at or beyond this time
#
# temp <- which((sweep$inflections >= tAll[tIndex]) & inflectionsToAddress)
# infIndex <- temp[1]
# if(is.na(infIndex)) next
#
# # Get the bounding inflections that represent changes in fluid direction
# infT2 <- sweep$inflections[infIndex] # take the inflection we found
# if((infIndex-2) < 1)
# {
# infT1 <- 0
# }
# else
# {
# infT1 <- sweep$inflections[infIndex-2] # also take two inflections prior because each inflection represents pi/2 and we want to go back to the last change in direction which is pi ago.
# }
# dInfT <- infT2-infT1 # define the time interval size between these two inflections
#
# # Within the if statement calculate the fractional location of this time index in the interval between the two inflections.
# if( (tAll[tIndex] >= (infT1 + validStart*dInfT)) & (tAll[tIndex] <= (infT1 + validEnd*dInfT)) )
# {
# # If it is within the startValid and endValid bounds, add it to the list of the valid frames
# validFrames <- c(validFrames, tIndex-1) # (tIndex-1) = frame because frames are indicies that start at 0
# }
# }
# plot(sweep$t, sweep$v, type='l')
# points(sweep$t[validFrames+1], sweep$v[validFrames + 1], type='p', pch=20, cex=1, col='red')
# sweep$inflectionNums
#
# ni <- 0
# ti <- first(tAll)
# tf <- last(tAll)
# phi <- 0
# inflections <- (log((log(ff/fi)*phi)/(2*fi*pi*tf)+(log(ff/fi)*ni)/(4*fi*tf)+1)*tf)/log(ff/fi)
##### Testing 1 #####
# maxima1 <- new('Maxima')
# maxima2 <- maxima1$copy()
# maxima3 <- maxima1$copy()
# maxima1$initializeWithROI(frame=0, polygon='1,1,1;2,2,2;3,3,3;4,4,4;51,61,5')
# maxima2$initializeWithROI(frame=1, polygon='2,1,5;3,2,4;4,3,3;5,4,2;50,60,1')
# maxima3$initializeWithROI(frame=2, polygon='1,1,1;2,2,2;3,3,3;4,4,4;5,5,5')
#
# maximaList <- new('MaximaList')
# maximaList$setMaxima(maxima1)
# maximaList$setMaxima(maxima2)
# maximaList$setMaxima(maxima3)
# maximaList$trackBack(startFrame=2, endFrame=0)
#
# trackList <- maximaList$getTrackList(sin=FALSE, fi=2, ff=0.1, tAll=0:1)
# maximaList$generateMaximaPlots(path='~/Documents/MMB/Projects/Adhesion/R/Testing/Plots1')
##### Testing 2 #####
# maximaList <- new('MaximaList')
# maximaList$initializeWithFile(path=path2)
# mListCopy <- maximaList$copy()
# mListCopy$trackBack(startFrame=501)
# # trackList <- mListCopy$getTrackList(sin=FALSE, fi=2, ff=0.1, tAll=0:515)
# # ##### Testing 3 #####
# # 50 ms exposure. 2361 images in 500 s = 4.722 frames / sec
# path3 <- '/Volumes/BeebeBig/Jay/JEX Databases/Adhesion FACS/RPMI P-Sel 5Hz-100mHz/Cell_x0_y0/Roi-Tracks Roi/x0_y0.jxd'
# path4 <- '/Volumes/BeebeBig/Jay/JEX Databases/Adhesion FACS/RPMI P-Sel 5Hz-100mHz/Cell_x0_y0/Roi-Maxima/x0_y0.jxd'
# path5 <- '~/Documents/MMB/Projects/Adhesion/R/Testing/SparseMaxima.txt'
# if(!('maximaList' %in% ls()))
# {
# maximaList <- new('MaximaList')
# maximaList$initializeWithFile(path=path5)
# } else
# {
# maximaList <- MaximaList$new(maximaList)
# }
# mListCopy <- maximaList$copy()
# mListCopy$trackBack(startFrame=9994, endFrame=0, maxDist=150, direction=c(1,0,0), directionality=10, uniformityDistThresh=2, digits=1)
# mListCopy$generateMaximaPlots(path='~/Documents/MMB/Projects/Adhesion/R/Testing/Plots1')
# trackList <- mListCopy$getTrackList(sin=FALSE, fi=2, ff=0.01, tAll=seq(0,500,length.out=maximaList$length()))
# trackList$filterTracks(fun = trackLengthFilter, min=500, max=1000000)
# # trackList$filterTracks(fun = trackFrameFilter, startMin=0, startMax=1000000, endMin=maximaList$length()-1, endMax=1000000)
# trackList$plotTrackList()
# bestFit <- getBulkPhaseShift(trackList)
# duh <- getSweep(amplitude=bestFit$par['amplitude'], phaseShift=bestFit$par['phaseShift'], offset=0, sin=trackList$sin, fi=trackList$fi, ff=trackList$ff, tAll=trackList$tAll, frames=-1, guess=NULL)
# lines(duh$t, duh$v, col='blue')
# # aTrack <- trackList$getTrack(0)
# widths <- getWindowWidths(fit=bestFit, trackList=trackList, dist=10, maxWidth=1000)
# # aTrack$smoothVelocities(widths)
#
# trackList$smoothVelocities(fit=bestFit, dist=10, maxWidth=25)
# trackList$plotTrackList(slot='vxs', xlim=c(50,100))
# lines(duh$t, duh$v, col='blue')
#
# setwd('/Users/jaywarrick/Documents/MMB/Projects/Adhesion/R/Testing')
# save(list = c('maximaList','trackList', 'bestFit'), file="20140911.Rdata")
##### Testing Sweep #####
# duh <- getSweep(amplitude=100, phaseShift=0, offset=0, sin=FALSE, fi=2, ff=0.01, tAll=seq(0,500,length.out=10001), frames=-1, guess=NULL)
# plot(duh$t, duh$x, col='red', type='l', xlim=c(230, 250))
# ##### Testing 4 #####
#
# path3 <- '/Volumes/BeebeBig/Jay/JEX Databases/Adhesion FACS/RPMI P-Sel 5Hz-100mHz/Cell_x0_y0/Roi-Tracks Roi/x0_y0.jxd'
# trackList <- new('TrackList')
# trackList$initializeWithFile(file=path3, sin=TRUE, fi=5, ff=0.1, tAll=seq(0,500,length.out=2361))
# trackList$filterTracks(fun = trackLengthFilter, min=50, max=1000000)
# trackList$filterTracks(fun = trackFrameFilter, startMin=0, startMax=1000000, endMin=2360, endMax=1000000)
# bestFit <- getBulkPhaseShift(trackList)
#
# ##### Testing 5 #####
#
# path <- "/Volumes/BeebeBig/Jay/JEX Databases/Adhesion FACS/Test/Cell_x0_y0/Roi-Tracks Roi/x0_y0.jxd"
# path2 <- '/Users/jaywarrick/Documents/JEX/Raw Data/LNCaP.arff'
# path3 <- '/Users/jaywarrick/Documents/JEX/LocalTest/PC3 vs LNCaP/Cell_x0_y0/Roi-Tracks Upper/x0_y0.jxd'
# trackList <- new('TrackList')
# trackList$initializeWithFile(file=path3, sin=TRUE, fi=1, ff=0.1, tAll=seq(0, 515, 1))
# trackList$filterTracks(fun = trackLengthFilter, min=50, max=1000000)
# trackList$filterTracks(fun = trackFrameFilter, startMin=0, startMax=1000000, endMin=515, endMax=1000000)
# bestFit <- getBulkPhaseShift(trackList)
|
#' @import magrittr
# Register "." and "%>%" as known global symbols so R CMD check does not
# flag the magrittr pipe/placeholder as undefined variables (NSE false positive).
utils::globalVariables(c(".","%>%"))
|
/R/gVar.R
|
no_license
|
cran/CollapseLevels
|
R
| false
| false
| 61
|
r
|
#' @import magrittr
# Register "." and "%>%" as known global symbols so R CMD check does not
# flag the magrittr pipe/placeholder as undefined variables (NSE false positive).
utils::globalVariables(c(".","%>%"))
|
# North Carolina: station averages from a NOAA hourly-observations export.
library(tidyverse)
# Full 2018 hourly record downloaded from NOAA (order id 2663437).
nc_full_2018 <- read_csv("~/Downloads/2663437.csv")
View(nc_full_2018)
# Keep the row window of interest (rows 6517-10447 of the export).
nc_window <- nc_full_2018[6517:10447,]
# Mean dry-bulb temperature; coercion to numeric turns non-numeric entries into NA.
mean_temp <- mean(as.numeric(nc_window$HourlyDryBulbTemperature), na.rm = TRUE)
summary(mean_temp)
# Mean relative humidity (already numeric in the export).
mean_humidity <- mean(nc_window$HourlyRelativeHumidity, na.rm = TRUE)
summary(mean_humidity)
# Mean hourly precipitation; coerced the same way as temperature.
mean_precip <- mean(as.numeric(nc_window$HourlyPrecipitation), na.rm = TRUE)
summary(mean_precip)
|
/state averages/North Carolina.R
|
no_license
|
s-kumar72/sees-mm
|
R
| false
| false
| 452
|
r
|
# North Carolina: station averages from a NOAA hourly-observations export.
library(tidyverse)
# Full 2018 hourly record downloaded from NOAA (order id 2663437).
nc_full_2018 <- read_csv("~/Downloads/2663437.csv")
View(nc_full_2018)
# Keep the row window of interest (rows 6517-10447 of the export).
nc_window <- nc_full_2018[6517:10447,]
# Mean dry-bulb temperature; coercion to numeric turns non-numeric entries into NA.
mean_temp <- mean(as.numeric(nc_window$HourlyDryBulbTemperature), na.rm = TRUE)
summary(mean_temp)
# Mean relative humidity (already numeric in the export).
mean_humidity <- mean(nc_window$HourlyRelativeHumidity, na.rm = TRUE)
summary(mean_humidity)
# Mean hourly precipitation; coerced the same way as temperature.
mean_precip <- mean(as.numeric(nc_window$HourlyPrecipitation), na.rm = TRUE)
summary(mean_precip)
|
## summary table per gene
## - global mean, min, max
## - population mean, min, max
## - ANOVA F statistic
## - unadjusted P, adjusted P (FDR)
library(dplyr)
library(fst)
kgp.p3<-read.table("data/integrated_call_samples_v3.20130502.ALL.panel", header = T, sep = "\t", as.is = T, col.names = c("ID","sub_pop","pop","gender"))
collect_values<-function(dp, ref) {
  # Per-gene summary for one (reference build, depth) combination:
  # global and per-superpopulation mean/min/max, plus a one-way ANOVA of
  # value ~ population (F statistic, unadjusted and FDR-adjusted p).
  #
  # dp  -- sequencing depth (numeric, used in the file name, e.g. 30 -> "_30x").
  # ref -- reference build label ("b37" or "hg38").
  # Relies on the global kgp.p3 sample panel and on fst::read_fst / dplyr::filter.
  fst_file=sprintf("data/%s_%sx.fst", ref, dp)
  raw_data<-read_fst(fst_file)
  # Restrict to samples present in both the data matrix and the 1000G panel.
  idset<-intersect(colnames(raw_data), kgp.p3$ID)
  kgp.map.1<-filter(kgp.p3, ID %in% idset)
  raw_data.1<-raw_data[, kgp.map.1$ID]
  # Helper: row-wise mean/min/max over a subset of sample columns.
  row_stats<-function(ids) {
    sub<-raw_data.1[, ids]
    list(mean=apply(sub, MARGIN = 1, FUN = mean),
         min=apply(sub, MARGIN = 1, FUN = min),
         max=apply(sub, MARGIN = 1, FUN = max))
  }
  # Build the result incrementally so the column order matches the original
  # layout exactly (global, then AFR/AMR/EUR/EAS/SAS, then ANOVA columns).
  mat<-data.frame(
    ccds_id=raw_data$ccds_id,
    gene_symbol=raw_data$gene,
    stringsAsFactors = FALSE
  )
  g<-row_stats(kgp.map.1$ID)
  mat$global_mean<-g$mean
  mat$global_min<-g$min
  mat$global_max<-g$max
  for (p in c("AFR","AMR","EUR","EAS","SAS")) {
    s<-row_stats(kgp.map.1$ID[ kgp.map.1$pop == p ])
    mat[[paste0(p, "_mean")]]<-s$mean
    mat[[paste0(p, "_min")]]<-s$min
    mat[[paste0(p, "_max")]]<-s$max
  }
  # Fit the ANOVA once per gene and extract both statistics from the same fit
  # (the original code fit the identical model twice per row -- once for the
  # F value and again for the p-value).
  aov_stats<-apply(raw_data.1, MARGIN=1, FUN=function(x){
    dat=data.frame(val=x, pop=kgp.map.1$pop, stringsAsFactors = FALSE)
    fit=summary(aov(data=dat, formula=val ~ pop))[[1]]
    c(F=fit[["F value"]][1], p=fit[["Pr(>F)"]][1])
  })
  mat$F_statistic<-aov_stats["F",]
  mat$p_unadj<-aov_stats["p",]
  # FDR adjustment across all genes in this file.
  mat$p_adj=p.adjust(mat$p_unadj, method = "fdr")
  mat$depth=dp
  mat$ver=ref
  return(mat)
}
## Build the wide per-gene summaries for every depth and both reference
## builds, then persist them as .fst files.
summary.hg38<-bind_rows(lapply(X=c(5,10,15,20,25,30,50,75,100), FUN = collect_values, ref="hg38"))
summary.b37<-bind_rows(lapply(X=c(5,10,15,20,25,30,50,75,100), FUN = collect_values, ref="b37"))
write_fst(summary.b37, path = "data/summary.b37.fst")
write_fst(summary.hg38, path = "data/summary.hg38.fst")
## melt() comes from reshape2, which was never attached above; load it here
## so the reshaping works in a fresh session.
library(reshape2)
summary.b37.long<-melt(summary.b37, id.vars = c("ccds_id","gene_symbol","depth","ver"))
summary.hg38.long<-melt(summary.hg38, id.vars = c("ccds_id","gene_symbol","depth","ver"))
## finally merge all into long data frame
summary.long<-bind_rows(summary.b37.long, summary.hg38.long)
summary.long$depth<-paste0(summary.long$depth, "x")
write_fst(summary.long, path="data/summary.long.fst")
## prepare .fst data file for gnomAD exomes
gnomad_exome<-read.table("data/gnomad_exome.r2.1.txt", header = T, sep = "\t", as.is = T, check.names = F)
## NOTE(review): fst files do not store row names -- confirm downstream code
## does not rely on them after read_fst().
row.names(gnomad_exome)<-gnomad_exome$ccds_id
write_fst(gnomad_exome, path="data/gnomad_exome.r2.1.fst")
## transcript metadata from ensembl
library(ensembldb)
library(wiggleplotr)
library(biomaRt)
## GRCh37 transcript models: exons and CDS ranges grouped by transcript.
txdb = makeTxDbFromBiomart(biomart = "ensembl", dataset = "hsapiens_gene_ensembl", host = "grch37.ensembl.org")
tx_exons = exonsBy(txdb, by="tx", use.names = T)
tx_cdss = cdsBy(txdb, by="tx", use.names = T)
ensembl_mart = useMart("ensembl", host = "grch37.ensembl.org")
ensembl_dataset = useDataset("hsapiens_gene_ensembl", mart = ensembl_mart)
tx_metadata = getBM(attributes = c("ensembl_transcript_id","ensembl_gene_id", "external_gene_name", "strand", "gene_biotype","transcript_biotype","ccds", "hgnc_symbol"),
mart = ensembl_dataset)
tx_metadata = rename(tx_metadata, transcript_id = ensembl_transcript_id, gene_id = ensembl_gene_id, gene_name = external_gene_name)
save(list=c("tx_exons","tx_cdss", "tx_metadata"), file="data/tx_data.RData")
|
/scripts/prep.R
|
permissive
|
bch-gnome/WEScover
|
R
| false
| false
| 4,663
|
r
|
## Build per-gene coverage summary tables from per-depth coverage .fst files.
## summary table per gene
## - global mean, min, max
## - population mean, min, max
## - ANOVA F statistic
## - unadjusted P, adjusted P (FDR)
library(dplyr)
library(fst)
## 1000 Genomes phase-3 sample panel: sample ID, sub-population,
## super-population, gender.
kgp.p3<-read.table("data/integrated_call_samples_v3.20130502.ALL.panel", header = T, sep = "\t", as.is = T, col.names = c("ID","sub_pop","pop","gender"))
## Summarise one (depth, reference) coverage file: per-gene global and
## per-superpopulation coverage statistics plus a one-way ANOVA of
## coverage ~ super-population.
collect_values<-function(dp, ref) {
fst_file=sprintf("data/%s_%sx.fst", ref, dp)
raw_data<-read_fst(fst_file)
## keep only samples present in both the coverage file and the panel
idset<-intersect(colnames(raw_data), kgp.p3$ID)
kgp.map.1<-filter(kgp.p3, ID %in% idset)
raw_data.1<-raw_data[, kgp.map.1$ID]
mat<-data.frame(
ccds_id=raw_data$ccds_id,
gene_symbol=raw_data$gene,
global_mean=apply(raw_data.1, MARGIN = 1, FUN = mean),
global_min=apply(raw_data.1, MARGIN = 1, FUN = min),
global_max=apply(raw_data.1, MARGIN = 1, FUN = max),
AFR_mean=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "AFR" ]], MARGIN = 1, FUN = mean),
AFR_min=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "AFR" ]], MARGIN = 1, FUN = min),
AFR_max=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "AFR" ]], MARGIN = 1, FUN = max),
AMR_mean=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "AMR" ]], MARGIN = 1, FUN = mean),
AMR_min=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "AMR" ]], MARGIN = 1, FUN = min),
AMR_max=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "AMR" ]], MARGIN = 1, FUN = max),
EUR_mean=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "EUR" ]], MARGIN = 1, FUN = mean),
EUR_min=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "EUR" ]], MARGIN = 1, FUN = min),
EUR_max=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "EUR" ]], MARGIN = 1, FUN = max),
EAS_mean=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "EAS" ]], MARGIN = 1, FUN = mean),
EAS_min=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "EAS" ]], MARGIN = 1, FUN = min),
EAS_max=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "EAS" ]], MARGIN = 1, FUN = max),
SAS_mean=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "SAS" ]], MARGIN = 1, FUN = mean),
SAS_min=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "SAS" ]], MARGIN = 1, FUN = min),
SAS_max=apply(raw_data.1[, kgp.map.1$ID[ kgp.map.1$pop == "SAS" ]], MARGIN = 1, FUN = max),
## NOTE(review): the ANOVA below is fitted twice per gene (once for the F
## statistic, once for the P value); fitting once would halve the cost.
F_statistic=apply(raw_data.1, MARGIN=1, FUN=function(x){ dat=data.frame(val=x, pop=kgp.map.1$pop, stringsAsFactors = F); return(summary(aov(data=dat, formula=val ~ pop))[[1]][["F value"]][1]) }),
p_unadj=apply(raw_data.1, MARGIN=1, FUN=function(x){ dat=data.frame(val=x, pop=kgp.map.1$pop, stringsAsFactors = F); return(summary(aov(data=dat, formula=val ~ pop))[[1]][["Pr(>F)"]][1]) }),
stringsAsFactors = F
)
mat$p_adj=p.adjust(mat$p_unadj, method = "fdr")
mat$depth=dp
mat$ver=ref
return(mat)
}
summary.hg38<-bind_rows(lapply(X=c(5,10,15,20,25,30,50,75,100), FUN = collect_values, ref="hg38"))
summary.b37<-bind_rows(lapply(X=c(5,10,15,20,25,30,50,75,100), FUN = collect_values, ref="b37"))
write_fst(summary.b37, path = "data/summary.b37.fst")
write_fst(summary.hg38, path = "data/summary.hg38.fst")
## NOTE(review): melt() is from reshape2, which is never attached in this
## script -- it must already be loaded in the calling environment.
summary.b37.long<-melt(summary.b37, id.vars = c("ccds_id","gene_symbol","depth","ver"))
summary.hg38.long<-melt(summary.hg38, id.vars = c("ccds_id","gene_symbol","depth","ver"))
## finally merge all into long data frame
summary.long<-bind_rows(summary.b37.long, summary.hg38.long)
summary.long$depth<-paste0(summary.long$depth, "x")
write_fst(summary.long, path="data/summary.long.fst")
## prepare .fst data file for gnomAD exomes
gnomad_exome<-read.table("data/gnomad_exome.r2.1.txt", header = T, sep = "\t", as.is = T, check.names = F)
row.names(gnomad_exome)<-gnomad_exome$ccds_id
write_fst(gnomad_exome, path="data/gnomad_exome.r2.1.fst")
## transcript metadata from ensembl
library(ensembldb)
library(wiggleplotr)
library(biomaRt)
## GRCh37 transcript models: exons and CDS ranges grouped by transcript.
txdb = makeTxDbFromBiomart(biomart = "ensembl", dataset = "hsapiens_gene_ensembl", host = "grch37.ensembl.org")
tx_exons = exonsBy(txdb, by="tx", use.names = T)
tx_cdss = cdsBy(txdb, by="tx", use.names = T)
ensembl_mart = useMart("ensembl", host = "grch37.ensembl.org")
ensembl_dataset = useDataset("hsapiens_gene_ensembl", mart = ensembl_mart)
tx_metadata = getBM(attributes = c("ensembl_transcript_id","ensembl_gene_id", "external_gene_name", "strand", "gene_biotype","transcript_biotype","ccds", "hgnc_symbol"),
mart = ensembl_dataset)
tx_metadata = rename(tx_metadata, transcript_id = ensembl_transcript_id, gene_id = ensembl_gene_id, gene_name = external_gene_name)
save(list=c("tx_exons","tx_cdss", "tx_metadata"), file="data/tx_data.RData")
|
# Classical seasonal-ARIMA walkthrough on the AirPassengers series, followed
# by a quick auto.arima forecast.
# load a time series
data(AirPassengers)
str(AirPassengers)
class(AirPassengers)
frequency(AirPassengers)
summary(AirPassengers)
plot(AirPassengers)
# overlay the overall linear trend
abline(reg=lm(AirPassengers~time(AirPassengers)))
cycle(AirPassengers)
# yearly means show the trend; per-month boxplots show the seasonality
plot(aggregate(AirPassengers,FUN=mean))
boxplot(AirPassengers~cycle(AirPassengers))
# seasonal ARIMA(0,1,1)(0,1,1)[12] on the log-transformed series
(fit <- arima(log(AirPassengers), c(0, 1, 1),seasonal = list(order = c(0, 1, 1), period = 12)))
pred <- predict(fit, n.ahead = 10*12)
# Back-transform from the log scale with exp(); the original used 2.718^x,
# which only approximates e and slightly biases the plotted forecast.
ts.plot(AirPassengers, exp(pred$pred), log = "y", lty = c(1,3))
######################################################
# Forecasting time series with neural networks in R ##
######################################################
library(forecast)
# NOTE(review): hard-coded absolute path; this fails on any other machine.
setwd("C:/Forecast_R")
daily_data = read.csv('day.csv', header=TRUE, stringsAsFactors=FALSE)
str(daily_data)
daily_data$Date = as.Date(daily_data$dteday)
count_ts = ts(daily_data[, c('cnt')])
# remove outliers
daily_data$clean_cnt = tsclean(count_ts)
str(daily_data)
data <- as.data.frame (daily_data$clean_cnt)
# NOTE(review): the next line overwrites the cleaned bike-share counts with
# AirPassengers, so auto.arima below is fitted to AirPassengers -- confirm
# this is intended.
data <-as.data.frame(AirPassengers)
plot(data)
y=auto.arima(data)
plot(forecast(y,h=30))
|
/forescast_timeseries.R
|
no_license
|
karafede/forecast_ARIMA
|
R
| false
| false
| 1,159
|
r
|
# Classical seasonal-ARIMA walkthrough on AirPassengers, then auto.arima.
# load a time series
data(AirPassengers)
str(AirPassengers)
class(AirPassengers)
frequency(AirPassengers)
summary(AirPassengers)
plot(AirPassengers)
# overlay the overall linear trend
abline(reg=lm(AirPassengers~time(AirPassengers)))
cycle(AirPassengers)
# yearly means show the trend; per-month boxplots show the seasonality
plot(aggregate(AirPassengers,FUN=mean))
boxplot(AirPassengers~cycle(AirPassengers))
# seasonal ARIMA(0,1,1)(0,1,1)[12] on the log-transformed series
(fit <- arima(log(AirPassengers), c(0, 1, 1),seasonal = list(order = c(0, 1, 1), period = 12)))
pred <- predict(fit, n.ahead = 10*12)
# NOTE(review): 2.718 only approximates e; exp(pred$pred) would back-transform
# from the log scale exactly.
ts.plot(AirPassengers,2.718^pred$pred, log = "y", lty = c(1,3))
######################################################
# Forecasting time series with neural networks in R ##
######################################################
library(forecast)
# NOTE(review): hard-coded absolute path; this fails on any other machine.
setwd("C:/Forecast_R")
daily_data = read.csv('day.csv', header=TRUE, stringsAsFactors=FALSE)
str(daily_data)
daily_data$Date = as.Date(daily_data$dteday)
count_ts = ts(daily_data[, c('cnt')])
# remove outliers
daily_data$clean_cnt = tsclean(count_ts)
str(daily_data)
data <- as.data.frame (daily_data$clean_cnt)
# NOTE(review): this overwrites the cleaned bike-share counts with
# AirPassengers, so the auto.arima fit below uses AirPassengers.
data <-as.data.frame(AirPassengers)
plot(data)
y=auto.arima(data)
plot(forecast(y,h=30))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/soccer.R
\docType{data}
\name{soccer}
\alias{soccer}
\title{Number of cards given for each referee-player pair in soccer.}
\format{
A data frame with 146,028 rows and 26 variables:
\describe{
\item{playerShort}{short player ID}
\item{player}{player name}
\item{club}{player club}
\item{leagueCountry}{country of player club (
England, Germany, France, and Spain)}
\item{birthday}{player birthday}
\item{height}{player height (in cm)}
\item{weight}{player weight (in kg)}
\item{position}{detailed player position}
\item{games}{number of games in the player-referee dyad}
\item{victories}{victories in the player-referee dyad}
\item{ties}{ties in the player-referee dyad}
\item{defeats}{losses in the player-referee dyad}
\item{goals}{goals scored by a player in the player-referee dyad}
\item{yellowCards}{number of yellow cards player received from referee}
\item{yellowReds}{number of yellow-red cards player received from referee}
\item{redCards}{number of red cards player received from referee}
\item{rater1}{skin rating of photo by rater 1
(5-point scale ranging from “very light skin” to “very dark skin”)}
\item{rater2}{skin rating of photo by rater 2
(5-point scale ranging from “very light skin” to “very dark skin”)}
\item{refNum}{unique referee ID number
(referee name removed for anonymizing purposes)}
\item{refCountry}{unique referee country ID number
(country name removed for anonymizing purposes)}
\item{meanIAT}{mean implicit bias score (using the race IAT)
for referee country, higher values correspond to faster
white | good, black | bad associations}
\item{nIAT}{sample size for race IAT in that particular country}
\item{seIAT}{standard error for mean estimate of race IAT}
\item{meanExp}{mean explicit bias score (using a racial thermometer task)
for referee country, higher values correspond to greater feelings of
warmth toward whites versus blacks}
\item{nExp}{sample size for explicit bias in that particular country}
\item{seExp}{standard error for mean estimate of explicit bias measure}
}
}
\source{
Silberzahn, R., Uhlmann, E. L., Martin, D. P., Anselmi, P., Aust, F.,
Awtrey, E. C., … Nosek, B. A. (2018, August 24).
{Many analysts, one dataset: Making transparent how variations in analytical
choices affect results.} Retrieved from \url{https://osf.io/gvm2z/}
}
\usage{
soccer
}
\description{
A dataset containing card counts between 2,053 soccer players
playing in the first male divisions of England, Germany, France,
and Spain in the 2012-2013 season and 3,147 referees
that these players played under in professional matches.
The dataset contains other covariates including 2 independent
skin tone ratings per player.
Each line represents a player-referee pair.
}
\details{
The skin colour of each player was rated by two independent raters,
\code{rater1} and \code{rater2}, and the 5-point scale values were
scaled to 0 to 1 - i.e., 0, 0.25, 0.5, 0.75, 1.
}
\keyword{datasets}
|
/man/soccer.Rd
|
no_license
|
mverseanalysis/mverse
|
R
| false
| true
| 3,090
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/soccer.R
\docType{data}
\name{soccer}
\alias{soccer}
\title{Number of cards given for each referee-player pair in soccer.}
\format{
A data frame with 146,028 rows and 26 variables:
\describe{
\item{playerShort}{short player ID}
\item{player}{player name}
\item{club}{player club}
\item{leagueCountry}{country of player club (
England, Germany, France, and Spain)}
\item{birthday}{player birthday}
\item{height}{player height (in cm)}
\item{weight}{player weight (in kg)}
\item{position}{detailed player position}
\item{games}{number of games in the player-referee dyad}
\item{victories}{victories in the player-referee dyad}
\item{ties}{ties in the player-referee dyad}
\item{defeats}{losses in the player-referee dyad}
\item{goals}{goals scored by a player in the player-referee dyad}
\item{yellowCards}{number of yellow cards player received from referee}
\item{yellowReds}{number of yellow-red cards player received from referee}
\item{redCards}{number of red cards player received from referee}
\item{rater1}{skin rating of photo by rater 1
(5-point scale ranging from “very light skin” to “very dark skin”)}
\item{rater2}{skin rating of photo by rater 2
(5-point scale ranging from “very light skin” to “very dark skin”)}
\item{refNum}{unique referee ID number
(referee name removed for anonymizing purposes)}
\item{refCountry}{unique referee country ID number
(country name removed for anonymizing purposes)}
\item{meanIAT}{mean implicit bias score (using the race IAT)
for referee country, higher values correspond to faster
white | good, black | bad associations}
\item{nIAT}{sample size for race IAT in that particular country}
\item{seIAT}{standard error for mean estimate of race IAT}
\item{meanExp}{mean explicit bias score (using a racial thermometer task)
for referee country, higher values correspond to greater feelings of
warmth toward whites versus blacks}
\item{nExp}{sample size for explicit bias in that particular country}
\item{seExp}{standard error for mean estimate of explicit bias measure}
}
}
\source{
Silberzahn, R., Uhlmann, E. L., Martin, D. P., Anselmi, P., Aust, F.,
Awtrey, E. C., … Nosek, B. A. (2018, August 24).
{Many analysts, one dataset: Making transparent how variations in analytical
choices affect results.} Retrieved from \url{https://osf.io/gvm2z/}
}
\usage{
soccer
}
\description{
A dataset containing card counts between 2,053 soccer players
playing in the first male divisions of England, Germany, France,
and Spain in the 2012-2013 season and 3,147 referees
that these players played under in professional matches.
The dataset contains other covariates including 2 independent
skin tone ratings per player.
Each line represents a player-referee pair.
}
\details{
The skin colour of each player was rated by two independent raters,
\code{rater1} and \code{rater2}, and the 5-point scale values were
scaled to 0 to 1 - i.e., 0, 0.25, 0.5, 0.75, 1.
}
\keyword{datasets}
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{coo.plot}
\alias{coo.plot}
\alias{coo.plot.default}
\alias{coo.plot.ldk}
\title{Plots a single shape}
\usage{
coo.plot(coo, ...)
\method{coo.plot}{default}(coo, xlim, ylim, border = "#333333", col = NA,
lwd = 1, lty = 1, points = FALSE, first.point = TRUE,
centroid = TRUE, xy.axis = TRUE, pch = 1, cex = 0.5, main = NA,
poly = TRUE, plot.new = TRUE, plot = TRUE, zoom = 1, ...)
\method{coo.plot}{ldk}(coo, cex = 1, poly = FALSE, ...)
}
\arguments{
\item{coo}{A \code{list} or a \code{matrix} of coordinates.}
\item{xlim}{If \code{coo.plot} is called and \code{coo} is missing, then a
vector of length 2 specifying the \code{xlim} of the plotting area.}
\item{ylim}{If \code{coo.plot} is called and \code{coo} is missing, then a
vector of length 2 specifying the \code{ylim} of the plotting area.}
\item{border}{A color for the shape border.}
\item{col}{A color to fill the shape polygon.}
\item{lwd}{The \code{lwd} for drawing shapes.}
\item{lty}{The \code{lty} for drawing shapes.}
\item{points}{\code{logical}. Whether to display points. If missing and
number of points is < 100, then points are plotted.}
\item{first.point}{\code{logical} whether to plot or not the first point.}
\item{centroid}{\code{logical}. Whether to display centroid.}
\item{xy.axis}{\code{logical}. Whether to draw the xy axis.}
\item{pch}{The \code{pch} for points.}
\item{cex}{The \code{cex} for points.}
\item{main}{\code{character}. A title for the plot.}
\item{poly}{logical whether to use \link{polygon} and \link{lines} to draw the shape,
or just \link{points}. In other words, whether the shape should be considered as a configuration
of landmarks or not (eg a closed outline).}
\item{plot.new}{\code{logical} whether to plot or not a new frame.}
\item{plot}{logical whether to plot something or just to create an empty plot.}
\item{zoom}{a numeric to take your distances.}
\item{...}{further arguments for use in coo.plot methods. See examples.}
}
\value{
No returned value.
}
\description{
A simple wrapper around \link{plot} for plotting shapes. Widely used in Momocs
in other graphical functions, in methods, etc.
}
\examples{
data(bot)
b <- bot[1]
coo.plot(b)
coo.plot(bot[2], plot.new=FALSE) # equivalent to coo.draw(bot[2])
coo.plot(b, zoom=2)
coo.plot(b, border='blue')
coo.plot(b, first.point=FALSE, centroid=FALSE)
coo.plot(b, points=TRUE, pch=20)
coo.plot(b, xy.axis=FALSE, lwd=2, col='#F2F2F2')
}
\seealso{
coo.draw
}
\keyword{Graphics}
|
/man/coo.plot.Rd
|
no_license
|
raz1/Momocs
|
R
| false
| false
| 2,519
|
rd
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{coo.plot}
\alias{coo.plot}
\alias{coo.plot.default}
\alias{coo.plot.ldk}
\title{Plots a single shape}
\usage{
coo.plot(coo, ...)
\method{coo.plot}{default}(coo, xlim, ylim, border = "#333333", col = NA,
lwd = 1, lty = 1, points = FALSE, first.point = TRUE,
centroid = TRUE, xy.axis = TRUE, pch = 1, cex = 0.5, main = NA,
poly = TRUE, plot.new = TRUE, plot = TRUE, zoom = 1, ...)
\method{coo.plot}{ldk}(coo, cex = 1, poly = FALSE, ...)
}
\arguments{
\item{coo}{A \code{list} or a \code{matrix} of coordinates.}
\item{xlim}{If \code{coo.plot} is called and \code{coo} is missing, then a
vector of length 2 specifying the \code{xlim} of the plotting area.}
\item{ylim}{If \code{coo.plot} is called and \code{coo} is missing, then a
vector of length 2 specifying the \code{ylim} of the plotting area.}
\item{border}{A color for the shape border.}
\item{col}{A color to fill the shape polygon.}
\item{lwd}{The \code{lwd} for drawing shapes.}
\item{lty}{The \code{lty} for drawing shapes.}
\item{points}{\code{logical}. Whether to display points. If missing and
number of points is < 100, then points are plotted.}
\item{first.point}{\code{logical} whether to plot or not the first point.}
\item{centroid}{\code{logical}. Whether to display centroid.}
\item{xy.axis}{\code{logical}. Whether to draw the xy axis.}
\item{pch}{The \code{pch} for points.}
\item{cex}{The \code{cex} for points.}
\item{main}{\code{character}. A title for the plot.}
\item{poly}{logical whether to use \link{polygon} and \link{lines} to draw the shape,
or just \link{points}. In other words, whether the shape should be considered as a configuration
of landmarks or not (eg a closed outline).}
\item{plot.new}{\code{logical} whether to plot or not a new frame.}
\item{plot}{logical whether to plot something or just to create an empty plot.}
\item{zoom}{a numeric to take your distances.}
\item{...}{further arguments for use in coo.plot methods. See examples.}
}
\value{
No returned value.
}
\description{
A simple wrapper around \link{plot} for plotting shapes. Widely used in Momocs
in other graphical functions, in methods, etc.
}
\examples{
data(bot)
b <- bot[1]
coo.plot(b)
coo.plot(bot[2], plot.new=FALSE) # equivalent to coo.draw(bot[2])
coo.plot(b, zoom=2)
coo.plot(b, border='blue')
coo.plot(b, first.point=FALSE, centroid=FALSE)
coo.plot(b, points=TRUE, pch=20)
coo.plot(b, xy.axis=FALSE, lwd=2, col='#F2F2F2')
}
\seealso{
coo.draw
}
\keyword{Graphics}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-movie.R
\docType{data}
\name{movie_87}
\alias{movie_87}
\title{Basquiat}
\format{igraph object}
\source{
https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/T4HBA3
https://www.imdb.com/title/tt0115632
}
\usage{
movie_87
}
\description{
Interactions of characters in the movie "Basquiat" (1996)
}
\details{
The networks were built with a movie script parser. Even after multiple manual checks, the data set can still contain minor errors (e.g. typos in character names or wrongly parsed names). This may require some additional manual checks before using the data. Please report any such issues (https://github.com/schochastics/networkdata/issues/)
}
\references{
Kaminski, Jermain; Schober, Michael; Albaladejo, Raymond; Zastupailo, Oleksandr; Hidalgo, César, 2018, Moviegalaxies - Social Networks in Movies, https://doi.org/10.7910/DVN/T4HBA3, Harvard Dataverse, V3
}
\keyword{datasets}
|
/man/movie_87.Rd
|
permissive
|
kjhealy/networkdata
|
R
| false
| true
| 994
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-movie.R
\docType{data}
\name{movie_87}
\alias{movie_87}
\title{Basquiat}
\format{igraph object}
\source{
https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/T4HBA3
https://www.imdb.com/title/tt0115632
}
\usage{
movie_87
}
\description{
Interactions of characters in the movie "Basquiat" (1996)
}
\details{
The networks were built with a movie script parser. Even after multiple manual checks, the data set can still contain minor errors (e.g. typos in character names or wrongly parsed names). This may require some additional manual checks before using the data. Please report any such issues (https://github.com/schochastics/networkdata/issues/)
}
\references{
Kaminski, Jermain; Schober, Michael; Albaladejo, Raymond; Zastupailo, Oleksandr; Hidalgo, César, 2018, Moviegalaxies - Social Networks in Movies, https://doi.org/10.7910/DVN/T4HBA3, Harvard Dataverse, V3
}
\keyword{datasets}
|
source("incl/start.R")
message("*** Futures - lazy ...")
strategies <- c("batchtools_local")
## CRAN processing times:
## On Windows 32-bit, don't run these tests
if (!fullTest && isWin32) strategies <- character(0L)
for (strategy in strategies) {
mprintf("- plan('%s') ...\n", strategy)
plan(strategy)
a <- 42
f <- future(2 * a, lazy = TRUE)
a <- 21
## In future (> 1.14.0), resolved() will launch lazy future,
## which means for some backends (e.g. sequential) this means
## that resolved() might end up returning TRUE.
if (packageVersion("future") <= "1.14.0") {
stopifnot(!resolved(f))
}
f <- resolve(f)
stopifnot(resolved(f))
v <- value(f)
stopifnot(v == 84)
a <- 42
v %<-% { 2 * a } %lazy% TRUE
a <- 21
f <- futureOf(v)
## In future (> 1.14.0), resolved() will launch lazy future,
## which means for some backends (e.g. sequential) this means
## that resolved() might end up returning TRUE.
if (packageVersion("future") <= "1.14.0") {
stopifnot(!resolved(f))
}
f <- resolve(f)
stopifnot(resolved(f))
stopifnot(v == 84)
mprintf("- plan('%s') ... DONE\n", strategy)
} ## for (strategy ...)
message("*** Futures - lazy ... DONE")
source("incl/end.R")
|
/tests/future,lazy.R
|
no_license
|
pythseq/future.batchtools
|
R
| false
| false
| 1,227
|
r
|
source("incl/start.R")
message("*** Futures - lazy ...")
strategies <- c("batchtools_local")
## CRAN processing times:
## On Windows 32-bit, don't run these tests
if (!fullTest && isWin32) strategies <- character(0L)
for (strategy in strategies) {
mprintf("- plan('%s') ...\n", strategy)
plan(strategy)
a <- 42
f <- future(2 * a, lazy = TRUE)
a <- 21
## In future (> 1.14.0), resolved() will launch lazy future,
## which means for some backends (e.g. sequential) this means
## that resolved() might end up returning TRUE.
if (packageVersion("future") <= "1.14.0") {
stopifnot(!resolved(f))
}
f <- resolve(f)
stopifnot(resolved(f))
v <- value(f)
stopifnot(v == 84)
a <- 42
v %<-% { 2 * a } %lazy% TRUE
a <- 21
f <- futureOf(v)
## In future (> 1.14.0), resolved() will launch lazy future,
## which means for some backends (e.g. sequential) this means
## that resolved() might end up returning TRUE.
if (packageVersion("future") <= "1.14.0") {
stopifnot(!resolved(f))
}
f <- resolve(f)
stopifnot(resolved(f))
stopifnot(v == 84)
mprintf("- plan('%s') ... DONE\n", strategy)
} ## for (strategy ...)
message("*** Futures - lazy ... DONE")
source("incl/end.R")
|
## Type-I-error simulation under H0: Wald CI for a non-inferiority binomial
## comparison (scenario 5, 15% dropout), MNAR missingness analysed with
## multiple imputation (mice). 10,000 trials are run in parallel.
library(dplyr)
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025
method <- 'wald'
scenario <- 5
param <- 1
anal_type <- "mice"
## sample-size / margin settings for this method and scenario
ss <- ss.bounds%>%
  dplyr::filter(method == "wald", scenario.id == scenario)
do_val <- 0.15
## one list element per simulated trial: full-data CI plus missing-data CIs
x1 <- parallel::mclapply(X = 1:10000,
                         mc.cores = parallel::detectCores() - 1,
                         FUN= function(x) {
                           library(tidyr, warn.conflicts = F, quietly = T)
                           library(dplyr, warn.conflicts = F, quietly = T)
                           library(purrr, warn.conflicts = F, quietly = T)
                           library(reshape2, warn.conflicts = F, quietly = T)
                           library(MASS, warn.conflicts = F, quietly = T)
                           library(nibinom)
                           ## reproducible per-trial seed: scenario block plus trial index
                           set.seed(10000*scenario + x)
                           #generate full data with desired correlation structure
                           dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
                                           mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
                           ci.full <- dt0%>%wald_ci(ss$M2,'y', alpha)
                           #define missingness parameters and do rates
                           m_param <- mpars(do = do_val, atype = anal_type)
                           #impose missing values and perform analysis
                           ci.miss.mnar1 <- m_param%>%
                             slice(1)%>%
                             dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                 miss_gen_an, dt = dt0, do = do_val,
                                                                 ci_method = wald_ci,
                                                                 sing_anal = F,
                                                                 mice_anal = T,
                                                                 m2 = ss$M2, seed = 10000*scenario + x,
                                                                 seed_mice = 10000*scenario + x,
                                                                 method = method,
                                                                 alpha = alpha,
                                                                 n_mi = 2,
                                                                 m_mi = 100,
                                                                 mu_T = 0.78, sd_T = 0.05))%>%
                             dplyr::select(missing, results)
                           ## second MNAR setting: identical to mnar1 except slice(2)
                           ## and mu_T = 1.35
                           ci.miss.mnar2 <- m_param%>%
                             slice(2)%>%
                             dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                 miss_gen_an, dt = dt0, do = do_val,
                                                                 ci_method = wald_ci,
                                                                 sing_anal = F,
                                                                 mice_anal = T,
                                                                 m2 = ss$M2, seed = 10000*scenario + x,
                                                                 seed_mice = 10000*scenario + x,
                                                                 method = method,
                                                                 alpha = alpha,
                                                                 n_mi = 2,
                                                                 m_mi = 100,
                                                                 mu_T = 1.35, sd_T = 0.05))%>%
                             dplyr::select(missing, results)
                           ## tag both missing-data results with scenario metadata
                           ci.miss <- bind_rows(ci.miss.mnar1, ci.miss.mnar2)%>%
                             dplyr::mutate(scenario.id = ss$scenario.id,
                                           p_C = ss$p_C,
                                           M2 = ss$M2,
                                           type = 't.H0',
                                           do = do_val,
                                           sim.id = x)
                           ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
                           return(ci.all)
                         })
#to summarize type-I error and mean relative bias from the simulated data
source('funs/h0.mice.sum.R')
h0.mice.sum(x1, method = 'wald')
|
/sim_pgms/wald/do15/2xcontH0_sc5_do15_mice.R
|
no_license
|
yuliasidi/nibinom_apply
|
R
| false
| false
| 3,330
|
r
|
## Type-I-error simulation under H0: Wald CI for a non-inferiority binomial
## comparison (scenario 5, 15% dropout), MNAR missingness analysed with
## multiple imputation (mice). 10,000 trials are run in parallel.
library(dplyr)
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025
method <- 'wald'
scenario <- 5
param <- 1
anal_type <- "mice"
## sample-size / margin settings for this method and scenario
ss <- ss.bounds%>%
  dplyr::filter(method == "wald", scenario.id == scenario)
do_val <- 0.15
## one list element per simulated trial: full-data CI plus missing-data CIs
x1 <- parallel::mclapply(X = 1:10000,
                         mc.cores = parallel::detectCores() - 1,
                         FUN= function(x) {
                           library(tidyr, warn.conflicts = F, quietly = T)
                           library(dplyr, warn.conflicts = F, quietly = T)
                           library(purrr, warn.conflicts = F, quietly = T)
                           library(reshape2, warn.conflicts = F, quietly = T)
                           library(MASS, warn.conflicts = F, quietly = T)
                           library(nibinom)
                           ## reproducible per-trial seed: scenario block plus trial index
                           set.seed(10000*scenario + x)
                           #generate full data with desired correlation structure
                           dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
                                           mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
                           ci.full <- dt0%>%wald_ci(ss$M2,'y', alpha)
                           #define missingness parameters and do rates
                           m_param <- mpars(do = do_val, atype = anal_type)
                           #impose missing values and perform analysis
                           ci.miss.mnar1 <- m_param%>%
                             slice(1)%>%
                             dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                 miss_gen_an, dt = dt0, do = do_val,
                                                                 ci_method = wald_ci,
                                                                 sing_anal = F,
                                                                 mice_anal = T,
                                                                 m2 = ss$M2, seed = 10000*scenario + x,
                                                                 seed_mice = 10000*scenario + x,
                                                                 method = method,
                                                                 alpha = alpha,
                                                                 n_mi = 2,
                                                                 m_mi = 100,
                                                                 mu_T = 0.78, sd_T = 0.05))%>%
                             dplyr::select(missing, results)
                           ## second MNAR setting: identical to mnar1 except slice(2)
                           ## and mu_T = 1.35
                           ci.miss.mnar2 <- m_param%>%
                             slice(2)%>%
                             dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                 miss_gen_an, dt = dt0, do = do_val,
                                                                 ci_method = wald_ci,
                                                                 sing_anal = F,
                                                                 mice_anal = T,
                                                                 m2 = ss$M2, seed = 10000*scenario + x,
                                                                 seed_mice = 10000*scenario + x,
                                                                 method = method,
                                                                 alpha = alpha,
                                                                 n_mi = 2,
                                                                 m_mi = 100,
                                                                 mu_T = 1.35, sd_T = 0.05))%>%
                             dplyr::select(missing, results)
                           ## tag both missing-data results with scenario metadata
                           ci.miss <- bind_rows(ci.miss.mnar1, ci.miss.mnar2)%>%
                             dplyr::mutate(scenario.id = ss$scenario.id,
                                           p_C = ss$p_C,
                                           M2 = ss$M2,
                                           type = 't.H0',
                                           do = do_val,
                                           sim.id = x)
                           ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
                           return(ci.all)
                         })
#to summarize type-I error and mean relative bias from the simulated data
source('funs/h0.mice.sum.R')
h0.mice.sum(x1, method = 'wald')
|
\docType{package}
\name{WMTools-package}
\alias{WMTools}
\alias{WMTools-package}
\title{Tools for simulating activations in Warmachine(R)}
\description{
Simulate ranged and melee attacks in the game of
Warmachine(R)
}
\details{
\tabular{ll}{ Package: \tab WMTools \cr Type: \tab Package
\cr Version: \tab 0.1 \cr Date: \tab 2014-10-09 \cr
Lazyload: \tab yes \cr }
}
\section{Special abilities recognized}{
warjack \enumerate{ \item gunfighter (\code{shot}) }
}
\section{Special abilities recognized}{
range \enumerate{ \item free boost hit (\code{shot})
\item free boost damage (\code{shot}) \item ammo
type:quake (\code{shot}) \item critical knockdown
(\code{shot}) \item critical devastation (\code{shot})
\item linked guns (\code{ranged}) \item rapid fire
(\code{ranged}) }
}
\section{Special abilities recognized}{
melee \enumerate{ \item chain attack bloodbath
(\code{melee}) \item powerful charge (\code{attack})
\item crit knockdown (\code{attack}) }
}
\section{Special abilities recognized}{
target \enumerate{ \item stealth (\code{shot}) }
}
\examples{
blueleader <- list(stats = c(SPD = 5, MAT = 7, RAT = 5),
range = list(),
melee = list('quake hammer' = list(stats = c(RNG = 2, PAS = 18),
special = c("crit knockdown")),
'open fist' = list(stats = c(RNG = 0.5, PAS = 14), special = character(0))))
activation(blueleader, which = 1, target = list(stats = list(DEF = 13, ARM = 13, BASE = 30)),
strategy = "aim", boost_hit = TRUE, boost_damage = TRUE, foc = 3,
dice = c(1, 5, 4, 1, 1, 2))
activation(blueleader, which = 1, target = list(stats = list(DEF = 13, ARM = 13, BASE = 30)),
strategy = "charge", boost_hit = TRUE, boost_damage = TRUE, foc = 3,
dice = c(1, 5, 4, 1, 1, 2))
}
\keyword{game}
\keyword{package}
\keyword{simulation}
|
/man/WMTools-package.Rd
|
permissive
|
CSJCampbell/WMTools
|
R
| false
| false
| 1,854
|
rd
|
\docType{package}
\name{WMTools-package}
\alias{WMTools}
\alias{WMTools-package}
\title{Tools for simulating activations in Warmachine(R)}
\description{
Simulate ranged and melee attacks in the game of
Warmachine(R)
}
\details{
\tabular{ll}{ Package: \tab WMTools \cr Type: \tab Package
\cr Version: \tab 0.1 \cr Date: \tab 2014-10-09 \cr
Lazyload: \tab yes \cr }
}
\section{Special abilities recognized}{
warjack \enumerate{ \item gunfighter (\code{shot}) }
}
\section{Special abilities recognized}{
range \enumerate{ \item free boost hit (\code{shot})
\item free boost damage (\code{shot}) \item ammo
type:quake (\code{shot}) \item critical knockdown
(\code{shot}) \item critical devastation (\code{shot})
\item linked guns (\code{ranged}) \item rapid fire
(\code{ranged}) }
}
\section{Special abilities recognized}{
melee \enumerate{ \item chain attack bloodbath
(\code{melee}) \item powerful charge (\code{attack})
\item crit knockdown (\code{attack}) }
}
\section{Special abilities recognized}{
target \enumerate{ \item stealth (\code{shot}) }
}
\examples{
blueleader <- list(stats = c(SPD = 5, MAT = 7, RAT = 5),
range = list(),
melee = list('quake hammer' = list(stats = c(RNG = 2, PAS = 18),
special = c("crit knockdown")),
'open fist' = list(stats = c(RNG = 0.5, PAS = 14), special = character(0))))
activation(blueleader, which = 1, target = list(stats = list(DEF = 13, ARM = 13, BASE = 30)),
strategy = "aim", boost_hit = TRUE, boost_damage = TRUE, foc = 3,
dice = c(1, 5, 4, 1, 1, 2))
activation(blueleader, which = 1, target = list(stats = list(DEF = 13, ARM = 13, BASE = 30)),
strategy = "charge", boost_hit = TRUE, boost_damage = TRUE, foc = 3,
dice = c(1, 5, 4, 1, 1, 2))
}
\keyword{game}
\keyword{package}
\keyword{simulation}
|
# tSNE javascript------------------------------------------------------
# Shiny UI fragment for the t-SNE panel: iteration and neighbour-count
# controls, a GO action button, and the plot output slot.
div(id = "tsne_js",
#useShinyjs(),
#extendShinyjs(script = source(file.path("js", "tsne.js"), local = TRUE)$value),
fluidRow(
# number of t-SNE iterations (10..1000)
numericInput('inNumIter', label = "Number of Iterations:", value = 100,
min = 10, max = 1000),
# maximum number of neighbours (2..100)
numericInput('maxNumNeigh', label = "Max Number of Neighbors:", value = 10,
min = 2, max = 100),
actionButton('tsne_go', "GO", class = "tsne-go"),
plotOutput('tSNE_plot',height=500)
)
)
|
/ui_files/tSNE_JS.R
|
no_license
|
asRodelgo/shinyTCMN
|
R
| false
| false
| 561
|
r
|
# tSNE javascript------------------------------------------------------
# Shiny UI fragment: controls and output area for a JavaScript-backed
# t-SNE embedding.  Wrapped in a div with id "tsne_js" so the whole
# widget can be shown/hidden or targeted from JS as one unit.
div(id = "tsne_js",
#useShinyjs(),
#extendShinyjs(script = source(file.path("js", "tsne.js"), local = TRUE)$value),
fluidRow(
# number of optimization iterations the t-SNE run performs
numericInput('inNumIter', label = "Number of Iterations:", value = 100,
min = 10, max = 1000),
# neighborhood-size limit used by the embedding
numericInput('maxNumNeigh', label = "Max Number of Neighbors:", value = 10,
min = 2, max = 100),
# triggers the computation (observed server-side as input$tsne_go)
actionButton('tsne_go', "GO", class = "tsne-go"),
# rendered scatter plot of the 2-D embedding
plotOutput('tSNE_plot',height=500)
)
)
|
library(caret)
library(RSNNS)
set.seed(1)
data(iris)
#将数据顺序打乱
iris = iris[sample(1:nrow(iris),length(1:nrow(iris))),1:ncol(iris)]
#定义网络输入
irisValues= iris[,1:4]
#定义网络输出,并将数据进行格式转换,将类别变量处理成向量形式
irisTargets = decodeClassLabels(iris[,5])
#从中划分出训练样本和检验样本
#splitForTrainingAndTest将输入值和目标值拆分为训练集和测试集。 得到的是列表
#测试集从数据的结尾获取。如果要对数据进行混洗,则应在调用此函数之前完成。
iris = splitForTrainingAndTest(irisValues, irisTargets, ratio = 0)
#数据标准化
iris = normTrainingAndTestSet(iris)
cv_error = rep(0, 10)
for(i in 1:10)
{
fold = ((i-1)*nrow(iris$inputsTrain)/10+1):(i*nrow(iris$inputsTrain)/10)
X_valid = iris$inputsTrain[fold,]
X_train = iris$inputsTrain[-fold,]
y_valid = iris$targetsTrain[fold,]
y_train = iris$targetsTrain[-fold,]
#利用mlp命令执行前馈反向传播神经网络算法
# model = mlp(X_train, y_train, size = 5, learnFunc = "Quickprop",
# learnFuncParams = c(0.1, 2.0, 0.0001, 0.1), maxit = 100, inputsTest = X_valid,
# targetsTest = y_valid)
model = mlp(X_train, y_train, size = 5, learnFunc = "BackpropBatch",
learnFuncParams = c(10, 0.1), maxit = 100,
inputsTest = X_valid, targetsTest = y_valid)
# model = mlp(X_train, y_train, size = 5, learnFunc = "SCG",
# learnFuncParams = c(0, 0, 0, 0), maxit = 30,
# inputsTest = X_valid, targetsTest = y_valid)
# 利用上面建立的模型进行预测
pred = predict(model, X_valid)
# 生成混淆矩阵,观察预测精度
result = confusionMatrix(y_valid, pred)
# print(result)
cv_error[i] = sum(diag(result))/sum(result)
}
print(mean(cv_error))
|
/project/neural_net.R
|
no_license
|
zhengfuli/Statistical-Learning-in-R
|
R
| false
| false
| 1,892
|
r
|
# 10-fold cross-validation of a single-hidden-layer feed-forward network
# (RSNNS::mlp) on the iris data; caret supplies confusionMatrix().
library(caret)
library(RSNNS)
set.seed(1)
data(iris)
# Shuffle the row order of the data
iris = iris[sample(1:nrow(iris),length(1:nrow(iris))),1:ncol(iris)]
# Network inputs: the four iris measurements
irisValues= iris[,1:4]
# Network targets: one-hot encode the species factor into indicator columns
irisTargets = decodeClassLabels(iris[,5])
# Split the data into training and test samples.
# splitForTrainingAndTest() splits inputs and targets into train/test sets
# and returns a list; the test set is taken from the END of the data, so
# shuffling must happen before this call (ratio = 0 keeps everything in train).
iris = splitForTrainingAndTest(irisValues, irisTargets, ratio = 0)
# Standardize (center/scale) the inputs
iris = normTrainingAndTestSet(iris)
cv_error = rep(0, 10)
for(i in 1:10)
{
# Row indices of the i-th validation fold
fold = ((i-1)*nrow(iris$inputsTrain)/10+1):(i*nrow(iris$inputsTrain)/10)
X_valid = iris$inputsTrain[fold,]
X_train = iris$inputsTrain[-fold,]
y_valid = iris$targetsTrain[fold,]
y_train = iris$targetsTrain[-fold,]
# Fit a feed-forward network trained by back-propagation via mlp()
# model = mlp(X_train, y_train, size = 5, learnFunc = "Quickprop",
# learnFuncParams = c(0.1, 2.0, 0.0001, 0.1), maxit = 100, inputsTest = X_valid,
# targetsTest = y_valid)
model = mlp(X_train, y_train, size = 5, learnFunc = "BackpropBatch",
learnFuncParams = c(10, 0.1), maxit = 100,
inputsTest = X_valid, targetsTest = y_valid)
# model = mlp(X_train, y_train, size = 5, learnFunc = "SCG",
# learnFuncParams = c(0, 0, 0, 0), maxit = 30,
# inputsTest = X_valid, targetsTest = y_valid)
# Predict on the held-out fold with the fitted model
pred = predict(model, X_valid)
# Confusion matrix to inspect prediction quality
result = confusionMatrix(y_valid, pred)
# print(result)
# NOTE(review): despite the name, cv_error stores fold ACCURACY
# (diagonal / total of the confusion matrix), not error rate.
cv_error[i] = sum(diag(result))/sum(result)
}
print(mean(cv_error))
|
# Write a human-readable summary of a fitted TreeBUGS model to a text file.
#
# Args:
#   fittedModel: fitted model object; this function reads its
#                $summary$individParameters, $summary$transformedParameters
#                (may be NULL) and $mptInfo components.
#   parEstFile:  path of the output file; if missing or NULL, nothing is
#                written and the function is a no-op.
#
# Output redirection uses sink(); on.exit() guarantees the sink is closed
# even if printing fails or is interrupted part-way through (the original
# trailing sink() call could leave the console redirected in that case).
writeSummary <- function(fittedModel, parEstFile = NULL) {
  if (!(missing(parEstFile) || is.null(parEstFile))) {
    sink(file = parEstFile, type = "output")
    on.exit(sink(), add = TRUE)
    try({
      print(summary(fittedModel))
      # cat("\n\n#################################\n#### Group Parameter Estimates\n")
      # print(fittedModel$summary$groupParameters)
      cat("\n\n#################################\n#### Individual Parameter Estimates\n")
      printIndividualPar(fittedModel$summary$individParameters)
      if (!is.null(fittedModel$summary$transformedParameters)) {
        cat("\n\n#################################\n#### Transformed Parameters (Group level)\n")
        print(fittedModel$summary$transformedParameters)
      }
      cat("\n\n#################################\n#### Model information\n")
      print(fittedModel$mptInfo)
    })
  }
}
# Print an array of individual parameter estimates, one slice per parameter.
#
# Args:
#   array: 3-D array whose first dimension indexes parameters and whose
#          first dimnames entry holds the parameter names; for each
#          parameter the remaining 2-D slice is printed.
#
# seq_len() (rather than the original 1:dd[1]) correctly does nothing when
# the first dimension has length zero, instead of iterating i = 1, 0.
printIndividualPar <- function(array) {
  dd <- dim(array)
  par <- dimnames(array)[[1]]
  for (i in seq_len(dd[1])) {
    cat("Parameter ", par[i], "\n")
    print(array[i, , ])
  }
}
|
/R/writeSummaryToFile.R
|
no_license
|
mariusbarth/TreeBUGS
|
R
| false
| false
| 1,112
|
r
|
# write summary to file
writeSummary <- function(fittedModel, parEstFile = NULL) {
if (!(missing(parEstFile) || is.null(parEstFile))) {
sink(file = parEstFile, type = "o")
try({
print(summary(fittedModel))
# cat("\n\n#################################\n#### Group Parameter Estimates\n")
# print(fittedModel$summary$groupParameters)
cat("\n\n#################################\n#### Individual Parameter Estimates\n")
printIndividualPar(fittedModel$summary$individParameters)
if (!is.null(fittedModel$summary$transformedParameters)) {
cat("\n\n#################################\n#### Transformed Parameters (Group level)\n")
print(fittedModel$summary$transformedParameters)
}
cat("\n\n#################################\n#### Model information\n")
print(fittedModel$mptInfo)
})
sink()
}
}
# print array of individual estimates
printIndividualPar <- function(array) {
dd <- dim(array)
par <- dimnames(array)[[1]]
for (i in 1:dd[1]) {
cat("Parameter ", par[i], "\n")
print(array[i, , ])
}
}
|
#######################
#Exercice 3
y0 = seq(0, 10, by=2)
y0
y1 = seq(2, 18, 2)
y1
y2 = rep(4, 20)
y2
y3 = seq(0, 9.5, 0.5)
y3
#Extraction y3
y3[3]
y3[-3]
#Comparaison
matrix(y3, nrow=2)
matrix(y3, byrow=TRUE)
#Matrices
A = matrix(seq(1:12), nrow=4, byrow=TRUE)
B = matrix(seq(1:12), nrow=4)
#Extraction matrices
A[2,3]
A[,1]
A[2,]
#Nouvelle matrice
C = matrix(data = A[c(1, 4),], nrow=2)
C
mat99 = matrix(data = 1, nrow = 9, ncol = 9)
mat99
diag(mat99) <- 0
mat99
#Exercice 5
x = seq(0, 1, 0.1)
x
length(x)
y <- 4 * x * (1 - x)
y
plot(x, y)
max(y)
y == 1
fx <- 4 * x * x * (1 - x)
plot(x, fx, col="red")
#Exercice 6
ncartes = 32 / 4
ncartes
anagrammes = factorial(9)
anagrammes
nchances = choose(49, 5) * choose(10, 2)
nchances = 1 / nchances
nchances
jeudomino = 10 + 10 * 9 / 2
jeudomino
|
/TP1/TP1.R
|
no_license
|
LaurenRolan/AnaDon
|
R
| false
| false
| 860
|
r
|
#######################
# Exercise 3: building and inspecting vectors
y0 = seq(0, 10, by=2)
y0
y1 = seq(2, 18, 2)
y1
y2 = rep(4, 20)
y2
y3 = seq(0, 9.5, 0.5)
y3
# Extraction from y3: element 3, then every element except the 3rd
y3[3]
y3[-3]
# Comparison: filling a matrix column-wise (default) vs. row-wise
matrix(y3, nrow=2)
matrix(y3, byrow=TRUE)
# Matrices: same values filled by row (A) and by column (B)
A = matrix(seq(1:12), nrow=4, byrow=TRUE)
B = matrix(seq(1:12), nrow=4)
# Matrix extraction: one cell, the first column, the second row
A[2,3]
A[,1]
A[2,]
# New matrix built from rows 1 and 4 of A
C = matrix(data = A[c(1, 4),], nrow=2)
C
# 9x9 matrix of ones with a zeroed diagonal
mat99 = matrix(data = 1, nrow = 9, ncol = 9)
mat99
diag(mat99) <- 0
mat99
# Exercise 5: the logistic map f(x) = 4x(1-x) on [0, 1]
x = seq(0, 1, 0.1)
x
length(x)
y <- 4 * x * (1 - x)
y
plot(x, y)
max(y)
y == 1
fx <- 4 * x * x * (1 - x)
plot(x, fx, col="red")
# Exercise 6: simple counting/combinatorics problems
ncartes = 32 / 4
ncartes
anagrammes = factorial(9)
anagrammes
nchances = choose(49, 5) * choose(10, 2)
nchances = 1 / nchances
nchances
jeudomino = 10 + 10 * 9 / 2
jeudomino
|
library(readr)
library(ggplot2)
library(ggthemes)
library(RColorBrewer)
ckd_beta <- read_csv("ckd_beta.txt")
#not a bad plot
label_colors <- brewer.pal(n = 4,name = "Paired")[c(2,4)]
ggplot(ckd_beta) +
geom_point(aes(MDS1,MDS2,color=DiseaseState),size=3) +
theme_bw() +
annotate("text",label="Stress=0.13",x=Inf,y=Inf,hjust=1.1,vjust=1.5) +
scale_color_manual(values = label_colors) +
labs(x="NMDS1",y="NMDS2", color="Disease State") +
theme(panel.border = element_rect(color="gray75"))
#a better plot
label_colors <- brewer.pal(n = 4,name = "Paired")[c(2,4)]
ggplot(ckd_beta) +
geom_point(aes(MDS1,MDS2,color=DiseaseState),size=3) +
theme_bw() +
annotate("text",label="Stress=0.13",x=Inf,y=Inf,hjust=1.1,vjust=1.5) +
annotate("text",label="CKD",color=label_colors[1],x=0.07, y=0.12,size=4,fontface=2) +
annotate("text",label="Normal",color=label_colors[2],x=-0.05, y=0,size=4,fontface=2) +
scale_color_manual(values = label_colors,guide=FALSE) +
labs(x="NMDS1",y="NMDS2") +
theme(panel.border = element_rect(color="gray75"))
|
/rat-scatter.R
|
no_license
|
englandwe/dataviz-examples
|
R
| false
| false
| 1,058
|
r
|
library(readr)
library(ggplot2)
library(ggthemes)
library(RColorBrewer)
# NMDS ordination coordinates (MDS1/MDS2) with a DiseaseState column.
ckd_beta <- read_csv("ckd_beta.txt")
# Two well-separated "Paired" palette colors: entry 2 (blue) for CKD,
# entry 4 (green) for Normal.
#not a bad plot
label_colors <- brewer.pal(n = 4,name = "Paired")[c(2,4)]
ggplot(ckd_beta) +
geom_point(aes(MDS1,MDS2,color=DiseaseState),size=3) +
theme_bw() +
annotate("text",label="Stress=0.13",x=Inf,y=Inf,hjust=1.1,vjust=1.5) +
scale_color_manual(values = label_colors) +
labs(x="NMDS1",y="NMDS2", color="Disease State") +
theme(panel.border = element_rect(color="gray75"))
#a better plot: direct in-panel group labels instead of a legend
label_colors <- brewer.pal(n = 4,name = "Paired")[c(2,4)]
ggplot(ckd_beta) +
geom_point(aes(MDS1,MDS2,color=DiseaseState),size=3) +
theme_bw() +
annotate("text",label="Stress=0.13",x=Inf,y=Inf,hjust=1.1,vjust=1.5) +
annotate("text",label="CKD",color=label_colors[1],x=0.07, y=0.12,size=4,fontface=2) +
annotate("text",label="Normal",color=label_colors[2],x=-0.05, y=0,size=4,fontface=2) +
# guide = "none" suppresses the legend; the logical guide = FALSE form is
# deprecated in ggplot2 >= 3.3.4
scale_color_manual(values = label_colors,guide="none") +
labs(x="NMDS1",y="NMDS2") +
theme(panel.border = element_rect(color="gray75"))
|
# Process data.
library(ichseg)
library(here)
library(dplyr)
library(tidyr)
library(neurobase)
library(readr)
library(extrantsr)
# root_dir <- "~/CLEAR_PITCH"
root_dir = here::here()
# Process data.
# Change batch_type if want to use original data
# Original data was used to train model
batches = c("batch", "test_set")
batch_type = batches[1]
img_dir = file.path(root_dir, batch_type)
proc_dir = file.path(root_dir, "processed")
res_dir = file.path(root_dir, "results")
filenames = file.path(res_dir, "all_filenames_df.rds")
df = read_rds(filenames)
df = df %>%
filter(batch_group == batch_type)
df$outfile = file.path(df$id_proc_dir,
paste0(df$stub, "_", "predictor_df.rds"))
n_ids = nrow(df)
iid = as.numeric(Sys.getenv("SGE_TASK_ID"))
if (is.na(iid)) {
iid = 201
}
id = df$id[iid]
id_proc_dir = df$id_proc_dir[iid]
dir.create(id_proc_dir, showWarnings = FALSE)
img = df$CT[iid]
msk = df$Msk[iid]
if (is.na(msk)) {
stop("Mask not found!")
}
ss_file = file.path(id_proc_dir,
"brain.nii.gz")
mask_file = file.path(id_proc_dir,
"brain_mask.nii.gz")
n4 = FALSE
for (n4 in c(FALSE, TRUE)) {
print(id)
stub = sub("_CT", "", nii.stub(img, bn = TRUE))
ufile = file.path(id_proc_dir,
paste0(stub, "_usemask.nii.gz"))
if (file.exists(mask_file)) {
mask = mask_file
} else {
mask = NULL
}
if (n4) {
stub = paste0(stub, "_n4")
}
outprefix = file.path(
id_proc_dir,
paste0(stub, "_")
)
outfile = file.path(id_proc_dir,
paste0(stub, "_",
"predictor_df.rds"))
if (!file.exists(outfile)) {
if (n4) {
img = readnii(img)
brain_mask = readnii(mask_file)
img = mask_img(img, mask = brain_mask)
img[ img < 0] = 0
# n4 = bias_correct(img, correction = "N4",
# mask = brain_mask)
img = window_img(img, c(0, 100))
n4_2 = bias_correct(img, correction = "N4",
mask = brain_mask)
img = n4_2
}
proc = ich_process_predictors(
img = img,
maskfile = mask_file,
mask = mask,
outprefix = outprefix,
stub = stub,
roi = msk,
save_imgs = TRUE,
outdir = id_proc_dir)
idf = as_data_frame(proc$img.pred$df)
idf$any_zero_neighbor =
as.integer(idf$any_zero_neighbor)
idf$mask = idf$mask > 0
proc$img.pred$df = idf
idf = idf[ idf$mask | idf$Y > 0, ]
write_rds(idf, path = outfile)
}
}
# else {
# idf = read_rds(outfile)
# }
# usemask = readnii(ufile)
# dist_img = remake_img(df$dist_centroid,
# usemask, usemask)
|
/programs/process.R
|
no_license
|
muschellij2/clear_pitch
|
R
| false
| false
| 2,558
|
r
|
# Process data.
library(ichseg)
library(here)
library(dplyr)
library(tidyr)
library(neurobase)
library(readr)
library(extrantsr)
# root_dir <- "~/CLEAR_PITCH"
root_dir = here::here()
# Process data.
# Change batch_type if want to use original data
# Original data was used to train model
batches = c("batch", "test_set")
batch_type = batches[1]
img_dir = file.path(root_dir, batch_type)
proc_dir = file.path(root_dir, "processed")
res_dir = file.path(root_dir, "results")
filenames = file.path(res_dir, "all_filenames_df.rds")
df = read_rds(filenames)
df = df %>%
filter(batch_group == batch_type)
df$outfile = file.path(df$id_proc_dir,
paste0(df$stub, "_", "predictor_df.rds"))
n_ids = nrow(df)
iid = as.numeric(Sys.getenv("SGE_TASK_ID"))
if (is.na(iid)) {
iid = 201
}
id = df$id[iid]
id_proc_dir = df$id_proc_dir[iid]
dir.create(id_proc_dir, showWarnings = FALSE)
img = df$CT[iid]
msk = df$Msk[iid]
if (is.na(msk)) {
stop("Mask not found!")
}
ss_file = file.path(id_proc_dir,
"brain.nii.gz")
mask_file = file.path(id_proc_dir,
"brain_mask.nii.gz")
n4 = FALSE
for (n4 in c(FALSE, TRUE)) {
print(id)
stub = sub("_CT", "", nii.stub(img, bn = TRUE))
ufile = file.path(id_proc_dir,
paste0(stub, "_usemask.nii.gz"))
if (file.exists(mask_file)) {
mask = mask_file
} else {
mask = NULL
}
if (n4) {
stub = paste0(stub, "_n4")
}
outprefix = file.path(
id_proc_dir,
paste0(stub, "_")
)
outfile = file.path(id_proc_dir,
paste0(stub, "_",
"predictor_df.rds"))
if (!file.exists(outfile)) {
if (n4) {
img = readnii(img)
brain_mask = readnii(mask_file)
img = mask_img(img, mask = brain_mask)
img[ img < 0] = 0
# n4 = bias_correct(img, correction = "N4",
# mask = brain_mask)
img = window_img(img, c(0, 100))
n4_2 = bias_correct(img, correction = "N4",
mask = brain_mask)
img = n4_2
}
proc = ich_process_predictors(
img = img,
maskfile = mask_file,
mask = mask,
outprefix = outprefix,
stub = stub,
roi = msk,
save_imgs = TRUE,
outdir = id_proc_dir)
idf = as_data_frame(proc$img.pred$df)
idf$any_zero_neighbor =
as.integer(idf$any_zero_neighbor)
idf$mask = idf$mask > 0
proc$img.pred$df = idf
idf = idf[ idf$mask | idf$Y > 0, ]
write_rds(idf, path = outfile)
}
}
# else {
# idf = read_rds(outfile)
# }
# usemask = readnii(ufile)
# dist_img = remake_img(df$dist_centroid,
# usemask, usemask)
|
# Source file to plot the percent loss of NPV from applying harvest rate trajectory from
# each assumed (columns) into each true state of nature (rows).
# Presently used for main manuscript figure
# Last Updated, 2/27/2017
rm(list = ls())
wd <- '/Users/essing/Dropbox/Desktop/Rcode/EggPredationModel'
setwd(wd)
require(RColorBrewer)
#setwd("/Users/essing/Dropbox/Desktop/Rcode/timtools/R")
# Return `colors` as hex strings with their alpha channel replaced.
#
# Args:
#   colors: vector of R color names or hex strings.
#   alpha:  opacity in [0, 1]; recycled across colors.
#
# Returns a character vector of "#RRGGBBAA" strings.
addalpha <- function(colors, alpha=1.0) {
  # RGBA matrix: rows red/green/blue/alpha, one column per color.
  # TRUE spelled out (the original used the reassignable shorthand T).
  r <- col2rgb(colors, alpha=TRUE)
  # Overwrite the alpha row on the 0-255 scale
  r[4,] <- alpha*255
  r <- r/255.0
  rgb(r[1,], r[2,], r[3,], r[4,])
}
# colorRampPaletteAlpha(): like colorRampPalette(), but also interpolates
# the alpha channel of the input colors across the `n` output colors.
# `interpolate` = 'linear' uses approx(); anything else uses spline(),
# whose overshoot above 255 is clamped.
colorRampPaletteAlpha <- function(colors, n=32, interpolate='linear') {
  # Build the opaque ramp first; alpha is handled separately below.
  ramp <- colorRampPalette(colors, interpolate=interpolate)(n)
  # Alpha values (0-255) of the input colors
  alpha_in <- col2rgb(colors, alpha=TRUE)[4,]
  # Interpolate the alpha channel to n points
  interp <- if (interpolate=='linear') {
    approx(alpha_in, n=n)
  } else {
    spline(alpha_in, n=n)
  }
  alpha_out <- pmin(interp$y, 255)  # clamp spline overshoot at 255
  addalpha(ramp, alpha_out/255.0)
}
setwd("./src")
source("makeimageFN.R")
setwd("..")
datadir <- 'data/optimization_output_summer_2016'
plotfilename <- "ALL.hNPZ.Plots.pdf"
min.NPV <- -40
max.NPV <- 40
txt.mult = 2
setwd(paste("./",datadir, sep = ""))
highhigh <- read.csv(file = 'hNPV_output_Case1.csv', header = F)
highlow <- read.csv(file = 'hNPV_output_Case2.csv', header = F)
lowhigh <- read.csv(file = 'hNPV_output_Case3.csv', header = F)
lowlow <- read.csv(file = 'hNPV_output_Case4.csv', header = F)
setwd(wd)
print(lowhigh)
Iflip <- matrix(0, nrow = 4, ncol = 4)
Iflip[4, 1] = 1
Iflip[3, 2] = 1
Iflip[2, 3] = 1
Iflip[1, 4] = 1
# Set Color Pallettes
color.list.neg <- rep(brewer.pal(10,"RdYlBu")[1],11)
alpha.list <- c(exp(-seq(0,2,length.out = 10)),0)
color.alpha.neg<-rep(NA,11)
for (k in 1:11) color.alpha.neg[k] <- addalpha(color.list.neg[k],alpha.list[k])
col.palette.neg <-
colorRampPaletteAlpha(color.alpha.neg, n=33, interpolate = "linear")
color.list.pos <- rep(brewer.pal(10,"RdYlBu")[10],11)
alpha.list <- rev(alpha.list)
color.alpha.pos<-rep(NA,11)
for (k in 1:11) color.alpha.pos[k] <- addalpha(color.list.pos[k],alpha.list[k])
col.palette.pos <-
colorRampPaletteAlpha(color.alpha.pos, n=20, interpolate = "linear")
setwd("./graphics")
pdf(file = plotfilename,
height = 10,
width = 10)
output.2.use <- c("highhigh", "highlow", "lowhigh", "lowlow")
nf = layout(matrix(
c(1, 2, 1, 2, 3, 4, 3, 4, 5, 5),
byrow = TRUE,
nrow = 5,
ncol = 2
))
par(mar = c(2, 5, 2, 2),
las = 1,
omi = c(1, 1, 1, 1))
for (i in 1:length(output.2.use)) {
eval.text <- paste("output<-", output.2.use[i])
eval(parse(text = eval.text))
pos.list <- which(output > max.NPV)
output <-replace(as.matrix(output), pos.list, max.NPV)
# swap out really negative losses with a single big number, -50
really.neg.list <- which(output <= min.NPV)
output <- (replace(as.matrix(output), really.neg.list, min.NPV))
flipped.mat <- (t(output) %*% Iflip)
#flipped.mat<-t(output)
#col.palette <-
#rev(colorRampPalette(viridis(n=15), interpolate = "spline")(15))
mod.image(
x=seq(1:4),
y=seq(1:4),
z=flipped.mat,
ylab = "",
xlab = "",
zlim = c(min.NPV, max.NPV),
axes = FALSE,
colneg = col.palette.neg,
colpos = col.palette.pos
)
box()
axis(
3,
at = seq(1.5, 4.5,by=1),
labels = c("Ind", "Pred", "Egg", "Dep"),
cex.axis = txt.mult
)
axis(
2,
at = seq(1.5, 4.5, by=1),
labels = c("Dep", "Egg", "Pred", "Ind"),
cex.axis = txt.mult
)
if (i == 1) {
mtext(
side = 3,
text = "Prey High",
line = 4,
cex = txt.mult
)
mtext(
side = 2,
text = "Piscivore High",
line = 5,
las = 0,
cex = txt.mult
)
}
if (i == 2) {
mtext(
side = 3,
text = "Prey Low",
line = 4,
cex = txt.mult
)
}
if (i == 3) {
mtext(
side = 2,
text = "Piscivore Low",
line = 5,
las = 0,
cex = txt.mult
)
}
}
# make colorbar along the bottom
NPV.index.neg <- seq(min.NPV / 100, -0.025, by = .025)
NPV.index.pos <- seq(0 / 100, max.NPV / 100, by = .025)
NPV.index.list <- c(NPV.index.neg,NPV.index.pos)
#NPV.index.list=round(100*NPV.index.list)/100
NPV.colorsneg <-
colorRampPaletteAlpha(color.alpha.neg, n=length(NPV.index.neg), interpolate = "linear")
NPV.colorspos <-
colorRampPaletteAlpha(color.alpha.pos, n=length(NPV.index.pos), interpolate = "linear")
par(mai = c(0.5, 2, .5,2), xpd = TRUE)
plot(
0,
0,
type = "n",
xlim = c(min.NPV / 100, max.NPV / 100),
ylim = c(0, .9),
axes = FALSE,
ylab = "",
xlab = ""
)
NPV.colors<-c(NPV.colorsneg,NPV.colorspos)
# loop through colors, make squares
ymin <- 0.5
ymax <- 1.5
NPV.inc <- NPV.index.list[2]-NPV.index.list[1]
for (i in 1:length(NPV.index.list)){
rect(xleft = NPV.index.list[i],xright = NPV.index.list[i]+NPV.inc*0.99,ybottom=ymin,ytop=ymax,col=NPV.colors[i],border=NA,lwd=0)
}
#gradient.rect( xleft= -0.15, xright = 0, ybottom = ymin, ytop = ymax, density = 0, col=NPV.colors, gradient = "x", border = NA)
#for (i in 2:length(NPV.index.list)) {
# NPV.color <- NPV.colors[i - 1]
## y <- c(ymin, ymax, ymax, ymin)
# x <-
# c(NPV.index.list[i - 1],
# NPV.index.list[i - 1],
# NPV.index.list[i],
# NPV.index.list[i])
# polygon(x, y, col = NPV.color, border = NPV.color)
#}
NPV.plot.list.lab <- c(paste("<", min.NPV, sep = ""), -20,0, 20, 40)
NPV.plot.list <- c(seq(min.NPV, 40, by = 20))
par(xpd = TRUE)
text(
y = rep(0.1, length(NPV.plot.list)),
x = NPV.plot.list / 100,
labels = NPV.plot.list.lab,
pos = 3,
cex = txt.mult
)
text(
y = -0.1,
x = mean(c(min.NPV / 100, max.NPV / 100)),
labels = "Change (%) in Net Present Value",
pos = 1,
cex = txt.mult
)
dev.off()
system2("open", args = c("-a Skim.app", plotfilename))
setwd(wd)
|
/R/src/Plot_Color_Map_NPV_herring.R
|
no_license
|
tessington/PNAS-EBFM
|
R
| false
| false
| 5,905
|
r
|
# Source file to plot the percent loss of NPV from applying harvest rate trajectory from
# each assumed (columns) into each true state of nature (rows).
# Presently used for main manuscript figure
# Last Updated, 2/27/2017
rm(list = ls())
wd <- '/Users/essing/Dropbox/Desktop/Rcode/EggPredationModel'
setwd(wd)
require(RColorBrewer)
#setwd("/Users/essing/Dropbox/Desktop/Rcode/timtools/R")
addalpha <- function(colors, alpha=1.0) {
r <- col2rgb(colors, alpha=T)
# Apply alpha
r[4,] <- alpha*255
r <- r/255.0
return(rgb(r[1,], r[2,], r[3,], r[4,]))
}
# colorRampPaletteAlpha()
colorRampPaletteAlpha <- function(colors, n=32, interpolate='linear') {
# addalpha()
# Create the color ramp normally
cr <- colorRampPalette(colors, interpolate=interpolate)(n)
# Find the alpha channel
a <- col2rgb(colors, alpha=T)[4,]
# Interpolate
if (interpolate=='linear') {
l <- approx(a, n=n)
} else {
l <- spline(a, n=n)
}
l$y[l$y > 255] <- 255 # Clamp if spline is > 255
cr <- addalpha(cr, l$y/255.0)
return(cr)
}
setwd("./src")
source("makeimageFN.R")
setwd("..")
datadir <- 'data/optimization_output_summer_2016'
plotfilename <- "ALL.hNPZ.Plots.pdf"
min.NPV <- -40
max.NPV <- 40
txt.mult = 2
setwd(paste("./",datadir, sep = ""))
highhigh <- read.csv(file = 'hNPV_output_Case1.csv', header = F)
highlow <- read.csv(file = 'hNPV_output_Case2.csv', header = F)
lowhigh <- read.csv(file = 'hNPV_output_Case3.csv', header = F)
lowlow <- read.csv(file = 'hNPV_output_Case4.csv', header = F)
setwd(wd)
print(lowhigh)
Iflip <- matrix(0, nrow = 4, ncol = 4)
Iflip[4, 1] = 1
Iflip[3, 2] = 1
Iflip[2, 3] = 1
Iflip[1, 4] = 1
# Set Color Pallettes
color.list.neg <- rep(brewer.pal(10,"RdYlBu")[1],11)
alpha.list <- c(exp(-seq(0,2,length.out = 10)),0)
color.alpha.neg<-rep(NA,11)
for (k in 1:11) color.alpha.neg[k] <- addalpha(color.list.neg[k],alpha.list[k])
col.palette.neg <-
colorRampPaletteAlpha(color.alpha.neg, n=33, interpolate = "linear")
color.list.pos <- rep(brewer.pal(10,"RdYlBu")[10],11)
alpha.list <- rev(alpha.list)
color.alpha.pos<-rep(NA,11)
for (k in 1:11) color.alpha.pos[k] <- addalpha(color.list.pos[k],alpha.list[k])
col.palette.pos <-
colorRampPaletteAlpha(color.alpha.pos, n=20, interpolate = "linear")
setwd("./graphics")
pdf(file = plotfilename,
height = 10,
width = 10)
output.2.use <- c("highhigh", "highlow", "lowhigh", "lowlow")
nf = layout(matrix(
c(1, 2, 1, 2, 3, 4, 3, 4, 5, 5),
byrow = TRUE,
nrow = 5,
ncol = 2
))
par(mar = c(2, 5, 2, 2),
las = 1,
omi = c(1, 1, 1, 1))
for (i in 1:length(output.2.use)) {
eval.text <- paste("output<-", output.2.use[i])
eval(parse(text = eval.text))
pos.list <- which(output > max.NPV)
output <-replace(as.matrix(output), pos.list, max.NPV)
# swap out really negative losses with a single big number, -50
really.neg.list <- which(output <= min.NPV)
output <- (replace(as.matrix(output), really.neg.list, min.NPV))
flipped.mat <- (t(output) %*% Iflip)
#flipped.mat<-t(output)
#col.palette <-
#rev(colorRampPalette(viridis(n=15), interpolate = "spline")(15))
mod.image(
x=seq(1:4),
y=seq(1:4),
z=flipped.mat,
ylab = "",
xlab = "",
zlim = c(min.NPV, max.NPV),
axes = FALSE,
colneg = col.palette.neg,
colpos = col.palette.pos
)
box()
axis(
3,
at = seq(1.5, 4.5,by=1),
labels = c("Ind", "Pred", "Egg", "Dep"),
cex.axis = txt.mult
)
axis(
2,
at = seq(1.5, 4.5, by=1),
labels = c("Dep", "Egg", "Pred", "Ind"),
cex.axis = txt.mult
)
if (i == 1) {
mtext(
side = 3,
text = "Prey High",
line = 4,
cex = txt.mult
)
mtext(
side = 2,
text = "Piscivore High",
line = 5,
las = 0,
cex = txt.mult
)
}
if (i == 2) {
mtext(
side = 3,
text = "Prey Low",
line = 4,
cex = txt.mult
)
}
if (i == 3) {
mtext(
side = 2,
text = "Piscivore Low",
line = 5,
las = 0,
cex = txt.mult
)
}
}
# make colorbar along the bottom
NPV.index.neg <- seq(min.NPV / 100, -0.025, by = .025)
NPV.index.pos <- seq(0 / 100, max.NPV / 100, by = .025)
NPV.index.list <- c(NPV.index.neg,NPV.index.pos)
#NPV.index.list=round(100*NPV.index.list)/100
NPV.colorsneg <-
colorRampPaletteAlpha(color.alpha.neg, n=length(NPV.index.neg), interpolate = "linear")
NPV.colorspos <-
colorRampPaletteAlpha(color.alpha.pos, n=length(NPV.index.pos), interpolate = "linear")
par(mai = c(0.5, 2, .5,2), xpd = TRUE)
plot(
0,
0,
type = "n",
xlim = c(min.NPV / 100, max.NPV / 100),
ylim = c(0, .9),
axes = FALSE,
ylab = "",
xlab = ""
)
NPV.colors<-c(NPV.colorsneg,NPV.colorspos)
# loop through colors, make squares
ymin <- 0.5
ymax <- 1.5
NPV.inc <- NPV.index.list[2]-NPV.index.list[1]
for (i in 1:length(NPV.index.list)){
rect(xleft = NPV.index.list[i],xright = NPV.index.list[i]+NPV.inc*0.99,ybottom=ymin,ytop=ymax,col=NPV.colors[i],border=NA,lwd=0)
}
#gradient.rect( xleft= -0.15, xright = 0, ybottom = ymin, ytop = ymax, density = 0, col=NPV.colors, gradient = "x", border = NA)
#for (i in 2:length(NPV.index.list)) {
# NPV.color <- NPV.colors[i - 1]
## y <- c(ymin, ymax, ymax, ymin)
# x <-
# c(NPV.index.list[i - 1],
# NPV.index.list[i - 1],
# NPV.index.list[i],
# NPV.index.list[i])
# polygon(x, y, col = NPV.color, border = NPV.color)
#}
NPV.plot.list.lab <- c(paste("<", min.NPV, sep = ""), -20,0, 20, 40)
NPV.plot.list <- c(seq(min.NPV, 40, by = 20))
par(xpd = TRUE)
text(
y = rep(0.1, length(NPV.plot.list)),
x = NPV.plot.list / 100,
labels = NPV.plot.list.lab,
pos = 3,
cex = txt.mult
)
text(
y = -0.1,
x = mean(c(min.NPV / 100, max.NPV / 100)),
labels = "Change (%) in Net Present Value",
pos = 1,
cex = txt.mult
)
dev.off()
system2("open", args = c("-a Skim.app", plotfilename))
setwd(wd)
|
# TRUE when `package` is present in the local library
# (matched against the first column of installed.packages()).
is.installed <- function(package) {
  package %in% installed.packages()[, 1]
}
# TRUE when the scalar string `lhs` begins with the full string `rhs`.
# The length guard short-circuits to FALSE when rhs is longer than lhs.
utils_starts_with <- function(lhs, rhs) {
  nchar(lhs) >= nchar(rhs) &&
    identical(substring(lhs, 1, nchar(rhs)), rhs)
}
# Replace a leading expanded home directory in `path` with the "~" alias,
# leaving any other path untouched.
aliased_path <- function(path) {
  home <- path.expand("~/")
  if (utils_starts_with(path, home)) {
    trailing <- substring(path, nchar(home) + 1)
    path <- file.path("~", trailing)
  }
  path
}
# Transpose a list of equal-length vectors: the i-th elements of every
# input vector are collected into the i-th output element (names dropped).
transpose_list <- function(list) {
  do.call(function(...) Map(c, ..., USE.NAMES = FALSE), list)
}
#' Random string generation
#'
#' Generate a random string with a given prefix.
#'
#' @param prefix A length-one character vector.
#' @export
random_string <- function(prefix = "table") {
  # UUID with its hyphens replaced so the result is identifier-safe
  suffix <- gsub("-", "_", uuid::UUIDgenerate())
  paste(prefix, suffix, sep = "_")
}
#' Instantiate a Java array with a specific element type.
#'
#' Given a list of Java object references, instantiate an \code{Array[T]}
#' containing the same list of references, where \code{T} is a non-primitive
#' type that is more specific than \code{java.lang.Object}.
#'
#' @param sc A \code{spark_connection}.
#' @param x A list of Java object references.
#' @param element_type A valid Java class name representing the generic type
#'   parameter of the Java array to be instantiated. Each element of \code{x}
#'   must refer to a Java object that is assignable to \code{element_type}.
#'
#' @examples
#' sc <- spark_connect(master = "spark://HOST:PORT")
#'
#' string_arr <- jarray(sc, letters, element_type = "java.lang.String")
#' # string_arr is now a reference to an array of type String[]
#'
#' @export
jarray <- function(sc, x, element_type) {
  # JVM binary name for a one-dimensional array of `element_type`
  array_class_name <- paste0("[L", element_type, ";")
  array_class <- invoke_static(sc, "java.lang.Class", "forName", array_class_name)
  # Arrays.copyOf(list, length, Class) both copies the references and
  # casts the result to the requested T[] type.
  j_invoke_static(
    sc, "java.util.Arrays", "copyOf", as.list(x), length(x), array_class
  )
}
#' Instantiate a Java float type.
#'
#' Instantiate a \code{java.lang.Float} object with the value specified.
#' NOTE: this method is useful when one has to invoke a Java/Scala method
#' requiring a float (instead of double) type for at least one of its
#' parameters.
#'
#' @param sc A \code{spark_connection}.
#' @param x A numeric value in R.
#'
#' @examples
#' sc <- spark_connect(master = "spark://HOST:PORT")
#'
#' jflt <- jfloat(sc, 1.23e-8)
#' # jflt is now a reference to a java.lang.Float object
#'
#' @export
jfloat <- function(sc, x) {
  # Coerce to double first; the JVM side boxes it as java.lang.Float
  value <- as.numeric(x)
  j_invoke_new(sc, "java.lang.Float", value)
}
#' Instantiate an Array[Float].
#'
#' Instantiate an \code{Array[Float]} object with the value specified.
#' NOTE: this method is useful when one has to invoke a Java/Scala method
#' requiring an \code{Array[Float]} as one of its parameters.
#'
#' @param sc A \code{spark_connection}.
#' @param x A numeric vector in R.
#'
#' @examples
#' sc <- spark_connect(master = "spark://HOST:PORT")
#'
#' jflt_arr <- jfloat_array(sc, c(-1.23e-8, 0, -1.23e-8))
#' # jflt_arr is now a reference an array of java.lang.Float
#'
#' @export
jfloat_array <- function(sc, x) {
  # Box each element as java.lang.Float, then copy into a Float[] array
  boxed <- lapply(x, function(elem) jfloat(sc, elem))
  jarray(sc, boxed, "java.lang.Float")
}
# printf-style convenience wrapper: format with sprintf(), emit with cat()
# (no trailing newline is added).
printf <- function(fmt, ...) {
  formatted <- sprintf(fmt, ...)
  cat(formatted)
}
# Assert that the connection's Spark version is within [required, required_max).
#
# Args:
#   sc:           a spark_connection.
#   required:     minimum Spark version (inclusive).
#   module:       feature name used in error messages; if NULL, guessed from
#                 the calling function's name.
#   required_max: optional version (exclusive upper bound) at which the
#                 feature was removed.
#
# Returns TRUE invisibly-ish (plain TRUE) when the version is acceptable;
# otherwise stops with an informative error.
spark_require_version <- function(sc, required, module = NULL, required_max = NULL) {
  # guess module based on calling function
  if (is.null(module)) {
    call <- sys.call(sys.parent())
    module <- tryCatch(as.character(call[[1]]), error = function(ex) "")
  }
  # check and report version requirements
  version <- spark_version(sc)
  if (version < required) {
    # Include the detected version: the old two-placeholder format left the
    # third sprintf() argument unused (a warning on modern R) and never
    # told the user which version was actually found.
    fmt <- "%s requires Spark %s or higher. (Spark %s detected.)"
    msg <- sprintf(fmt, module, required, version)
    stop(msg, call. = FALSE)
  } else if (!is.null(required_max)) {
    if (version >= required_max) {
      fmt <- "%s is removed in Spark %s. (Spark %s detected.)"
      msg <- sprintf(fmt, module, required_max, version)
      stop(msg, call. = FALSE)
    }
  }
  TRUE
}
# Does the Spark instance backing `x` meet a minimum version requirement?
#
# @param x A spark_connection or spark_jobj.
# @param required_version Minimum version, comparable with spark_version().
# @return TRUE when the connected Spark version is >= required_version.
is_required_spark <- function(x, required_version) {
  UseMethod("is_required_spark")
}
# Connection method: compare the connection's Spark version directly.
is_required_spark.spark_connection <- function(x, required_version) {
  version <- spark_version(x)
  version >= required_version
}
# JVM-object method: resolve the owning connection, then delegate.
is_required_spark.spark_jobj <- function(x, required_version) {
  sc <- spark_connection(x)
  is_required_spark(sc, required_version)
}
spark_param_deprecated <- function(param, version = "3.x") {
  # Emit a standard deprecation warning for a parameter that the given
  # Spark version no longer honors.
  msg <- paste0("The '", param, "' parameter is deprecated in Spark ", version)
  warning(msg)
}
regex_replace <- function(string, ...) {
  # Apply a sequence of PCRE substitutions to `string`. Each argument in
  # `...` is named by its pattern and valued by its replacement, applied
  # in order via gsub(..., perl = TRUE).
  substitutions <- list(...)
  patterns <- names(substitutions)
  for (idx in seq_along(substitutions)) {
    string <- gsub(patterns[[idx]], substitutions[[idx]], string, perl = TRUE)
  }
  string
}
# Sanitize column names so they are safe to use with Spark SQL:
# transliterate to ASCII where possible, trim whitespace, replace interior
# whitespace/dots with '_', drop all remaining non-word characters, prefix
# names that start with a non-word character with 'V', and de-duplicate.
# Controlled by the "sparklyr.sanitize.column.names" config flag; renames
# are reported when one of the verbose config flags is TRUE.
#
# @param names Character vector of candidate column names.
# @param config Connection config, read via spark_config_value().
# @return Character vector of sanitized (or untouched) names, same length.
spark_sanitize_names <- function(names, config) {
  # Spark 1.6.X has a number of issues with '.'s in column names, e.g.
  #
  # https://issues.apache.org/jira/browse/SPARK-5632
  # https://issues.apache.org/jira/browse/SPARK-13455
  #
  # Many of these issues are marked as resolved, but it appears this is
  # a common regression in Spark and the handling is not uniform across
  # the Spark API.
  # sanitize names by default, but opt out with global option
  if (!isTRUE(spark_config_value(config, "sparklyr.sanitize.column.names", TRUE))) {
    return(names)
  }
  # begin transforming names
  oldNames <- newNames <- names
  # use 'iconv' to translate names to ASCII if possible
  newNames <- unlist(lapply(newNames, function(name) {
    # attempt to translate to ASCII
    transformed <- tryCatch(
      iconv(name, to = "ASCII//TRANSLIT"),
      error = function(e) NA
    )
    # on success, return the transformed name
    if (!is.na(transformed)) {
      transformed
    } else {
      name
    }
  }))
  # replace spaces with '_', and discard other characters
  newNames <- regex_replace(
    newNames,
    "^\\s*|\\s*$" = "",
    "[\\s.]+" = "_",
    "[^\\w_]" = "",
    "^(\\W)" = "V\\1"
  )
  # ensure new names are unique
  newNames <- make.unique(newNames, sep = "_")
  # report translations
  verbose <- spark_config_value(
    config,
    c("sparklyr.verbose.sanitize", "sparklyr.sanitize.column.names.verbose", "sparklyr.verbose"),
    FALSE
  )
  if (verbose) {
    changedIdx <- which(oldNames != newNames)
    if (length(changedIdx)) {
      changedOldNames <- oldNames[changedIdx]
      changedNewNames <- newNames[changedIdx]
      # column widths so the old => new pairs line up when printed
      nLhs <- max(nchar(changedOldNames))
      nRhs <- max(nchar(changedNewNames))
      lhs <- sprintf(paste("%-", nLhs + 2, "s", sep = ""), shQuote(changedOldNames))
      rhs <- sprintf(paste("%-", nRhs + 2, "s", sep = ""), shQuote(changedNewNames))
      # width of the index column; NOTE(review): floor(log10(max)) is one
      # less than the digit count, so this underpads by one — harmless here.
      n <- floor(log10(max(changedIdx)))
      index <- sprintf(paste("(#%-", n, "s)", sep = ""), changedIdx)
      msg <- paste(
        "The following columns have been renamed:",
        paste("-", lhs, "=>", rhs, index, collapse = "\n"),
        sep = "\n"
      )
      message(msg)
    }
  }
  newNames
}
# normalizes a path that we are going to send to spark but avoids
# normalizing remote identifiers like hdfs:// or s3n://. note
# that this will take care of path.expand ("~") as well as converting
# relative paths to absolute (necessary since the path will be read by
# another process that has a different current working directory)
spark_normalize_single_path <- function(path) {
  # Remote identifiers (anything with a URL scheme such as hdfs:// or
  # s3n://) pass through untouched; local paths get "~" expanded and are
  # made absolute so another process can resolve them.
  parsed <- httr::parse_url(path)
  if (is.null(parsed$scheme)) {
    return(normalizePath(path, mustWork = FALSE))
  }
  path
}
# Normalize each local path in `paths` while leaving remote URLs untouched;
# see spark_normalize_single_path().
#
# vapply() replaces sapply() so the result is always an unnamed character
# vector — sapply() would silently return an empty *list* for zero-length
# input, changing the return type.
spark_normalize_path <- function(paths) {
  vapply(paths, spark_normalize_single_path, character(1), USE.NAMES = FALSE)
}
stopf <- function(fmt, ..., call. = TRUE, domain = NULL) {
  # sprintf-style stop(): formats the message and raises a simpleError,
  # attaching the caller's call when `call.` is TRUE. `domain` is accepted
  # for signature parity with stop() but is not used here.
  message_text <- sprintf(fmt, ...)
  calling_expr <- if (call.) sys.call(sys.parent())
  stop(simpleError(message_text, calling_expr))
}
warnf <- function(fmt, ..., call. = TRUE, immediate. = FALSE) {
  # sprintf-style warning(), forwarding the call./immediate. flags through.
  formatted <- sprintf(fmt, ...)
  warning(formatted, call. = call., immediate. = immediate.)
}
enumerate <- function(object, f, ...) {
  # Apply f(name, value, ...) to each element of `object`; the result keeps
  # the same names as the input.
  element_names <- names(object)
  out <- lapply(seq_along(object), function(pos) {
    f(element_names[[pos]], object[[pos]], ...)
  })
  names(out) <- element_names
  out
}
path_program <- function(program, fmt = NULL) {
  # Locate `program` on the system PATH, aborting with a formatted error
  # when it is missing. `fmt` customizes the error message and receives the
  # program name as its single %s argument.
  template <- fmt %||% "program '%s' is required but not available on the path"
  location <- Sys.which(program)
  if (!nzchar(location)) {
    stopf(template, program, call. = FALSE)
  }
  location
}
infer_active_package_name <- function() {
root <- rprojroot::find_package_root_file()
dcf <- read.dcf(file.path(root, "DESCRIPTION"), all = TRUE)
dcf$Package
}
split_chunks <- function(x, chunk_size) {
  # Partition a vector into consecutive chunks of at most `chunk_size`
  # elements; a single chunk is returned when the input already fits.
  total <- length(x)
  if (total <= chunk_size) {
    return(list(x))
  }

  # start/end indices for each chunk; the last chunk absorbs the remainder
  chunk_starts <- seq(1, total, by = chunk_size)
  chunk_ends <- c(seq(chunk_size, total - 1, by = chunk_size), total)

  Map(function(from, to) x[from:to], chunk_starts, chunk_ends)
}
remove_class <- function(object, class) {
  # Strip the given class name(s) from the object's class attribute,
  # leaving the remaining classes in their original order.
  current <- attr(object, "class")
  attr(object, "class") <- current[!current %in% c(class)]
  object
}
trim_whitespace <- function(strings) {
  # Strip leading and trailing POSIX [:space:] characters from each string.
  stripped <- sub("^[[:space:]]+", "", strings)
  sub("[[:space:]]+$", "", stripped)
}
split_separator <- function(sc) {
  # Field separator used when rows are serialized as delimited text:
  # Livy connections need a printable "|~|" token, other backends use the
  # non-printable ETX character ("\3"). `regexp` is the pattern form,
  # `plain` the literal form.
  if (inherits(sc, "livy_connection")) {
    return(list(regexp = "\\|~\\|", plain = "|~|"))
  }
  list(regexp = "\3", plain = "\3")
}
resolve_fn <- function(fn, ...) {
  # If `fn` is callable, invoke it with `...`; otherwise return it as-is.
  if (!is.function(fn)) {
    return(fn)
  }
  fn(...)
}
is.tbl_spark <- function(x) {
  # TRUE when `x` carries the "tbl_spark" S3 class.
  "tbl_spark" %in% class(x)
}
# Multiple-assignment operator: `c(a, b) %<-% list(1, 2)` binds each element
# of `value` to the corresponding symbol on the left-hand side, in the
# caller's frame.
#
# @param x An unevaluated call such as c(a, b); its arguments name the
#   assignment targets.
# @param value A list/vector with exactly one element per target.
`%<-%` <- function(x, value) {
  # Drop the head of the LHS call (e.g. `c`) to get the target names.
  dest <- as.character(as.list(substitute(x))[-1])
  if (length(dest) != length(value)) stop("Assignment must contain same number of elements")
  for (i in seq_along(dest)) {
    # Bind into the frame that invoked %<-%.
    assign(dest[[i]], value[[i]], envir = sys.frame(which = sys.parent(n = 1)))
  }
  invisible(NULL)
}
sort_named_list <- function(lst, ...) {
  # Reorder a named list alphabetically by element name; extra arguments
  # are forwarded to order() (e.g. decreasing = TRUE).
  ordering <- order(names(lst), ...)
  lst[ordering]
}
# syntax sugar for calling dplyr methods with do.call and a non-trivial variable
# list of args: `x %>>% fn` returns a closure that, given a list of extra
# arguments, invokes fn(x, <args...>) via do.call().
`%>>%` <- function(x, fn) {
  fn_call <- function(largs) {
    do.call(fn, append(list(x), as.list(largs)))
  }
  fn_call
}
# Application operator pairing with %>>%: `fn %@% largs` is just fn(largs).
`%@%` <- function(fn, largs) fn(largs)
# syntax sugar for executing a chain of method calls with each call operating on
# the JVM object returned from the previous call: forwards `invocations`
# (a list describing successive JVM method calls) to invoke(x, "%>%", ...).
`%>|%` <- function(x, invocations) {
  do.call(invoke, append(list(x, "%>%"), invocations))
}
pcre_to_java <- function(regex) {
  # Translate POSIX character-class names embedded in a PCRE pattern into
  # the explicit character ranges understood by Java's regex engine.
  # Replacement strings are assembled with paste0() so the emitted Java
  # pattern contains literal backslash escapes (e.g. "\x00"); `bs` renders
  # as a single backslash once gsub() processes the replacement.
  bs <- "\\\\"
  class_map <- c(
    "\\[:alnum:\\]" = "A-Za-z0-9",
    "\\[:alpha:\\]" = "A-Za-z",
    "\\[:ascii:\\]" = paste0(bs, "x00", "-", bs, "x7F"),
    "\\[:blank:\\]" = " \\\\t",
    "\\[:cntrl:\\]" = paste0(bs, "x00", "-", bs, "x1F", bs, "x7F"),
    "\\[:digit:\\]" = "0-9",
    "\\[:graph:\\]" = paste0(bs, "x21", "-", bs, "x7E"),
    "\\[:lower:\\]" = "a-z",
    "\\[:print:\\]" = paste0(bs, "x20", "-", bs, "x7E"),
    "\\[:punct:\\]" = paste0(
      bs, "x21", "-", bs, "x2F",
      bs, "x3A", "-", bs, "x40",
      bs, "x5B", "-", bs, "x60",
      bs, "x7B", "-", bs, "x7E"
    ),
    "\\[:space:\\]" = paste0(" ", bs, "t", bs, "r", bs, "n", bs, "v", bs, "f"),
    "\\[:upper:\\]" = "A-Z",
    "\\[:word:\\]" = "A-Za-z0-9_",
    "\\[:xdigit:\\]" = "0-9a-fA-F"
  )
  for (class_pattern in names(class_map)) {
    regex <- gsub(class_pattern, class_map[[class_pattern]], regex)
  }
  regex
}
# Build a minimal one-row R data frame whose column names match those of
# `sdf`, with every cell set to NA — a lightweight stand-in when only the
# column layout matters.
replicate_colnames <- function(sdf) {
  nm <- colnames(sdf)
  cells <- rep(list(NA), length(nm))
  names(cells) <- nm
  do.call(data.frame, cells)
}
translate_spark_column_types <- function(sdf) {
type_map <- list(
BooleanType = "logical",
ByteType = "integer",
ShortType = "integer",
IntegerType = "integer",
FloatType = "numeric",
DoubleType = "numeric",
LongType = "numeric",
StringType = "character",
BinaryType = "raw",
TimestampType = "POSIXct",
DateType = "Date",
CalendarIntervalType = "character",
NullType = "NULL"
)
sdf %>%
sdf_schema() %>%
lapply(
function(e) {
if (e$type %in% names(type_map)) {
type_map[[e$type]]
} else if (grepl("^(Array|Struct|Map)Type\\(.*\\)$", e$type)) {
"list"
} else if (grepl("^DecimalType\\(.*\\)$", e$type)) {
"numeric"
} else {
"unknown"
}
}
)
}
simulate_vars_spark <- function(x, drop_groups = FALSE) {
col_types <- translate_spark_column_types(x)
if (drop_groups) {
non_group_cols <- setdiff(names(col_types), dplyr::group_vars(x))
col_types <- col_types[non_group_cols]
}
col_types %>%
lapply(
function(x) {
fn <- tryCatch(
get(paste0("as.", x), envir = parent.frame()),
error = function(e) {
NULL
}
)
if (is.null(fn)) {
list()
} else {
fn(NA)
}
}
) %>%
tibble::as_tibble()
}
simulate_vars.tbl_spark <- function(x, drop_groups = FALSE) {
simulate_vars_spark(x, drop_groups)
}
simulate_vars_is_typed.tbl_spark <- function(x) TRUE
# Wrapper around utils::download.file() that guarantees a download timeout
# of at least 300 seconds, restoring the caller's `timeout` option on exit.
download_file <- function(...) {
  min_timeout_s <- 300
  previous_timeout_s <- getOption("timeout")
  if (previous_timeout_s < min_timeout_s) {
    on.exit(options(timeout = previous_timeout_s), add = TRUE)
    options(timeout = min_timeout_s)
  }
  download.file(...)
}
# Infer all R packages that may be required for executing `fn`
#
# Statically walks the body of `fn` looking for explicit package loads
# (library/require/requireNamespace/loadNamespace/attachNamespace calls),
# namespace-qualified calls (pkg::fun), and bare calls resolved from an
# attached package, then records those packages together with their
# recursive (installed) dependencies.
#
# @param fn A function whose body is inspected; it is not executed.
# @return Character vector of package names, in ls() (alphabetical) order.
infer_required_r_packages <- function(fn) {
  pkgs <- as.data.frame(installed.packages())
  # Environment used as a visited-set of package names.
  deps <- new.env(hash = TRUE, parent = emptyenv(), size = nrow(pkgs))

  # Mark `pkg` and (recursively) its immediate installed dependencies.
  populate_deps <- function(pkg) {
    pkg <- as.character(pkg)
    if (!identical(deps[[pkg]], TRUE)) {
      imm_deps <- pkg %>%
        tools::package_dependencies(db = installed.packages(), recursive = FALSE)
      purrr::map(imm_deps[[1]], ~ populate_deps(.x))
      deps[[pkg]] <- TRUE
    }
  }

  rlang::fn_body(fn) %>%
    globals::walkAST(
      call = function(x) {
        cfn <- rlang::call_fn(x)
        # Explicit loader calls: record the package named in the call.
        for (mfn in list(base::library,
                         base::require,
                         base::requireNamespace,
                         base::loadNamespace)) {
          if (identical(cfn, mfn)) {
            populate_deps(rlang::call_args(match.call(mfn, x))$package)
            return(x)
          }
        }
        if (identical(cfn, base::attachNamespace)) {
          populate_deps(rlang::call_args(match.call(base::attachNamespace, x))$ns)
          return(x)
        }
        # pkg::fun() calls carry their namespace directly...
        ns <- rlang::call_ns(x)
        if (!is.null(ns)) {
          populate_deps(ns)
        } else {
          # ...otherwise resolve the bare name through the search path and
          # record it only when it came from an attached package.
          where <- strsplit(find(rlang::call_name(x)), ":")[[1]]
          if (identical(where[[1]], "package")) {
            populate_deps(where[[2]])
          }
        }
        x
      }
    )

  ls(deps)
}
os_is_windows <- function() {
  # TRUE when running on Windows, per .Platform$OS.type.
  identical(.Platform$OS.type, "windows")
}
|
/R/utils.R
|
permissive
|
yitao-li/sparklyr
|
R
| false
| false
| 15,199
|
r
|
is.installed <- function(package) {
  # TRUE when `package` appears in the installed-packages matrix (whose
  # first column holds package names).
  package %in% installed.packages()[, 1]
}
# TRUE when the string `lhs` begins with the string `rhs`.
#
# base::startsWith() (R >= 3.3) replaces the hand-rolled substring
# comparison; it already returns FALSE when `rhs` is longer than `lhs`,
# so no explicit length guard is needed.
utils_starts_with <- function(lhs, rhs) {
  startsWith(lhs, rhs)
}
aliased_path <- function(path) {
  # Replace the expanded home-directory prefix of `path` with "~".
  home_prefix <- path.expand("~/")
  if (utils_starts_with(path, home_prefix)) {
    path <- file.path("~", substring(path, nchar(home_prefix) + 1))
  }
  path
}
transpose_list <- function(list) {
  # Transpose a list of equal-length vectors: element i of the result
  # gathers the i-th entries of every input element.
  # (The parameter shadows base::list, hence the qualified calls below.)
  mapply_args <- c(
    FUN = base::list(c),
    list,
    base::list(SIMPLIFY = FALSE, USE.NAMES = FALSE)
  )
  do.call(mapply, mapply_args)
}
#' Random string generation
#'
#' Generate a random string with a given prefix.
#'
#' @param prefix A length-one character vector.
#' @export
random_string <- function(prefix = "table") {
  # `prefix` + "_" + a UUID with dashes replaced by underscores, so the
  # result is usable as a SQL identifier.
  paste0(prefix, "_", gsub("-", "_", uuid::UUIDgenerate()))
}
#' Instantiate a Java array with a specific element type.
#'
#' Given a list of Java object references, instantiate an \code{Array[T]}
#' containing the same list of references, where \code{T} is a non-primitive
#' type that is more specific than \code{java.lang.Object}.
#'
#' @param sc A \code{spark_connection}.
#' @param x A list of Java object references.
#' @param element_type A valid Java class name representing the generic type
#' parameter of the Java array to be instantiated. Each element of \code{x}
#' must refer to a Java object that is assignable to \code{element_type}.
#'
#' @examples
#' sc <- spark_connect(master = "spark://HOST:PORT")
#'
#' string_arr <- jarray(sc, letters, element_type = "java.lang.String")
#' # string_arr is now a reference to an array of type String[]
#'
#' @export
jarray <- function(sc, x, element_type) {
cls <- paste0("[L", element_type, ";")
arr_cls <- invoke_static(sc, "java.lang.Class", "forName", cls)
j_invoke_static(
sc, "java.util.Arrays", "copyOf", as.list(x), length(x), arr_cls
)
}
#' Instantiate a Java float type.
#'
#' Instantiate a \code{java.lang.Float} object with the value specified.
#' NOTE: this method is useful when one has to invoke a Java/Scala method
#' requiring a float (instead of double) type for at least one of its
#' parameters.
#'
#' @param sc A \code{spark_connection}.
#' @param x A numeric value in R.
#'
#' @examples
#' sc <- spark_connect(master = "spark://HOST:PORT")
#'
#' jflt <- jfloat(sc, 1.23e-8)
#' # jflt is now a reference to a java.lang.Float object
#'
#' @export
jfloat <- function(sc, x) {
j_invoke_new(sc, "java.lang.Float", as.numeric(x))
}
#' Instantiate an Array[Float].
#'
#' Instantiate an \code{Array[Float]} object with the value specified.
#' NOTE: this method is useful when one has to invoke a Java/Scala method
#' requiring an \code{Array[Float]} as one of its parameters.
#'
#' @param sc A \code{spark_connection}.
#' @param x A numeric vector in R.
#'
#' @examples
#' sc <- spark_connect(master = "spark://HOST:PORT")
#'
#' jflt_arr <- jfloat_array(sc, c(-1.23e-8, 0, -1.23e-8))
#' # jflt_arr is now a reference an array of java.lang.Float
#'
#' @export
jfloat_array <- function(sc, x) {
vals <- lapply(x, function(v) j_invoke_new(sc, "java.lang.Float", v))
jarray(sc, vals, "java.lang.Float")
}
printf <- function(fmt, ...) {
cat(sprintf(fmt, ...))
}
spark_require_version <- function(sc, required, module = NULL, required_max = NULL) {
# guess module based on calling function
if (is.null(module)) {
call <- sys.call(sys.parent())
module <- tryCatch(as.character(call[[1]]), error = function(ex) "")
}
# check and report version requirements
version <- spark_version(sc)
if (version < required) {
fmt <- "%s requires Spark %s or higher."
msg <- sprintf(fmt, module, required, version)
stop(msg, call. = FALSE)
} else if (!is.null(required_max)) {
if (version >= required_max) {
fmt <- "%s is removed in Spark %s."
msg <- sprintf(fmt, module, required_max, version)
stop(msg, call. = FALSE)
}
}
TRUE
}
is_required_spark <- function(x, required_version) {
UseMethod("is_required_spark")
}
is_required_spark.spark_connection <- function(x, required_version) {
version <- spark_version(x)
version >= required_version
}
is_required_spark.spark_jobj <- function(x, required_version) {
sc <- spark_connection(x)
is_required_spark(sc, required_version)
}
spark_param_deprecated <- function(param, version = "3.x") {
warning("The '", param, "' parameter is deprecated in Spark ", version)
}
regex_replace <- function(string, ...) {
dots <- list(...)
nm <- names(dots)
for (i in seq_along(dots)) {
string <- gsub(nm[[i]], dots[[i]], string, perl = TRUE)
}
string
}
spark_sanitize_names <- function(names, config) {
# Spark 1.6.X has a number of issues with '.'s in column names, e.g.
#
# https://issues.apache.org/jira/browse/SPARK-5632
# https://issues.apache.org/jira/browse/SPARK-13455
#
# Many of these issues are marked as resolved, but it appears this is
# a common regression in Spark and the handling is not uniform across
# the Spark API.
# sanitize names by default, but opt out with global option
if (!isTRUE(spark_config_value(config, "sparklyr.sanitize.column.names", TRUE))) {
return(names)
}
# begin transforming names
oldNames <- newNames <- names
# use 'iconv' to translate names to ASCII if possible
newNames <- unlist(lapply(newNames, function(name) {
# attempt to translate to ASCII
transformed <- tryCatch(
iconv(name, to = "ASCII//TRANSLIT"),
error = function(e) NA
)
# on success, return the transformed name
if (!is.na(transformed)) {
transformed
} else {
name
}
}))
# replace spaces with '_', and discard other characters
newNames <- regex_replace(
newNames,
"^\\s*|\\s*$" = "",
"[\\s.]+" = "_",
"[^\\w_]" = "",
"^(\\W)" = "V\\1"
)
# ensure new names are unique
newNames <- make.unique(newNames, sep = "_")
# report translations
verbose <- spark_config_value(
config,
c("sparklyr.verbose.sanitize", "sparklyr.sanitize.column.names.verbose", "sparklyr.verbose"),
FALSE
)
if (verbose) {
changedIdx <- which(oldNames != newNames)
if (length(changedIdx)) {
changedOldNames <- oldNames[changedIdx]
changedNewNames <- newNames[changedIdx]
nLhs <- max(nchar(changedOldNames))
nRhs <- max(nchar(changedNewNames))
lhs <- sprintf(paste("%-", nLhs + 2, "s", sep = ""), shQuote(changedOldNames))
rhs <- sprintf(paste("%-", nRhs + 2, "s", sep = ""), shQuote(changedNewNames))
n <- floor(log10(max(changedIdx)))
index <- sprintf(paste("(#%-", n, "s)", sep = ""), changedIdx)
msg <- paste(
"The following columns have been renamed:",
paste("-", lhs, "=>", rhs, index, collapse = "\n"),
sep = "\n"
)
message(msg)
}
}
newNames
}
# normalizes a path that we are going to send to spark but avoids
# normalizing remote identifiers like hdfs:// or s3n://. note
# that this will take care of path.expand ("~") as well as converting
# relative paths to absolute (necessary since the path will be read by
# another process that has a different current working directory)
spark_normalize_single_path <- function(path) {
# don't normalize paths that are urls
parsed <- httr::parse_url(path)
if (!is.null(parsed$scheme)) {
path
} else {
normalizePath(path, mustWork = FALSE)
}
}
spark_normalize_path <- function(paths) {
unname(sapply(paths, spark_normalize_single_path))
}
stopf <- function(fmt, ..., call. = TRUE, domain = NULL) {
stop(simpleError(
sprintf(fmt, ...),
if (call.) sys.call(sys.parent())
))
}
warnf <- function(fmt, ..., call. = TRUE, immediate. = FALSE) {
warning(sprintf(fmt, ...), call. = call., immediate. = immediate.)
}
enumerate <- function(object, f, ...) {
nm <- names(object)
result <- lapply(seq_along(object), function(i) {
f(nm[[i]], object[[i]], ...)
})
names(result) <- names(object)
result
}
path_program <- function(program, fmt = NULL) {
fmt <- fmt %||% "program '%s' is required but not available on the path"
path <- Sys.which(program)
if (!nzchar(path)) {
stopf(fmt, program, call. = FALSE)
}
path
}
infer_active_package_name <- function() {
root <- rprojroot::find_package_root_file()
dcf <- read.dcf(file.path(root, "DESCRIPTION"), all = TRUE)
dcf$Package
}
split_chunks <- function(x, chunk_size) {
# return early when chunk_size > length of vector
n <- length(x)
if (n <= chunk_size) {
return(list(x))
}
# compute ranges for subsetting
starts <- seq(1, n, by = chunk_size)
ends <- c(seq(chunk_size, n - 1, by = chunk_size), n)
# apply our subsetter
mapply(function(start, end) {
x[start:end]
}, starts, ends, SIMPLIFY = FALSE, USE.NAMES = FALSE)
}
remove_class <- function(object, class) {
classes <- attr(object, "class")
newClasses <- classes[!classes %in% c(class)]
attr(object, "class") <- newClasses
object
}
trim_whitespace <- function(strings) {
gsub("^[[:space:]]*|[[:space:]]*$", "", strings)
}
split_separator <- function(sc) {
if (inherits(sc, "livy_connection")) {
list(regexp = "\\|~\\|", plain = "|~|")
} else {
list(regexp = "\3", plain = "\3")
}
}
resolve_fn <- function(fn, ...) {
if (is.function(fn)) fn(...) else fn
}
is.tbl_spark <- function(x) {
inherits(x, "tbl_spark")
}
`%<-%` <- function(x, value) {
dest <- as.character(as.list(substitute(x))[-1])
if (length(dest) != length(value)) stop("Assignment must contain same number of elements")
for (i in seq_along(dest)) {
assign(dest[[i]], value[[i]], envir = sys.frame(which = sys.parent(n = 1)))
}
invisible(NULL)
}
sort_named_list <- function(lst, ...) {
lst[order(names(lst), ...)]
}
# syntax sugar for calling dplyr methods with do.call and a non-trivial variable
# list of args
`%>>%` <- function(x, fn) {
fn_call <- function(largs) {
do.call(fn, append(list(x), as.list(largs)))
}
fn_call
}
`%@%` <- function(fn, largs) fn(largs)
# syntax sugar for executing a chain of method calls with each call operating on
# the JVM object returned from the previous call
`%>|%` <- function(x, invocations) {
do.call(invoke, append(list(x, "%>%"), invocations))
}
pcre_to_java <- function(regex) {
regex %>%
gsub("\\[:alnum:\\]", "A-Za-z0-9", .) %>%
gsub("\\[:alpha:\\]", "A-Za-z", .) %>%
gsub("\\[:ascii:\\]", paste0("\\\\", "x00", "-", "\\\\", "x7F"), .) %>%
gsub("\\[:blank:\\]", " \\\\t", .) %>%
gsub("\\[:cntrl:\\]", paste0("\\\\", "x00", "-", "\\\\", "x1F", "\\\\", "x7F"), .) %>%
gsub("\\[:digit:\\]", "0-9", .) %>%
gsub("\\[:graph:\\]", paste0("\\\\", "x21", "-", "\\\\", "x7E"), .) %>%
gsub("\\[:lower:\\]", "a-z", .) %>%
gsub("\\[:print:\\]", paste0("\\\\", "x20", "-", "\\\\", "x7E"), .) %>%
gsub("\\[:punct:\\]",
paste0("\\\\", "x21", "-", "\\\\", "x2F",
"\\\\", "x3A", "-", "\\\\", "x40",
"\\\\", "x5B", "-", "\\\\", "x60",
"\\\\", "x7B", "-", "\\\\", "x7E"),
.
) %>%
gsub("\\[:space:\\]",
paste0(" ",
"\\\\", "t",
"\\\\", "r",
"\\\\", "n",
"\\\\", "v",
"\\\\", "f"
),
.
) %>%
gsub("\\[:upper:\\]", "A-Z", .) %>%
gsub("\\[:word:\\]", "A-Za-z0-9_", .) %>%
gsub("\\[:xdigit:\\]", "0-9a-fA-F", .)
}
# helper method returning a minimal R dataframe containing the same set of
# column names as `sdf` does
replicate_colnames <- function(sdf) {
columns <- lapply(
colnames(sdf),
function(column) {
v <- list(NA)
names(v) <- column
v
}
)
do.call(data.frame, columns)
}
translate_spark_column_types <- function(sdf) {
type_map <- list(
BooleanType = "logical",
ByteType = "integer",
ShortType = "integer",
IntegerType = "integer",
FloatType = "numeric",
DoubleType = "numeric",
LongType = "numeric",
StringType = "character",
BinaryType = "raw",
TimestampType = "POSIXct",
DateType = "Date",
CalendarIntervalType = "character",
NullType = "NULL"
)
sdf %>%
sdf_schema() %>%
lapply(
function(e) {
if (e$type %in% names(type_map)) {
type_map[[e$type]]
} else if (grepl("^(Array|Struct|Map)Type\\(.*\\)$", e$type)) {
"list"
} else if (grepl("^DecimalType\\(.*\\)$", e$type)) {
"numeric"
} else {
"unknown"
}
}
)
}
simulate_vars_spark <- function(x, drop_groups = FALSE) {
col_types <- translate_spark_column_types(x)
if (drop_groups) {
non_group_cols <- setdiff(names(col_types), dplyr::group_vars(x))
col_types <- col_types[non_group_cols]
}
col_types %>%
lapply(
function(x) {
fn <- tryCatch(
get(paste0("as.", x), envir = parent.frame()),
error = function(e) {
NULL
}
)
if (is.null(fn)) {
list()
} else {
fn(NA)
}
}
) %>%
tibble::as_tibble()
}
simulate_vars.tbl_spark <- function(x, drop_groups = FALSE) {
simulate_vars_spark(x, drop_groups)
}
simulate_vars_is_typed.tbl_spark <- function(x) TRUE
# wrapper for download.file()
download_file <- function(...) {
min_timeout_s <- 300
# Temporarily set download.file() timeout to 300 seconds if it was
# previously less than that, and restore the previous timeout setting
# on exit.
prev_timeout_s <- getOption("timeout")
if (prev_timeout_s < min_timeout_s) {
on.exit(options(timeout = prev_timeout_s))
options(timeout = min_timeout_s)
}
download.file(...)
}
# Infer all R packages that may be required for executing `fn`
infer_required_r_packages <- function(fn) {
pkgs <- as.data.frame(installed.packages())
deps <- new.env(hash = TRUE, parent = emptyenv(), size = nrow(pkgs))
populate_deps <- function(pkg) {
pkg <- as.character(pkg)
if (!identical(deps[[pkg]], TRUE)) {
imm_deps <- pkg %>%
tools::package_dependencies(db = installed.packages(), recursive = FALSE)
purrr::map(imm_deps[[1]], ~ populate_deps(.x))
deps[[pkg]] <- TRUE
}
}
rlang::fn_body(fn) %>%
globals::walkAST(
call = function(x) {
cfn <- rlang::call_fn(x)
for (mfn in list(base::library,
base::require,
base::requireNamespace,
base::loadNamespace)) {
if (identical(cfn, mfn)) {
populate_deps(rlang::call_args(match.call(mfn, x))$package)
return(x)
}
}
if (identical(cfn, base::attachNamespace)) {
populate_deps(rlang::call_args(match.call(base::attachNamespace, x))$ns)
return(x)
}
ns <- rlang::call_ns(x)
if (!is.null(ns)) {
populate_deps(ns)
} else {
where <- strsplit(find(rlang::call_name(x)), ":")[[1]]
if (identical(where[[1]], "package")) {
populate_deps(where[[2]])
}
}
x
}
)
ls(deps)
}
os_is_windows <- function() {
.Platform$OS.type == "windows"
}
|
Sys.time()
# Load packages
library(gdata)
library(pheatmap)
library(RColorBrewer)
##############################################################################
# Test arguments
##############################################################################
prefix='23_01_pca1_mergingNEW2_'
outdir='../carsten_cytof/PD1_project/CK_2016-06-23_01/030_heatmaps'
path_data='../carsten_cytof/PD1_project/CK_2016-06-23_01/010_data/23_01_expr_raw.rds'
path_data_norm='../carsten_cytof/PD1_project/CK_2016-06-23_01/010_data/23_01_expr_norm.rds'
path_clustering_observables='../carsten_cytof/PD1_project/CK_2016-06-23_01/030_heatmaps/23_01_pca1_clustering_observables.xls'
path_clustering='../carsten_cytof/PD1_project/CK_2016-06-23_01/030_heatmaps/23_01_pca1_mergingNEW2_clustering.xls'
path_clustering_labels='../carsten_cytof/PD1_project/CK_2016-06-23_01/030_heatmaps/23_01_pca1_mergingNEW2_clustering_labels.xls'
path_marker_selection='../carsten_cytof/PD1_project/CK_2016-06-23_01/010_helpfiles/23_01_pca1_mergingNEW2_marker_selection.txt'
path_cluster_merging=NULL
prefix='23_03_pca1_cl20_merging4_'
outdir='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_heatmaps'
path_data='../carsten_cytof/PD1_project/CK_2016-06-23_03/010_data/23_03_expr_raw.rds'
path_data_norm='../carsten_cytof/PD1_project/CK_2016-06-23_03/010_data/23_03_expr_norm.rds'
path_clustering_observables='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_heatmaps/23_03_pca1_clustering_observables.xls'
path_clustering='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_heatmaps/23_03_pca1_cl20_clustering.xls'
path_clustering_labels='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_heatmaps/23_03_pca1_cl20_clustering_labels.xls'
path_marker_selection='../carsten_cytof/PD1_project/CK_2016-06-23_03/010_helpfiles/23_03_pca1_cl20_marker_selection.txt'
path_cluster_merging='../carsten_cytof/PD1_project/CK_2016-06-23_03/010_helpfiles/23_03_pca1_cl20_cluster_merging4.xlsx'
### Cytokine profiles
prefix='23CD4TmemCD69_29CD4TmemCD69_02CD4_cl49_clustering_data23CD4_cl1_'
outdir='../carsten_cytof/PD1_project/CK_2016-06-merged_23_29/02_CD4/090_cytokine_bimatrix_frequencies_clustering/cytokine_profiles'
path_data='../carsten_cytof/PD1_project/CK_2016-06-23_02_CD4_merging2/010_data/23CD4_02CD4_expr_raw.rds'
path_data_norm='../carsten_cytof/PD1_project/CK_2016-06-23_02_CD4_merging2/010_data/23CD4_02CD4_expr_norm.rds'
path_clustering_observables='../carsten_cytof/PD1_project/CK_2016-06-23_02_CD4_merging2/030_heatmaps/23CD4_02CD4_pca1_clustering_observables.xls'
path_clustering_labels='../carsten_cytof/PD1_project/CK_2016-06-23_02_CD4_merging2/030_heatmaps/23CD4_02CD4_pca1_merging2_clustering_labels.xls'
path_clustering='../carsten_cytof/PD1_project/CK_2016-06-merged_23_29/02_CD4/090_cytokine_bimatrix_frequencies_clustering/cytokine_profiles/23CD4TmemCD69_29CD4TmemCD69_02CD4_cl49_clustering_data23CD4_cl1.txt'
path_marker_selection='../carsten_cytof/PD1_project/CK_2016-06-23_02_CD4_merging2/010_helpfiles/23CD4_02CD4_pca1_merging2_marker_selection.txt'
path_cluster_merging=NULL
args <- NULL
##############################################################################
# Read in the arguments
##############################################################################
# Clear the workspace so the hard-coded interactive-test argument values
# defined above never leak into a scripted run; past this point all inputs
# must come from the command line.
# NOTE(review): rm(list = ls()) inside a script is normally an anti-pattern;
# kept because the preceding block intentionally defines throwaway values.
rm(list = ls())

args <- (commandArgs(trailingOnly = TRUE))

# Evaluate each "name=value" argument in the global workspace.
# FIX: seq_along() instead of 1:length(args) — with no command-line
# arguments, 1:0 iterates over c(1, 0) and args[[1]] errors out of bounds.
# NOTE(review): eval(parse(text = ...)) executes arbitrary R from the
# command line; acceptable only for a trusted pipeline script.
for (i in seq_along(args)) {
  eval(parse(text = args[[i]]))
}

cat(paste0(args, collapse = "\n"), fill = TRUE)
##############################################################################
if(!file.exists(outdir))
dir.create(outdir, recursive = TRUE)
linkage <- "average"
pheatmap_palette <- 'YlGnBu'
pheatmap_palette_rev <- FALSE
pheatmap_palette_norm <- 'RdYlBu'
pheatmap_palette_norm_rev <- TRUE
plot_HD <- FALSE
if(!any(grepl("aggregate_fun=", args))){
aggregate_fun='median'
}
if(!any(grepl("scale=", args))){
scale=TRUE
}
# ------------------------------------------------------------
# Load expression data
# ------------------------------------------------------------
# `expr` is assumed to be a matrix-like object with bookkeeping columns
# "cell_id" and "sample_id" plus one column per marker channel — TODO
# confirm against the upstream 010_data step.
expr <- readRDS(path_data)
cell_id <- expr[, "cell_id"]
samp <- expr[, "sample_id"]
# Marker channels = every column except the bookkeeping ones.
fcs_colnames <- colnames(expr)[!grepl("cell_id|sample_id", colnames(expr))]
e <- expr[, fcs_colnames]
# Optionally load the normalized expression data (same marker columns).
if(!is.null(path_data_norm)){
  expr_norm <- readRDS(path_data_norm)
  e_norm <- expr_norm[, fcs_colnames]
}
# ------------------------------------------------------------
# Load clustering data
# ------------------------------------------------------------
# clustering
clustering <- read.table(path_clustering, header = TRUE, sep = "\t", as.is = TRUE)
clust <- clustering[, "cluster"]
names(clust) <- clustering[, "cell_id"]
# clustering labels
labels <- read.table(path_clustering_labels, header = TRUE, sep = "\t", as.is = TRUE)
labels <- labels[order(labels$cluster, decreasing = FALSE), ]
labels$label <- factor(labels$label, levels = unique(labels$label))
rownames(labels) <- labels$cluster
labels
# clustering observables
clustering_observables <- read.table(path_clustering_observables, header = TRUE, sep = "\t", as.is = TRUE)
rownames(clustering_observables) <- clustering_observables$mass
clustering_observables
clust_observ <- clustering_observables[clustering_observables$clustering_observable, "mass"]
clust_observ
# ------------------------------------------------------------
# Prepare a color annotation for heatmaps
# ------------------------------------------------------------
# --------------------
# Colors for clusters
# --------------------
# ggplot palette
# Reproduce ggplot2's default discrete palette: n equally spaced hues on
# the HCL color wheel at fixed luminance 60 and chroma 100.
#
# @param n Number of colors to generate (>= 0).
# @return Character vector of n hex colors.
gg_color_hue <- function(n) {
  # seq_len(n) rather than 1:n so n = 0 yields an empty palette instead of
  # the bogus single-color result produced by indexing with c(1, 0).
  hues <- seq(15, 375, length = n + 1)
  hcl(h = hues, l = 60, c = 100)[seq_len(n)]
}
# color blind palette
colors_muted <- c("#DC050C", "#E8601C", "#1965B0", "#7BAFDE", "#882E72", "#B17BA6", "#F1932D", "#F6C141", "#F7EE55", "#4EB265", "#90C987", "#CAEDAB")
color_ramp <- c(colors_muted, gg_color_hue(max(1, nlevels(labels$label) - length(colors_muted))))
colors_clusters <- color_ramp[1:nlevels(labels$label)]
names(colors_clusters) <- levels(labels$label)
colors_clusters
# ------------------------------------------------------------
# Keep expression and clustering results for the cells that are common in both
# ------------------------------------------------------------
common_cells <- intersect(clustering[, "cell_id"], expr[, "cell_id"])
samp <- expr[expr[, "cell_id"] %in% common_cells, "sample_id"]
clust <- clustering[clustering[, "cell_id"] %in% common_cells, "cluster"]
e <- expr[expr[, "cell_id"] %in% common_cells, fcs_colnames]
labels <- labels[as.character(sort(unique(clust))), , drop = FALSE]
labels
# ------------------------------
# Annotation for merging or for the original clusters
# ------------------------------
annotation_row <- data.frame(cluster = labels$label)
rownames(annotation_row) <- labels$label
annotation_colors <- list(cluster = colors_clusters)
rows_order <- 1:nrow(labels)
### Drop the "drop" cluster
rows_order <- rows_order[labels$label != "drop"]
if(!is.null(path_cluster_merging)){
### Read in cluster merging file
cm <- gdata::read.xls(path_cluster_merging)
if(!all(c("old_cluster", "label", "new_cluster") %in% colnames(cm)))
stop("Merging file must contain 'old_cluster', 'label' and 'new_cluster' columns!")
### Remove spaces in labels bcs they are problematic...
cm$label <- factor(cm$label, labels = gsub(" ", "_", levels(cm$label)))
cm_unique <- unique(cm[, c("label", "new_cluster")])
cm_unique <- cm_unique[order(cm_unique$new_cluster), ]
### Add merging to the annotation
mm <- match(annotation_row$cluster, cm$old_cluster)
annotation_row$cluster_merging <- cm$label[mm]
annotation_row$cluster_merging <- factor(annotation_row$cluster_merging, levels = cm_unique$label)
### Add colors for merging
color_ramp <- c(colors_muted, gg_color_hue(max(1, nlevels(cm_unique$label) - length(colors_muted))))
colors_clusters_merging <- color_ramp[1:nlevels(cm_unique$label)]
names(colors_clusters_merging) <- cm_unique$label
annotation_colors[["cluster_merging"]] <- colors_clusters_merging
rows_order <- order(annotation_row$cluster_merging, annotation_row$cluster)
### Drop the "drop" cluster
rows_order <- rows_order[annotation_row$cluster_merging[rows_order] != "drop"]
}
# ------------------------------------------------------------
# Load marker selection for plotting on the heatmaps
# ------------------------------------------------------------
marker_selection <- NULL
if(!is.null(path_marker_selection)){
if(file.exists(path_marker_selection)){
marker_selection <- read.table(file.path(path_marker_selection), header = TRUE, sep = "\t", as.is = TRUE)
marker_selection <- marker_selection[, 1]
if(!all(marker_selection %in% clustering_observables$marker))
stop("Marker selection is wrong")
}
}
# ------------------------------------------------------------
# Marker information
# ------------------------------------------------------------
# Get the isotope and antigen for fcs markers
m <- match(fcs_colnames, clustering_observables$mass)
fcs_panel <- data.frame(fcs_colname = fcs_colnames, Isotope = clustering_observables$mass[m], Antigen = clustering_observables$marker[m], stringsAsFactors = FALSE)
# Indices of observables used for clustering
scols <- which(fcs_colnames %in% clust_observ)
# Indices of other (non-clustering) observables
xcols <- which(!fcs_colnames %in% clust_observ)
# Reorder both index vectors by decreasing PCA score, when available
if("avg_score" %in% colnames(clustering_observables)){
scols <- scols[order(clustering_observables[fcs_colnames[scols], "avg_score"], decreasing = TRUE)]
xcols <- xcols[order(clustering_observables[fcs_colnames[xcols], "avg_score"], decreasing = TRUE)]
}
# ------------------------------------------------------------
# Plotting heatmaps
# ------------------------------------------------------------
samp_org <- samp
clust_org <- clust
e_org <- e
if(!is.null(path_data_norm)){
e_norm <- expr_norm[expr_norm[, "cell_id"] %in% common_cells, fcs_colnames]
e_norm_org <- e_norm
}
subset_samp <- list()
subset_samp[["all"]] <- unique(samp)
if(plot_HD){
subset_samp[["HD"]] <- unique(samp)[grep("_HD", unique(samp))]
}
### Plot heatmaps based on all the data or the HD samples only
for(ii in 1:length(subset_samp)){
# ii = 1
subset_name <- names(subset_samp)[ii]
cells2keep <- samp_org %in% subset_samp[[ii]]
samp <- samp_org[cells2keep]
clust <- clust_org[cells2keep]
e <- e_org[cells2keep, , drop = FALSE]
# ------------------------------------------------------------
# Get the median expression
# ------------------------------------------------------------
colnames(e) <- fcs_panel$Antigen
a <- aggregate(e, by = list(clust = clust), FUN = aggregate_fun)
# get cluster frequencies
freq_clust <- table(clust)
### Save cluster frequencies and the median expression
clusters_out <- data.frame(cluster = names(freq_clust), label = labels[names(freq_clust), "label"], counts = as.numeric(freq_clust), frequencies = as.numeric(freq_clust)/sum(freq_clust), a[, fcs_panel$Antigen[c(scols, xcols)]])
write.table(clusters_out, file.path(outdir, paste0(prefix, "cluster_median_expression_", subset_name, "_raw.xls")), sep = "\t", quote = FALSE, row.names = FALSE, col.names = TRUE)
# ------------------------------------------------------------
# Row clustering
# ------------------------------------------------------------
### This clustering is based on the markers that were used for the main clustering, and it is used in all the heatmaps
expr <- as.matrix(a[, fcs_panel$Antigen[scols]])
rownames(expr) <- labels[as.character(a[, "clust"]), "label"]
if(nrow(expr) > 1)
cluster_rows <- hclust(dist(expr), method = linkage)
# ------------------------------------------------------------
# Heatmaps of raw median expression
# ------------------------------------------------------------
### Use all markers for plotting
expr <- as.matrix(a[, fcs_panel$Antigen[c(scols, xcols)]])
rownames(expr) <- labels[as.character(a[, "clust"]), "label"]
labels_row <- paste0(rownames(expr), " (", round(as.numeric(freq_clust)/sum(freq_clust)*100, 2), "%)")
labels_col <- colnames(expr)
if(pheatmap_palette_rev){
color <- colorRampPalette(rev(brewer.pal(n = 8, name = pheatmap_palette)))(100)
}else{
color <- colorRampPalette(brewer.pal(n = 8, name = pheatmap_palette))(100)
}
## With row clustering
if(nrow(expr) > 1)
pheatmap(expr, color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = cluster_rows, labels_col = labels_col, labels_row = labels_row, display_numbers = TRUE, number_color = "black", fontsize_number = 8, gaps_col = length(scols), fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_all_row_clust_raw.pdf")))
## No row clustering
pheatmap(expr[rows_order, , drop = FALSE], color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = FALSE, labels_col = labels_col, labels_row = labels_row[rows_order], display_numbers = FALSE, number_color = "black", fontsize_number = 8, gaps_col = length(scols), fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_all_no_clust_raw.pdf")))
## Plot only the selected markers
if(!is.null(marker_selection)){
expr_sub <- expr[, marker_selection, drop = FALSE]
labels_col_sub <- colnames(expr_sub)
if(nrow(expr) > 1)
pheatmap(expr_sub, color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = cluster_rows, labels_col = labels_col_sub, labels_row = labels_row, fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_sel_row_clust_raw.pdf")))
pheatmap(expr_sub[rows_order, , drop = FALSE], color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = FALSE, labels_col = labels_col_sub, labels_row = labels_row[rows_order], fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_sel_no_clust_raw.pdf")))
}
if(scale){
# ------------------------------------------------------------
# Heatmaps of raw median expression scalled by marker (column)
# ------------------------------------------------------------
# Column-wise scaling mode; only "s01" is active, "snorm" kept as an alternative
scalling_type <- "s01"
switch(scalling_type,
snorm = {
## scaled to mean = 0, sd = 1, then truncated at +/- th so outliers
## do not dominate the color scale
expr_scaled <- apply(expr, 2, function(x){(x-mean(x))/sd(x)})
th <- 2.5
expr_scaled[expr_scaled > th] <- th
expr_scaled[expr_scaled < -th] <- -th
breaks = seq(from = -th, to = th, length.out = 101)
legend_breaks = seq(from = -round(th), to = round(th), by = 1)
},
s01 = {
## scaled to the [0, 1] range per marker (min-max normalization)
## NOTE(review): divides by max - min; a constant column would yield NaN -- TODO confirm this cannot occur
expr_scaled <- apply(expr, 2, function(x){(x-min(x))/(max(x)-min(x))})
breaks = seq(from = 0, to = 1, length.out = 101)
legend_breaks = seq(from = 0, to = 1, by = 0.25)
}
)
color <- colorRampPalette(brewer.pal(n = 8, name = "Greys"))(120)[11:110]
## With row clustering
if(nrow(expr) > 1)
pheatmap(expr_scaled, color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = cluster_rows, labels_col = labels_col, labels_row = labels_row, breaks = breaks, legend_breaks = legend_breaks, display_numbers = TRUE, number_color = "black", fontsize_number = 8, gaps_col = length(scols), fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_all_row_clust_scale.pdf")))
## No row clustering
pheatmap(expr_scaled[rows_order, , drop = FALSE], color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = FALSE, labels_col = labels_col, labels_row = labels_row[rows_order], breaks = breaks, legend_breaks = legend_breaks, display_numbers = FALSE, number_color = "black", fontsize_number = 8, gaps_col = length(scols), fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_all_no_clust_scale.pdf")))
## Plot only the selected markers
if(!is.null(marker_selection)){
expr_sub <- expr_scaled[, marker_selection, drop = FALSE]
labels_col_sub <- colnames(expr_sub)
if(nrow(expr) > 1)
pheatmap(expr_sub, color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = cluster_rows, labels_col = labels_col_sub, labels_row = labels_row, breaks = breaks, legend_breaks = legend_breaks, fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_sel_row_clust_scale.pdf")))
pheatmap(expr_sub[rows_order, , drop = FALSE], color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = FALSE, labels_col = labels_col_sub, labels_row = labels_row[rows_order], breaks = breaks, legend_breaks = legend_breaks, fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_sel_no_clust_scale.pdf")))
}
}
if(!is.null(path_data_norm)){
# ------------------------------------------------------------
# Heatmaps of norm median expression
# Had to do this way because I want to plot the 01 normalized data, but I want to keep row clustering from the raw data
# ------------------------------------------------------------
# ------------------------------------------------------------
# Get the median expression
# ------------------------------------------------------------
e_norm <- e_norm_org[cells2keep, , drop = FALSE]
colnames(e_norm) <- fcs_panel$Antigen
a_norm <- aggregate(e_norm, by = list(clust = clust), FUN = aggregate_fun)
# ------------------------------------------------------------
# pheatmaps of median expression
# ------------------------------------------------------------
### Use all markers for plotting
expr <- as.matrix(a_norm[, fcs_panel$Antigen[c(scols, xcols)]])
rownames(expr) <- labels[as.character(a_norm[, "clust"]), "label"]
labels_row <- paste0(rownames(expr), " (", round(as.numeric(freq_clust)/sum(freq_clust)*100, 2), "%)")
labels_col <- colnames(expr)
if(pheatmap_palette_norm_rev){
color <- colorRampPalette(rev(brewer.pal(n = 8, name = pheatmap_palette_norm)))(101)
}else{
color <- colorRampPalette(brewer.pal(n = 8, name = pheatmap_palette_norm))(101)
}
### Fixed legend range from 0 to 1
breaks = seq(from = 0, to = 1, length.out = 101)
legend_breaks = seq(from = 0, to = 1, by = 0.2)
## With row clustering
if(nrow(expr) > 1)
pheatmap(expr, color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = cluster_rows, labels_col = labels_col, labels_row = labels_row, breaks = breaks, legend_breaks = legend_breaks, display_numbers = TRUE, number_color = "black", fontsize_number = 8, gaps_col = length(scols), fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_all_row_clust_norm.pdf")))
color <- colorRampPalette(brewer.pal(n = 8, name = "Greys"))(110)[11:110]
## No row clustering
pheatmap(expr[rows_order, , drop = FALSE], color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = FALSE, labels_col = labels_col, labels_row = labels_row[rows_order], breaks = breaks, legend_breaks = legend_breaks, display_numbers = FALSE, number_color = "black", fontsize_number = 8, gaps_col = length(scols), fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_all_no_clust_norm.pdf")))
## Plot only the selected markers
if(!is.null(marker_selection)){
color <- colorRampPalette(brewer.pal(n = 8, name = "Greys"))(110)[11:110]
expr_sub <- expr[, marker_selection, drop = FALSE]
labels_col_sub <- colnames(expr_sub)
if(nrow(expr) > 1)
pheatmap(expr_sub, color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = cluster_rows, labels_col = labels_col_sub, labels_row = labels_row, breaks = breaks, legend_breaks = legend_breaks, fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_sel_row_clust_norm.pdf")))
pheatmap(expr_sub[rows_order, , drop = FALSE], color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = FALSE, labels_col = labels_col_sub, labels_row = labels_row[rows_order], breaks = breaks, legend_breaks = legend_breaks, fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_sel_no_clust_norm.pdf")))
}
}
}
sessionInfo()
|
/Nowicka2017/02_heatmaps.R
|
no_license
|
yhoang/drfz
|
R
| false
| false
| 22,187
|
r
|
Sys.time()
# Load packages
library(gdata)
library(pheatmap)
library(RColorBrewer)
##############################################################################
# Test arguments (for interactive development only: the assignment blocks
# below overwrite one another, and all of them are wiped by rm(list = ls())
# before the real command-line arguments are parsed)
##############################################################################
prefix='23_01_pca1_mergingNEW2_'
outdir='../carsten_cytof/PD1_project/CK_2016-06-23_01/030_heatmaps'
path_data='../carsten_cytof/PD1_project/CK_2016-06-23_01/010_data/23_01_expr_raw.rds'
path_data_norm='../carsten_cytof/PD1_project/CK_2016-06-23_01/010_data/23_01_expr_norm.rds'
path_clustering_observables='../carsten_cytof/PD1_project/CK_2016-06-23_01/030_heatmaps/23_01_pca1_clustering_observables.xls'
path_clustering='../carsten_cytof/PD1_project/CK_2016-06-23_01/030_heatmaps/23_01_pca1_mergingNEW2_clustering.xls'
path_clustering_labels='../carsten_cytof/PD1_project/CK_2016-06-23_01/030_heatmaps/23_01_pca1_mergingNEW2_clustering_labels.xls'
path_marker_selection='../carsten_cytof/PD1_project/CK_2016-06-23_01/010_helpfiles/23_01_pca1_mergingNEW2_marker_selection.txt'
path_cluster_merging=NULL
prefix='23_03_pca1_cl20_merging4_'
outdir='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_heatmaps'
path_data='../carsten_cytof/PD1_project/CK_2016-06-23_03/010_data/23_03_expr_raw.rds'
path_data_norm='../carsten_cytof/PD1_project/CK_2016-06-23_03/010_data/23_03_expr_norm.rds'
path_clustering_observables='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_heatmaps/23_03_pca1_clustering_observables.xls'
path_clustering='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_heatmaps/23_03_pca1_cl20_clustering.xls'
path_clustering_labels='../carsten_cytof/PD1_project/CK_2016-06-23_03/030_heatmaps/23_03_pca1_cl20_clustering_labels.xls'
path_marker_selection='../carsten_cytof/PD1_project/CK_2016-06-23_03/010_helpfiles/23_03_pca1_cl20_marker_selection.txt'
path_cluster_merging='../carsten_cytof/PD1_project/CK_2016-06-23_03/010_helpfiles/23_03_pca1_cl20_cluster_merging4.xlsx'
### Cytokine profiles
prefix='23CD4TmemCD69_29CD4TmemCD69_02CD4_cl49_clustering_data23CD4_cl1_'
outdir='../carsten_cytof/PD1_project/CK_2016-06-merged_23_29/02_CD4/090_cytokine_bimatrix_frequencies_clustering/cytokine_profiles'
path_data='../carsten_cytof/PD1_project/CK_2016-06-23_02_CD4_merging2/010_data/23CD4_02CD4_expr_raw.rds'
path_data_norm='../carsten_cytof/PD1_project/CK_2016-06-23_02_CD4_merging2/010_data/23CD4_02CD4_expr_norm.rds'
path_clustering_observables='../carsten_cytof/PD1_project/CK_2016-06-23_02_CD4_merging2/030_heatmaps/23CD4_02CD4_pca1_clustering_observables.xls'
path_clustering_labels='../carsten_cytof/PD1_project/CK_2016-06-23_02_CD4_merging2/030_heatmaps/23CD4_02CD4_pca1_merging2_clustering_labels.xls'
path_clustering='../carsten_cytof/PD1_project/CK_2016-06-merged_23_29/02_CD4/090_cytokine_bimatrix_frequencies_clustering/cytokine_profiles/23CD4TmemCD69_29CD4TmemCD69_02CD4_cl49_clustering_data23CD4_cl1.txt'
path_marker_selection='../carsten_cytof/PD1_project/CK_2016-06-23_02_CD4_merging2/010_helpfiles/23CD4_02CD4_pca1_merging2_marker_selection.txt'
path_cluster_merging=NULL
args <- NULL
##############################################################################
# Read in the arguments
##############################################################################
# NOTE(review): rm(list = ls()) wipes the whole workspace, including the
# interactive test assignments above; kept to reproduce the original behavior.
rm(list = ls())
args <- (commandArgs(trailingOnly = TRUE))
# Each command-line argument is an R assignment (e.g. "prefix='x_'") that is
# evaluated into the global environment.
# seq_along() is safe when no arguments are supplied; the original
# 1:length(args) would attempt args[[1]] on an empty list and error.
for (i in seq_along(args)) {
  eval(parse(text = args[[i]]))
}
cat(paste0(args, collapse = "\n"), fill = TRUE)
##############################################################################
# Create the output directory if it does not exist yet
if(!file.exists(outdir))
dir.create(outdir, recursive = TRUE)
# Hierarchical-clustering linkage used for ordering heatmap rows
linkage <- "average"
# Color palettes for the raw and the 0-1 normalized heatmaps
pheatmap_palette <- 'YlGnBu'
pheatmap_palette_rev <- FALSE
pheatmap_palette_norm <- 'RdYlBu'
pheatmap_palette_norm_rev <- TRUE
# Whether to additionally plot heatmaps for healthy-donor ("_HD") samples only
plot_HD <- FALSE
# Defaults for parameters that may be overridden via the command line
if(!any(grepl("aggregate_fun=", args))){
aggregate_fun='median'
}
if(!any(grepl("scale=", args))){
scale=TRUE
}
# ------------------------------------------------------------
# Load expression data
# ------------------------------------------------------------
expr <- readRDS(path_data)
cell_id <- expr[, "cell_id"]
samp <- expr[, "sample_id"]
# Marker columns are everything except the cell and sample identifier columns
fcs_colnames <- colnames(expr)[!grepl("cell_id|sample_id", colnames(expr))]
e <- expr[, fcs_colnames]
# Optionally load the 0-1 normalized expression matrix (same marker columns)
if(!is.null(path_data_norm)){
expr_norm <- readRDS(path_data_norm)
e_norm <- expr_norm[, fcs_colnames]
}
# ------------------------------------------------------------
# Load clustering data
# ------------------------------------------------------------
# Per-cell cluster assignments, named by cell_id
clustering <- read.table(path_clustering, header = TRUE, sep = "\t", as.is = TRUE)
clust <- clustering[, "cluster"]
names(clust) <- clustering[, "cell_id"]
# Cluster labels, ordered by cluster number and row-named by cluster
# so they can be looked up via labels[as.character(cluster), ]
labels <- read.table(path_clustering_labels, header = TRUE, sep = "\t", as.is = TRUE)
labels <- labels[order(labels$cluster, decreasing = FALSE), ]
labels$label <- factor(labels$label, levels = unique(labels$label))
rownames(labels) <- labels$cluster
labels
# Observables table (mass, marker name, clustering flag), row-named by mass
clustering_observables <- read.table(path_clustering_observables, header = TRUE, sep = "\t", as.is = TRUE)
rownames(clustering_observables) <- clustering_observables$mass
clustering_observables
# Masses of the observables that were used for the clustering itself
clust_observ <- clustering_observables[clustering_observables$clustering_observable, "mass"]
clust_observ
# ------------------------------------------------------------
# Prepare a color annotation for heatmaps
# ------------------------------------------------------------
# --------------------
# Colors for clusters
# --------------------
# ggplot palette
# Emulate the default ggplot2 categorical palette: n evenly spaced hues on
# the HCL color wheel at luminance 60 and chroma 100.
#
# @param n Number of colors to generate (positive integer).
# @return  Character vector of n hex color strings.
gg_color_hue <- function(n) {
  # Generate n + 1 hues so the first and last (both at hue 15 mod 360)
  # do not duplicate; keep the first n.
  # Fixed: spell out length.out (partial matching of `length=`) and use <-.
  hues <- seq(15, 375, length.out = n + 1)
  hcl(h = hues, l = 60, c = 100)[1:n]
}
# Color-blind-friendly muted palette (12 colors)
colors_muted <- c("#DC050C", "#E8601C", "#1965B0", "#7BAFDE", "#882E72", "#B17BA6", "#F1932D", "#F6C141", "#F7EE55", "#4EB265", "#90C987", "#CAEDAB")
# If there are more cluster labels than muted colors, extend with ggplot-style
# hues; max(1, ...) avoids calling gg_color_hue(0) or a negative count
color_ramp <- c(colors_muted, gg_color_hue(max(1, nlevels(labels$label) - length(colors_muted))))
# One named color per cluster label
colors_clusters <- color_ramp[1:nlevels(labels$label)]
names(colors_clusters) <- levels(labels$label)
colors_clusters
# ------------------------------------------------------------
# Keep expression and clustering results for the cells that are common in both
# ------------------------------------------------------------
common_cells <- intersect(clustering[, "cell_id"], expr[, "cell_id"])
samp <- expr[expr[, "cell_id"] %in% common_cells, "sample_id"]
clust <- clustering[clustering[, "cell_id"] %in% common_cells, "cluster"]
e <- expr[expr[, "cell_id"] %in% common_cells, fcs_colnames]
labels <- labels[as.character(sort(unique(clust))), , drop = FALSE]
labels
# ------------------------------
# Annotation for merging or for the original clusters
# ------------------------------
annotation_row <- data.frame(cluster = labels$label)
rownames(annotation_row) <- labels$label
annotation_colors <- list(cluster = colors_clusters)
rows_order <- 1:nrow(labels)
### Drop the "drop" cluster
rows_order <- rows_order[labels$label != "drop"]
if(!is.null(path_cluster_merging)){
### Read in cluster merging file
cm <- gdata::read.xls(path_cluster_merging)
if(!all(c("old_cluster", "label", "new_cluster") %in% colnames(cm)))
stop("Merging file must contain 'old_cluster', 'label' and 'new_cluster' columns!")
### Remove spaces in labels bcs they are problematic...
cm$label <- factor(cm$label, labels = gsub(" ", "_", levels(cm$label)))
cm_unique <- unique(cm[, c("label", "new_cluster")])
cm_unique <- cm_unique[order(cm_unique$new_cluster), ]
### Add merging to the annotation
mm <- match(annotation_row$cluster, cm$old_cluster)
annotation_row$cluster_merging <- cm$label[mm]
annotation_row$cluster_merging <- factor(annotation_row$cluster_merging, levels = cm_unique$label)
### Add colors for merging
color_ramp <- c(colors_muted, gg_color_hue(max(1, nlevels(cm_unique$label) - length(colors_muted))))
colors_clusters_merging <- color_ramp[1:nlevels(cm_unique$label)]
names(colors_clusters_merging) <- cm_unique$label
annotation_colors[["cluster_merging"]] <- colors_clusters_merging
rows_order <- order(annotation_row$cluster_merging, annotation_row$cluster)
### Drop the "drop" cluster
rows_order <- rows_order[annotation_row$cluster_merging[rows_order] != "drop"]
}
# ------------------------------------------------------------
# Load marker selection for plotting on the heatmaps
# ------------------------------------------------------------
marker_selection <- NULL
if(!is.null(path_marker_selection)){
if(file.exists(path_marker_selection)){
marker_selection <- read.table(file.path(path_marker_selection), header = TRUE, sep = "\t", as.is = TRUE)
marker_selection <- marker_selection[, 1]
if(!all(marker_selection %in% clustering_observables$marker))
stop("Marker selection is wrong")
}
}
# ------------------------------------------------------------
# Marker information
# ------------------------------------------------------------
# Get the isotope and antigen for fcs markers
m <- match(fcs_colnames, clustering_observables$mass)
fcs_panel <- data.frame(fcs_colname = fcs_colnames, Isotope = clustering_observables$mass[m], Antigen = clustering_observables$marker[m], stringsAsFactors = FALSE)
# Indices of observables used for clustering
scols <- which(fcs_colnames %in% clust_observ)
# Indices of other (non-clustering) observables
xcols <- which(!fcs_colnames %in% clust_observ)
# Reorder both index vectors by decreasing PCA score, when available
if("avg_score" %in% colnames(clustering_observables)){
scols <- scols[order(clustering_observables[fcs_colnames[scols], "avg_score"], decreasing = TRUE)]
xcols <- xcols[order(clustering_observables[fcs_colnames[xcols], "avg_score"], decreasing = TRUE)]
}
# ------------------------------------------------------------
# Plotting heatmaps
# ------------------------------------------------------------
samp_org <- samp
clust_org <- clust
e_org <- e
if(!is.null(path_data_norm)){
e_norm <- expr_norm[expr_norm[, "cell_id"] %in% common_cells, fcs_colnames]
e_norm_org <- e_norm
}
subset_samp <- list()
subset_samp[["all"]] <- unique(samp)
if(plot_HD){
subset_samp[["HD"]] <- unique(samp)[grep("_HD", unique(samp))]
}
### Plot heatmaps based on all the data or the HD samples only
for(ii in 1:length(subset_samp)){
# ii = 1
subset_name <- names(subset_samp)[ii]
cells2keep <- samp_org %in% subset_samp[[ii]]
samp <- samp_org[cells2keep]
clust <- clust_org[cells2keep]
e <- e_org[cells2keep, , drop = FALSE]
# ------------------------------------------------------------
# Get the median expression
# ------------------------------------------------------------
colnames(e) <- fcs_panel$Antigen
a <- aggregate(e, by = list(clust = clust), FUN = aggregate_fun)
# get cluster frequencies
freq_clust <- table(clust)
### Save cluster frequencies and the median expression
clusters_out <- data.frame(cluster = names(freq_clust), label = labels[names(freq_clust), "label"], counts = as.numeric(freq_clust), frequencies = as.numeric(freq_clust)/sum(freq_clust), a[, fcs_panel$Antigen[c(scols, xcols)]])
write.table(clusters_out, file.path(outdir, paste0(prefix, "cluster_median_expression_", subset_name, "_raw.xls")), sep = "\t", quote = FALSE, row.names = FALSE, col.names = TRUE)
# ------------------------------------------------------------
# Row clustering
# ------------------------------------------------------------
### This clustering is based on the markers that were used for the main clustering, and it is used in all the heatmaps
expr <- as.matrix(a[, fcs_panel$Antigen[scols]])
rownames(expr) <- labels[as.character(a[, "clust"]), "label"]
if(nrow(expr) > 1)
cluster_rows <- hclust(dist(expr), method = linkage)
# ------------------------------------------------------------
# Heatmaps of raw median expression
# ------------------------------------------------------------
### Use all markers for plotting
expr <- as.matrix(a[, fcs_panel$Antigen[c(scols, xcols)]])
rownames(expr) <- labels[as.character(a[, "clust"]), "label"]
labels_row <- paste0(rownames(expr), " (", round(as.numeric(freq_clust)/sum(freq_clust)*100, 2), "%)")
labels_col <- colnames(expr)
if(pheatmap_palette_rev){
color <- colorRampPalette(rev(brewer.pal(n = 8, name = pheatmap_palette)))(100)
}else{
color <- colorRampPalette(brewer.pal(n = 8, name = pheatmap_palette))(100)
}
## With row clustering
if(nrow(expr) > 1)
pheatmap(expr, color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = cluster_rows, labels_col = labels_col, labels_row = labels_row, display_numbers = TRUE, number_color = "black", fontsize_number = 8, gaps_col = length(scols), fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_all_row_clust_raw.pdf")))
## No row clustering
pheatmap(expr[rows_order, , drop = FALSE], color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = FALSE, labels_col = labels_col, labels_row = labels_row[rows_order], display_numbers = FALSE, number_color = "black", fontsize_number = 8, gaps_col = length(scols), fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_all_no_clust_raw.pdf")))
## Plot only the selected markers
if(!is.null(marker_selection)){
expr_sub <- expr[, marker_selection, drop = FALSE]
labels_col_sub <- colnames(expr_sub)
if(nrow(expr) > 1)
pheatmap(expr_sub, color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = cluster_rows, labels_col = labels_col_sub, labels_row = labels_row, fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_sel_row_clust_raw.pdf")))
pheatmap(expr_sub[rows_order, , drop = FALSE], color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = FALSE, labels_col = labels_col_sub, labels_row = labels_row[rows_order], fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_sel_no_clust_raw.pdf")))
}
if(scale){
# ------------------------------------------------------------
# Heatmaps of raw median expression scalled by marker (column)
# ------------------------------------------------------------
# Column-wise scaling mode; only "s01" is active, "snorm" kept as an alternative
scalling_type <- "s01"
switch(scalling_type,
snorm = {
## scaled to mean = 0, sd = 1, then truncated at +/- th so outliers
## do not dominate the color scale
expr_scaled <- apply(expr, 2, function(x){(x-mean(x))/sd(x)})
th <- 2.5
expr_scaled[expr_scaled > th] <- th
expr_scaled[expr_scaled < -th] <- -th
breaks = seq(from = -th, to = th, length.out = 101)
legend_breaks = seq(from = -round(th), to = round(th), by = 1)
},
s01 = {
## scaled to the [0, 1] range per marker (min-max normalization)
## NOTE(review): divides by max - min; a constant column would yield NaN -- TODO confirm this cannot occur
expr_scaled <- apply(expr, 2, function(x){(x-min(x))/(max(x)-min(x))})
breaks = seq(from = 0, to = 1, length.out = 101)
legend_breaks = seq(from = 0, to = 1, by = 0.25)
}
)
color <- colorRampPalette(brewer.pal(n = 8, name = "Greys"))(120)[11:110]
## With row clustering
if(nrow(expr) > 1)
pheatmap(expr_scaled, color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = cluster_rows, labels_col = labels_col, labels_row = labels_row, breaks = breaks, legend_breaks = legend_breaks, display_numbers = TRUE, number_color = "black", fontsize_number = 8, gaps_col = length(scols), fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_all_row_clust_scale.pdf")))
## No row clustering
pheatmap(expr_scaled[rows_order, , drop = FALSE], color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = FALSE, labels_col = labels_col, labels_row = labels_row[rows_order], breaks = breaks, legend_breaks = legend_breaks, display_numbers = FALSE, number_color = "black", fontsize_number = 8, gaps_col = length(scols), fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_all_no_clust_scale.pdf")))
## Plot only the selected markers
if(!is.null(marker_selection)){
expr_sub <- expr_scaled[, marker_selection, drop = FALSE]
labels_col_sub <- colnames(expr_sub)
if(nrow(expr) > 1)
pheatmap(expr_sub, color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = cluster_rows, labels_col = labels_col_sub, labels_row = labels_row, breaks = breaks, legend_breaks = legend_breaks, fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_sel_row_clust_scale.pdf")))
pheatmap(expr_sub[rows_order, , drop = FALSE], color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = FALSE, labels_col = labels_col_sub, labels_row = labels_row[rows_order], breaks = breaks, legend_breaks = legend_breaks, fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_sel_no_clust_scale.pdf")))
}
}
if(!is.null(path_data_norm)){
# ------------------------------------------------------------
# Heatmaps of norm median expression
# Had to do this way because I want to plot the 01 normalized data, but I want to keep row clustering from the raw data
# ------------------------------------------------------------
# ------------------------------------------------------------
# Get the median expression
# ------------------------------------------------------------
e_norm <- e_norm_org[cells2keep, , drop = FALSE]
colnames(e_norm) <- fcs_panel$Antigen
a_norm <- aggregate(e_norm, by = list(clust = clust), FUN = aggregate_fun)
# ------------------------------------------------------------
# pheatmaps of median expression
# ------------------------------------------------------------
### Use all markers for plotting
expr <- as.matrix(a_norm[, fcs_panel$Antigen[c(scols, xcols)]])
rownames(expr) <- labels[as.character(a_norm[, "clust"]), "label"]
labels_row <- paste0(rownames(expr), " (", round(as.numeric(freq_clust)/sum(freq_clust)*100, 2), "%)")
labels_col <- colnames(expr)
if(pheatmap_palette_norm_rev){
color <- colorRampPalette(rev(brewer.pal(n = 8, name = pheatmap_palette_norm)))(101)
}else{
color <- colorRampPalette(brewer.pal(n = 8, name = pheatmap_palette_norm))(101)
}
### Fixed legend range from 0 to 1
breaks = seq(from = 0, to = 1, length.out = 101)
legend_breaks = seq(from = 0, to = 1, by = 0.2)
## With row clustering
if(nrow(expr) > 1)
pheatmap(expr, color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = cluster_rows, labels_col = labels_col, labels_row = labels_row, breaks = breaks, legend_breaks = legend_breaks, display_numbers = TRUE, number_color = "black", fontsize_number = 8, gaps_col = length(scols), fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_all_row_clust_norm.pdf")))
color <- colorRampPalette(brewer.pal(n = 8, name = "Greys"))(110)[11:110]
## No row clustering
pheatmap(expr[rows_order, , drop = FALSE], color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = FALSE, labels_col = labels_col, labels_row = labels_row[rows_order], breaks = breaks, legend_breaks = legend_breaks, display_numbers = FALSE, number_color = "black", fontsize_number = 8, gaps_col = length(scols), fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_all_no_clust_norm.pdf")))
## Plot only the selected markers
if(!is.null(marker_selection)){
color <- colorRampPalette(brewer.pal(n = 8, name = "Greys"))(110)[11:110]
expr_sub <- expr[, marker_selection, drop = FALSE]
labels_col_sub <- colnames(expr_sub)
if(nrow(expr) > 1)
pheatmap(expr_sub, color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = cluster_rows, labels_col = labels_col_sub, labels_row = labels_row, breaks = breaks, legend_breaks = legend_breaks, fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_sel_row_clust_norm.pdf")))
pheatmap(expr_sub[rows_order, , drop = FALSE], color = color, cellwidth = 24, cellheight = 24, cluster_cols = FALSE, cluster_rows = FALSE, labels_col = labels_col_sub, labels_row = labels_row[rows_order], breaks = breaks, legend_breaks = legend_breaks, fontsize_row = 14, fontsize_col = 14, fontsize = 12, annotation_row = annotation_row, annotation_colors = annotation_colors, filename = file.path(outdir, paste0(prefix, "pheatmap_", subset_name, "_sel_no_clust_norm.pdf")))
}
}
}
sessionInfo()
|
# Hospitalization data for Metro St. Louis
# =============================================================================
# load data - regional hospitalization time series
stl_hosp <- read_csv(file.path("data", "MO_HEALTH_Covid_Tracking", "data", "metro", "stl_hospital.csv"))
# =============================================================================
# define colors - shared legend palette for count vs. rolling average
pal <- brewer.pal(n = 3, name = "Set1")
cols <- setNames(pal[1:2], c("7-day Average", "Count"))
# =============================================================================
# plot new in patient
## define top_val - round the y-axis ceiling up to the nearest 10
top_val <- round_any(x = max(stl_hosp$new_in_pt, na.rm = TRUE), accuracy = 10, f = ceiling)
## subset - reshape count and 7-day average into long form for plotting
## NOTE(review): `date` and `hosp_date` are globals set upstream; the two-day
## lag presumably reflects reporting delay for new admissions - confirm upstream
stl_hosp %>%
  filter(report_date <= date-2) %>%
  select(report_date, new_in_pt, new_in_pt_avg) %>%
  pivot_longer(cols = c(new_in_pt, new_in_pt_avg), names_to = "category", values_to = "value") %>%
  mutate(category = case_when(
    category == "new_in_pt" ~ "Count",
    category == "new_in_pt_avg" ~ "7-day Average"
  )) %>%
  mutate(category = fct_relevel(category, "Count", "7-day Average")) -> stl_subset
avg_line <- filter(stl_subset, category == "7-day Average")
## create points - highlight the most recent observation
hosp_points <- filter(stl_subset, report_date == hosp_date-2)
## create factors - order legend entries by most recent values
stl_subset <- mutate(stl_subset, factor_var = fct_reorder2(category, report_date, value))
hosp_points <- mutate(hosp_points, factor_var = fct_reorder2(category, report_date, value))
## plot
p <- ggplot() +
  geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = factor_var), size = 2) +
  geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
  geom_point(hosp_points, mapping = aes(x = report_date, y = value, color = factor_var),
             size = 4, show.legend = FALSE) +
  scale_colour_manual(values = cols, name = "Measure") +
  scale_x_date(date_breaks = hosp_breaks, date_labels = "%b") +
  scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 10)) +
  labs(
    title = "New COVID-19 Hospitalizations in Metro St. Louis",
    subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date-2)),
    x = "Date",
    y = "New Patients",
    caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Pandemic Task Force"
  ) +
  sequoia_theme(base_size = 22, background = "white") +
  theme(axis.text.x = element_text(angle = x_angle))
## save plot
save_plots(filename = "results/high_res/stl_metro/n_new_in_pt.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/n_new_in_pt.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot in patient
## define top_val - round the y-axis ceiling up to the nearest 100
top_val <- round_any(x = max(stl_hosp$in_pt, na.rm = TRUE), accuracy = 100, f = ceiling)
## subset - reshape count and 7-day average into long form for plotting
stl_hosp %>%
  filter(report_date >= as.Date("2020-04-05")) %>%
  select(report_date, in_pt, in_pt_avg) %>%
  pivot_longer(cols = c(in_pt, in_pt_avg), names_to = "category", values_to = "value") %>%
  mutate(category = case_when(
    category == "in_pt" ~ "Count",
    category == "in_pt_avg" ~ "7-day Average"
  )) %>%
  mutate(category = fct_relevel(category, "Count", "7-day Average")) -> stl_subset
avg_line <- filter(stl_subset, category == "7-day Average")
## create points - highlight the most recent observation
hosp_points <- filter(stl_subset, report_date == hosp_date)
## create factors - order legend entries by most recent values
stl_subset <- mutate(stl_subset, factor_var = fct_reorder2(category, report_date, value))
hosp_points <- mutate(hosp_points, factor_var = fct_reorder2(category, report_date, value))
## plot
p <- ggplot() +
  geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = factor_var), size = 2) +
  geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
  geom_point(hosp_points, mapping = aes(x = report_date, y = value, color = factor_var),
             size = 4, show.legend = FALSE) +
  scale_colour_manual(values = cols, name = "Measure") +
  scale_x_date(date_breaks = hosp_breaks, date_labels = "%b") +
  scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 100)) +
  labs(
    title = "Total COVID-19 Hospitalizations in Metro St. Louis",
    subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date)),
    x = "Date",
    y = "Total Patients",
    caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Pandemic Task Force"
  ) +
  sequoia_theme(base_size = 22, background = "white") +
  theme(axis.text.x = element_text(angle = x_angle))
## save plot
save_plots(filename = "results/high_res/stl_metro/o_in_pt.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/o_in_pt.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot icu
## define top_val - round the y-axis ceiling up to the nearest 25
top_val <- round_any(x = max(stl_hosp$icu, na.rm = TRUE), accuracy = 25, f = ceiling)
## subset - reshape count and 7-day average into long form for plotting
stl_hosp %>%
  filter(report_date >= as.Date("2020-04-05")) %>%
  select(report_date, icu, icu_avg) %>%
  pivot_longer(cols = c(icu, icu_avg), names_to = "category", values_to = "value") %>%
  mutate(category = case_when(
    category == "icu" ~ "Count",
    category == "icu_avg" ~ "7-day Average"
  )) %>%
  mutate(category = fct_relevel(category, "Count", "7-day Average")) -> stl_subset
avg_line <- filter(stl_subset, category == "7-day Average")
## create points - highlight the most recent observation
hosp_points <- filter(stl_subset, report_date == hosp_date)
## create factors - order legend entries by most recent values
stl_subset <- mutate(stl_subset, factor_var = fct_reorder2(category, report_date, value))
hosp_points <- mutate(hosp_points, factor_var = fct_reorder2(category, report_date, value))
## plot
p <- ggplot() +
  geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = factor_var), size = 2) +
  geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
  geom_point(hosp_points, mapping = aes(x = report_date, y = value, color = factor_var),
             size = 4, show.legend = FALSE) +
  scale_colour_manual(values = cols, name = "Measure") +
  scale_x_date(date_breaks = hosp_breaks, date_labels = "%b") +
  scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 25)) +
  labs(
    title = "Total COVID-19 ICU Patients in Metro St. Louis",
    subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date)),
    x = "Date",
    y = "Total ICU Patients",
    caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Pandemic Task Force"
  ) +
  sequoia_theme(base_size = 22, background = "white") +
  theme(axis.text.x = element_text(angle = x_angle))
## save plot
save_plots(filename = "results/high_res/stl_metro/p_icu.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/p_icu.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot vent (original comment said "plot icu" - copy-paste error)
## define top_val - round the y-axis ceiling up to the nearest 20
top_val <- round_any(x = max(stl_hosp$vent, na.rm = TRUE), accuracy = 20, f = ceiling)
## subset - reshape count and 7-day average into long form for plotting
stl_hosp %>%
  filter(report_date >= as.Date("2020-04-05")) %>%
  select(report_date, vent, vent_avg) %>%
  pivot_longer(cols = c(vent, vent_avg), names_to = "category", values_to = "value") %>%
  mutate(category = case_when(
    category == "vent" ~ "Count",
    category == "vent_avg" ~ "7-day Average"
  )) %>%
  mutate(category = fct_relevel(category, "Count", "7-day Average")) -> stl_subset
avg_line <- filter(stl_subset, category == "7-day Average")
## create points - highlight the most recent observation
hosp_points <- filter(stl_subset, report_date == hosp_date)
## create factors - order legend entries by most recent values
stl_subset <- mutate(stl_subset, factor_var = fct_reorder2(category, report_date, value))
hosp_points <- mutate(hosp_points, factor_var = fct_reorder2(category, report_date, value))
## plot
p <- ggplot() +
  geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = factor_var), size = 2) +
  geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
  geom_point(hosp_points, mapping = aes(x = report_date, y = value, color = factor_var),
             size = 4, show.legend = FALSE) +
  scale_colour_manual(values = cols, name = "Measure") +
  scale_x_date(date_breaks = hosp_breaks, date_labels = "%b") +
  scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 20)) +
  labs(
    title = "Total COVID-19 Ventilated Patients in Metro St. Louis",
    subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date)),
    x = "Date",
    y = "Total Ventilated Patients",
    caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Pandemic Task Force"
  ) +
  sequoia_theme(base_size = 22, background = "white") +
  theme(axis.text.x = element_text(angle = x_angle))
## save plot
save_plots(filename = "results/high_res/stl_metro/q_vent.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/q_vent.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot in-patient mortality (original comment said "plot icu" - copy-paste error)
## define top_val - round the y-axis ceiling up to the nearest 2
top_val <- round_any(x = max(stl_hosp$mortality, na.rm = TRUE), accuracy = 2, f = ceiling)
## subset - mortality reporting begins 2020-10-07
stl_hosp %>%
  filter(report_date >= as.Date("2020-10-07")) %>%
  select(report_date, mortality, mortality_avg) %>%
  pivot_longer(cols = c(mortality, mortality_avg), names_to = "category", values_to = "value") %>%
  mutate(category = case_when(
    category == "mortality" ~ "Count",
    category == "mortality_avg" ~ "7-day Average"
  )) %>%
  mutate(category = fct_relevel(category, "Count", "7-day Average")) -> stl_subset
avg_line <- filter(stl_subset, category == "7-day Average")
## create points - highlight the most recent observation
hosp_points <- filter(stl_subset, report_date == hosp_date)
## create factors - order legend entries by most recent values
stl_subset <- mutate(stl_subset, factor_var = fct_reorder2(category, report_date, value))
hosp_points <- mutate(hosp_points, factor_var = fct_reorder2(category, report_date, value))
## plot
p <- ggplot() +
  geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = factor_var), size = 2) +
  geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
  geom_point(hosp_points, mapping = aes(x = report_date, y = value, color = factor_var),
             size = 4, show.legend = FALSE) +
  scale_colour_manual(values = cols, name = "Measure") +
  scale_x_date(date_breaks = "1 month", date_labels = "%b") +
  scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 2)) +
  labs(
    title = "Total COVID-19 Deaths for In-patients in Metro St. Louis",
    subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date)),
    x = "Date",
    y = "Total Deaths",
    caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Pandemic Task Force"
  ) +
  sequoia_theme(base_size = 22, background = "white") +
  theme(axis.text.x = element_text(angle = x_angle))
## save plot
save_plots(filename = "results/high_res/stl_metro/r_inpt_mortality.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/r_inpt_mortality.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot ratio of icu/vent to all patients
## define colors - distinct Set1 hues for the two critical care series
pal <- brewer.pal(n = 4, name = "Set1")
cols <- c("ICU" = pal[3], "Ventilated" = pal[4])
## calculate ratios - NOTE: this overwrites stl_hosp with a long-format table;
## the raw data are re-loaded before the pediatric plots below
stl_hosp %>%
  mutate(icu_pct = icu_avg/in_pt_avg*100) %>%
  mutate(vent_pct = vent_avg/in_pt_avg*100) %>%
  select(report_date, icu_pct, vent_pct) %>%
  pivot_longer(cols = c("icu_pct", "vent_pct"), names_to = "category",
               values_to = "value") %>%
  filter(is.na(value) == FALSE) %>%
  mutate(category = case_when(
    category == "icu_pct" ~ "ICU",
    category == "vent_pct" ~ "Ventilated"
  )) -> stl_hosp
## define top_val - round the y-axis ceiling up to the nearest 5
top_val <- round_any(x = max(stl_hosp$value, na.rm = TRUE), accuracy = 5, f = ceiling)
## plot - subtitle date range now comes from the ratio data itself
## (original used the stale stl_subset from the mortality section above)
p <- ggplot() +
  geom_line(stl_hosp, mapping = aes(x = report_date, y = value, color = category), size = 2) +
  scale_colour_manual(values = cols, name = "Measure") +
  scale_x_date(date_breaks = "1 month", date_labels = "%b") +
  scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 5)) +
  labs(
    title = "COVID-19 Critical Care Patient Ratios in Metro St. Louis",
    subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_hosp$report_date), " through ", as.character(hosp_date)),
    x = "Date",
    y = "Percent of All In-Patients",
    caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Pandemic Task Force"
  ) +
  sequoia_theme(base_size = 22, background = "white") +
  theme(axis.text.x = element_text(angle = x_angle))
## save plot
save_plots(filename = "results/high_res/stl_metro/u_inpt_ratio.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/u_inpt_ratio.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# load data - pediatric hospitalization time series
stl_hosp <- read_csv(file.path("data", "MO_HEALTH_Covid_Tracking", "data", "metro", "stl_hospital_peds.csv"))
# =============================================================================
# define colors - reset the count vs. rolling average palette
pal <- brewer.pal(n = 3, name = "Set1")
cols <- setNames(pal[1:2], c("7-day Average", "Count"))
# =============================================================================
# pediatric hospitalizations
## subset - long form with one facet per age band plus an "All" panel;
## pediatric reporting begins 2021-09-01
stl_hosp %>%
  filter(report_date >= as.Date("2021-09-01")) %>%
  select(report_date, starts_with("peds_in")) %>%
  pivot_longer(cols = c(peds_in_pt_0_11, peds_in_pt_0_11_avg,
                        peds_in_pt_12_17, peds_in_pt_12_17_avg,
                        peds_in_pt, peds_in_pt_avg), names_to = "category", values_to = "value") %>%
  mutate(facet = case_when(
    category == "peds_in_pt_0_11" ~ "Pediatric Patients, 0-11 Years",
    category == "peds_in_pt_0_11_avg" ~ "Pediatric Patients, 0-11 Years",
    category == "peds_in_pt_12_17" ~ "Pediatric Patients, 12-17 Years",
    category == "peds_in_pt_12_17_avg" ~ "Pediatric Patients, 12-17 Years",
    category == "peds_in_pt" ~ "Pediatric Patients, All",
    category == "peds_in_pt_avg" ~ "Pediatric Patients, All"
  )) %>%
  mutate(category = case_when(
    category %in% c("peds_in_pt_0_11", "peds_in_pt_12_17", "peds_in_pt") ~ "Count",
    category %in% c("peds_in_pt_0_11_avg", "peds_in_pt_12_17_avg", "peds_in_pt_avg") ~ "7-day Average"
  )) %>%
  mutate(category = fct_relevel(category, "Count", "7-day Average")) %>%
  mutate(facet = fct_relevel(facet, "Pediatric Patients, All", "Pediatric Patients, 0-11 Years", "Pediatric Patients, 12-17 Years")) -> stl_subset
avg_line <- filter(stl_subset, category == "7-day Average")
## define top_val - round the y-axis ceiling up to the nearest 10
top_val <- round_any(x = max(stl_subset$value, na.rm = TRUE), accuracy = 10, f = ceiling)
## plot
p <- ggplot() +
  geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = category), size = 2) +
  geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
  scale_colour_manual(values = cols, name = "Measure") +
  scale_x_date(date_breaks = "1 month", date_labels = "%b") +
  scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 10)) +
  facet_wrap(vars(facet), nrow = 3) +
  labs(
    title = "COVID-19 Pediatric Patients in Metro St. Louis",
    subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date)),
    x = "Date",
    y = "Total Pediatric Patients",
    caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Pandemic Task Force"
  ) +
  sequoia_theme(base_size = 22, background = "white") +
  theme(axis.text.x = element_text(angle = x_angle))
## save plot
save_plots(filename = "results/high_res/stl_metro/s_inpt_peds.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/s_inpt_peds.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# pediatric ICU
## subset - long form with one facet per age band plus an "All" panel
stl_hosp %>%
  filter(report_date >= as.Date("2021-09-01")) %>%
  select(report_date, starts_with("peds_icu")) %>%
  pivot_longer(cols = c(peds_icu_0_11, peds_icu_0_11_avg,
                        peds_icu_12_17, peds_icu_12_17_avg,
                        peds_icu, peds_icu_avg), names_to = "category", values_to = "value") %>%
  mutate(facet = case_when(
    category == "peds_icu_0_11" ~ "Pediatric ICU Patients, 0-11 Years",
    category == "peds_icu_0_11_avg" ~ "Pediatric ICU Patients, 0-11 Years",
    category == "peds_icu_12_17" ~ "Pediatric ICU Patients, 12-17 Years",
    category == "peds_icu_12_17_avg" ~ "Pediatric ICU Patients, 12-17 Years",
    category == "peds_icu" ~ "Pediatric ICU Patients, All",
    category == "peds_icu_avg" ~ "Pediatric ICU Patients, All"
  )) %>%
  mutate(category = case_when(
    category %in% c("peds_icu_0_11", "peds_icu_12_17", "peds_icu") ~ "Count",
    category %in% c("peds_icu_0_11_avg", "peds_icu_12_17_avg", "peds_icu_avg") ~ "7-day Average"
  )) %>%
  mutate(category = fct_relevel(category, "Count", "7-day Average")) %>%
  mutate(facet = fct_relevel(facet, "Pediatric ICU Patients, All", "Pediatric ICU Patients, 0-11 Years", "Pediatric ICU Patients, 12-17 Years")) -> stl_subset
avg_line <- filter(stl_subset, category == "7-day Average")
## define top_val - round the y-axis ceiling up to the nearest 2
top_val <- round_any(x = max(stl_subset$value, na.rm = TRUE), accuracy = 2, f = ceiling)
## plot
p <- ggplot() +
  geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = category), size = 2) +
  geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
  scale_colour_manual(values = cols, name = "Measure") +
  scale_x_date(date_breaks = "1 month", date_labels = "%b") +
  scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 2)) +
  facet_wrap(vars(facet), nrow = 3) +
  labs(
    title = "COVID-19 Pediatric ICU Patients in Metro St. Louis",
    subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date)),
    x = "Date",
    y = "Pediatric ICU Patients",
    caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Pandemic Task Force"
  ) +
  sequoia_theme(base_size = 22, background = "white") +
  theme(axis.text.x = element_text(angle = x_angle))
## save plot
save_plots(filename = "results/high_res/stl_metro/t_icu_peds.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/t_icu_peds.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# clean-up - drop every object this script created from the workspace
rm(stl_hosp, stl_subset, hosp_points, avg_line, hosp_date,
   top_val, p, cols, pal)
|
/source/workflow/20_stl_hospital_plots.R
|
permissive
|
slu-openGIS/covid_daily_viz
|
R
| false
| false
| 19,158
|
r
|
# hospitalization data for St. Louis Metro
# =============================================================================
# load data
stl_hosp <- read_csv("data/MO_HEALTH_Covid_Tracking/data/metro/stl_hospital.csv")
# =============================================================================
# define colors
pal <- brewer.pal(n = 3, name = "Set1")
cols <- c("7-day Average" = pal[1], "Count" = pal[2])
# =============================================================================
# plot new in patient
## define top_val
top_val <- round_any(x = max(stl_hosp$new_in_pt, na.rm = TRUE), accuracy = 10, f = ceiling)
## subset
stl_hosp %>%
filter(report_date <= date-2) %>%
select(report_date, new_in_pt, new_in_pt_avg) %>%
pivot_longer(cols = c(new_in_pt, new_in_pt_avg), names_to = "category", values_to = "value") %>%
mutate(category = case_when(
category == "new_in_pt" ~ "Count",
category == "new_in_pt_avg" ~ "7-day Average"
)) %>%
mutate(category = fct_relevel(category, "Count", "7-day Average")) -> stl_subset
avg_line <- filter(stl_subset, category == "7-day Average")
## create points
hosp_points <- filter(stl_subset, report_date == hosp_date-2)
## create factors
stl_subset <- mutate(stl_subset, factor_var = fct_reorder2(category, report_date, value))
hosp_points <- mutate(hosp_points, factor_var = fct_reorder2(category, report_date, value))
## plot
p <- ggplot() +
geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = factor_var), size = 2) +
geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
geom_point(hosp_points, mapping = aes(x = report_date, y = value, color = factor_var),
size = 4, show.legend = FALSE) +
scale_colour_manual(values = cols, name = "Measure") +
scale_x_date(date_breaks = hosp_breaks, date_labels = "%b") +
scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 10)) +
labs(
title = "New COVID-19 Hospitalizations in Metro St. Louis",
subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date-2)),
x = "Date",
y = "New Patients",
caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Parademic Task Force"
) +
sequoia_theme(base_size = 22, background = "white") +
theme(axis.text.x = element_text(angle = x_angle))
## save plot
save_plots(filename = "results/high_res/stl_metro/n_new_in_pt.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/n_new_in_pt.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot in patient
## define top_val
top_val <- round_any(x = max(stl_hosp$in_pt, na.rm = TRUE), accuracy = 100, f = ceiling)
## subset
stl_hosp %>%
filter(report_date >= as.Date("2020-04-05")) %>%
select(report_date, in_pt, in_pt_avg) %>%
pivot_longer(cols = c(in_pt, in_pt_avg), names_to = "category", values_to = "value") %>%
mutate(category = case_when(
category == "in_pt" ~ "Count",
category == "in_pt_avg" ~ "7-day Average"
)) %>%
mutate(category = fct_relevel(category, "Count", "7-day Average")) -> stl_subset
avg_line <- filter(stl_subset, category == "7-day Average")
## create points
hosp_points <- filter(stl_subset, report_date == hosp_date)
## create factors
stl_subset <- mutate(stl_subset, factor_var = fct_reorder2(category, report_date, value))
hosp_points <- mutate(hosp_points, factor_var = fct_reorder2(category, report_date, value))
## plot
p <- ggplot() +
geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = factor_var), size = 2) +
geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
geom_point(hosp_points, mapping = aes(x = report_date, y = value, color = factor_var),
size = 4, show.legend = FALSE) +
scale_colour_manual(values = cols, name = "Measure") +
scale_x_date(date_breaks = hosp_breaks, date_labels = "%b") +
scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 100)) +
labs(
title = "Total COVID-19 Hospitalizations in Metro St. Louis",
subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date)),
x = "Date",
y = "Total Patients",
caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Parademic Task Force"
) +
sequoia_theme(base_size = 22, background = "white") +
theme(axis.text.x = element_text(angle = x_angle))
## save plot
save_plots(filename = "results/high_res/stl_metro/o_in_pt.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/o_in_pt.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot icu
## define top_val
top_val <- round_any(x = max(stl_hosp$icu, na.rm = TRUE), accuracy = 25, f = ceiling)
## subset
stl_hosp %>%
filter(report_date >= as.Date("2020-04-05")) %>%
select(report_date, icu, icu_avg) %>%
pivot_longer(cols = c(icu, icu_avg), names_to = "category", values_to = "value") %>%
mutate(category = case_when(
category == "icu" ~ "Count",
category == "icu_avg" ~ "7-day Average"
)) %>%
mutate(category = fct_relevel(category, "Count", "7-day Average")) -> stl_subset
avg_line <- filter(stl_subset, category == "7-day Average")
## create points
hosp_points <- filter(stl_subset, report_date == hosp_date)
## create factors
stl_subset <- mutate(stl_subset, factor_var = fct_reorder2(category, report_date, value))
hosp_points <- mutate(hosp_points, factor_var = fct_reorder2(category, report_date, value))
## plot
p <- ggplot() +
geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = factor_var), size = 2) +
geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
geom_point(hosp_points, mapping = aes(x = report_date, y = value, color = factor_var),
size = 4, show.legend = FALSE) +
scale_colour_manual(values = cols, name = "Measure") +
scale_x_date(date_breaks = hosp_breaks, date_labels = "%b") +
scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 25)) +
labs(
title = "Total COVID-19 ICU Patients in Metro St. Louis",
subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date)),
x = "Date",
y = "Total ICU Patients",
caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Parademic Task Force"
) +
sequoia_theme(base_size = 22, background = "white") +
theme(axis.text.x = element_text(angle = x_angle))
## save plot
save_plots(filename = "results/high_res/stl_metro/p_icu.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/p_icu.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot icu
## define top_val
top_val <- round_any(x = max(stl_hosp$vent, na.rm = TRUE), accuracy = 20, f = ceiling)
## subset
stl_hosp %>%
filter(report_date >= as.Date("2020-04-05")) %>%
select(report_date, vent, vent_avg) %>%
pivot_longer(cols = c(vent, vent_avg), names_to = "category", values_to = "value") %>%
mutate(category = case_when(
category == "vent" ~ "Count",
category == "vent_avg" ~ "7-day Average"
)) %>%
mutate(category = fct_relevel(category, "Count", "7-day Average")) -> stl_subset
avg_line <- filter(stl_subset, category == "7-day Average")
## create points
hosp_points <- filter(stl_subset, report_date == hosp_date)
## create factors
stl_subset <- mutate(stl_subset, factor_var = fct_reorder2(category, report_date, value))
hosp_points <- mutate(hosp_points, factor_var = fct_reorder2(category, report_date, value))
## plot
p <- ggplot() +
geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = factor_var), size = 2) +
geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
geom_point(hosp_points, mapping = aes(x = report_date, y = value, color = factor_var),
size = 4, show.legend = FALSE) +
scale_colour_manual(values = cols, name = "Measure") +
scale_x_date(date_breaks = hosp_breaks, date_labels = "%b") +
scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 20)) +
labs(
title = "Total COVID-19 Ventilated Patients in Metro St. Louis",
subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date)),
x = "Date",
y = "Total Ventilated Patients",
caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Parademic Task Force"
) +
sequoia_theme(base_size = 22, background = "white") +
theme(axis.text.x = element_text(angle = x_angle))
## save plot
save_plots(filename = "results/high_res/stl_metro/q_vent.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/q_vent.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot icu
## define top_val
top_val <- round_any(x = max(stl_hosp$mortality, na.rm = TRUE), accuracy = 2, f = ceiling)
## subset
stl_hosp %>%
filter(report_date >= as.Date("2020-10-07")) %>%
select(report_date, mortality, mortality_avg) %>%
pivot_longer(cols = c(mortality, mortality_avg), names_to = "category", values_to = "value") %>%
mutate(category = case_when(
category == "mortality" ~ "Count",
category == "mortality_avg" ~ "7-day Average"
)) %>%
mutate(category = fct_relevel(category, "Count", "7-day Average")) -> stl_subset
avg_line <- filter(stl_subset, category == "7-day Average")
## create points
hosp_points <- filter(stl_subset, report_date == hosp_date)
## create factors
stl_subset <- mutate(stl_subset, factor_var = fct_reorder2(category, report_date, value))
hosp_points <- mutate(hosp_points, factor_var = fct_reorder2(category, report_date, value))
## plot
p <- ggplot() +
geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = factor_var), size = 2) +
geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
geom_point(hosp_points, mapping = aes(x = report_date, y = value, color = factor_var),
size = 4, show.legend = FALSE) +
scale_colour_manual(values = cols, name = "Measure") +
scale_x_date(date_breaks = "1 month", date_labels = "%b") +
scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 2)) +
labs(
title = "Total COVID-19 Deaths for In-patients in Metro St. Louis",
subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date)),
x = "Date",
y = "Total Deaths",
caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Parademic Task Force"
) +
sequoia_theme(base_size = 22, background = "white") +
theme(axis.text.x = element_text(angle = x_angle))
## save plot
save_plots(filename = "results/high_res/stl_metro/r_inpt_mortality.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/r_inpt_mortality.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# plot ratio of icu/vent to all patients

## define colors
pal <- brewer.pal(n = 4, name = "Set1")
# fixed typo in the legend label: 'Ventialed' -> 'Ventilated'
cols <- c("ICU" = pal[3], "Ventilated" = pal[4])

## calculate ratios — ICU and ventilated averages as a percent of all in-patients,
## pivoted long for plotting (overwrites stl_hosp with the reshaped data)
stl_hosp %>%
  mutate(icu_pct = icu_avg/in_pt_avg*100) %>%
  mutate(vent_pct = vent_avg/in_pt_avg*100) %>%
  select(report_date, icu_pct, vent_pct) %>%
  pivot_longer(cols = c("icu_pct", "vent_pct"), names_to = "category",
               values_to = "value") %>%
  filter(is.na(value) == FALSE) %>%
  mutate(category = case_when(
    category == "icu_pct" ~ "ICU",
    category == "vent_pct" ~ "Ventilated"  # must match the name used in 'cols' above
  )) -> stl_hosp

## define top_val — round the y-axis ceiling up to the nearest 5
top_val <- round_any(x = max(stl_hosp$value, na.rm = TRUE), accuracy = 5, f = ceiling)

## plot
p <- ggplot() +
  geom_line(stl_hosp, mapping = aes(x = report_date, y = value, color = category), size = 2) +
  scale_colour_manual(values = cols, name = "Measure") +
  scale_x_date(date_breaks = "1 month", date_labels = "%b") +
  scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 5)) +
  labs(
    title = "COVID-19 Critical Care Patient Ratios in Metro St. Louis",
    subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date)),
    x = "Date",
    y = "Percent of All In-Patients",
    # fixed typo in the user-facing caption: 'Parademic' -> 'Pandemic'
    caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Pandemic Task Force"
  ) +
  sequoia_theme(base_size = 22, background = "white") +
  theme(axis.text.x = element_text(angle = x_angle))

## save plot
save_plots(filename = "results/high_res/stl_metro/u_inpt_ratio.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/u_inpt_ratio.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# load data — pediatric hospitalization counts for the STL metro task force
stl_hosp <- read_csv("data/MO_HEALTH_Covid_Tracking/data/metro/stl_hospital_peds.csv")

# =============================================================================
# define colors
pal <- brewer.pal(n = 3, name = "Set1")
cols <- c("7-day Average" = pal[1], "Count" = pal[2])

# =============================================================================
# pediatric hospitalizations

## subset — pivot daily counts and 7-day averages long; one facet per age band
stl_hosp %>%
  filter(report_date >= as.Date("2021-09-01")) %>%
  select(report_date, starts_with("peds_in")) %>%
  pivot_longer(cols = c(peds_in_pt_0_11, peds_in_pt_0_11_avg,
                        peds_in_pt_12_17, peds_in_pt_12_17_avg,
                        peds_in_pt, peds_in_pt_avg), names_to = "category", values_to = "value") %>%
  mutate(facet = case_when(
    category == "peds_in_pt_0_11" ~ "Pediatric Patients, 0-11 Years",
    category == "peds_in_pt_0_11_avg" ~ "Pediatric Patients, 0-11 Years",
    category == "peds_in_pt_12_17" ~ "Pediatric Patients, 12-17 Years",
    category == "peds_in_pt_12_17_avg" ~ "Pediatric Patients, 12-17 Years",
    category == "peds_in_pt" ~ "Pediatric Patients, All",
    category == "peds_in_pt_avg" ~ "Pediatric Patients, All"
  )) %>%
  mutate(category = case_when(
    category %in% c("peds_in_pt_0_11", "peds_in_pt_12_17", "peds_in_pt") ~ "Count",
    category %in% c("peds_in_pt_0_11_avg", "peds_in_pt_12_17_avg", "peds_in_pt_avg") ~ "7-day Average"
  )) %>%
  mutate(category = fct_relevel(category, "Count", "7-day Average")) %>%
  mutate(facet = fct_relevel(facet, "Pediatric Patients, All", "Pediatric Patients, 0-11 Years", "Pediatric Patients, 12-17 Years")) -> stl_subset

avg_line <- filter(stl_subset, category == "7-day Average")

## define top_val — round the y-axis ceiling up to the nearest 10
top_val <- round_any(x = max(stl_subset$value, na.rm = TRUE), accuracy = 10, f = ceiling)

## plot
p <- ggplot() +
  geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = category), size = 2) +
  geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
  scale_colour_manual(values = cols, name = "Measure") +
  scale_x_date(date_breaks = "1 month", date_labels = "%b") +
  scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 10)) +
  facet_wrap(vars(facet), nrow = 3) +
  labs(
    title = "COVID-19 Pediatric Patients in Metro St. Louis",
    subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date)),
    x = "Date",
    y = "Total Pediatric Patients",
    # fixed typo in the user-facing caption: 'Parademic' -> 'Pandemic'
    caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Pandemic Task Force"
  ) +
  sequoia_theme(base_size = 22, background = "white") +
  theme(axis.text.x = element_text(angle = x_angle))

## save plot
save_plots(filename = "results/high_res/stl_metro/s_inpt_peds.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/s_inpt_peds.png", plot = p, preset = "lg", dpi = 72)
# =============================================================================
# pediatric ICU

## subset — same reshaping as the in-patient plot, for the ICU columns
stl_hosp %>%
  filter(report_date >= as.Date("2021-09-01")) %>%
  select(report_date, starts_with("peds_icu")) %>%
  pivot_longer(cols = c(peds_icu_0_11, peds_icu_0_11_avg,
                        peds_icu_12_17, peds_icu_12_17_avg,
                        peds_icu, peds_icu_avg), names_to = "category", values_to = "value") %>%
  mutate(facet = case_when(
    category == "peds_icu_0_11" ~ "Pediatric ICU Patients, 0-11 Years",
    category == "peds_icu_0_11_avg" ~ "Pediatric ICU Patients, 0-11 Years",
    category == "peds_icu_12_17" ~ "Pediatric ICU Patients, 12-17 Years",
    category == "peds_icu_12_17_avg" ~ "Pediatric ICU Patients, 12-17 Years",
    category == "peds_icu" ~ "Pediatric ICU Patients, All",
    category == "peds_icu_avg" ~ "Pediatric ICU Patients, All"
  )) %>%
  mutate(category = case_when(
    category %in% c("peds_icu_0_11", "peds_icu_12_17", "peds_icu") ~ "Count",
    category %in% c("peds_icu_0_11_avg", "peds_icu_12_17_avg", "peds_icu_avg") ~ "7-day Average"
  )) %>%
  mutate(category = fct_relevel(category, "Count", "7-day Average")) %>%
  mutate(facet = fct_relevel(facet, "Pediatric ICU Patients, All", "Pediatric ICU Patients, 0-11 Years", "Pediatric ICU Patients, 12-17 Years")) -> stl_subset

avg_line <- filter(stl_subset, category == "7-day Average")

## define top_val — round the y-axis ceiling up to the nearest 2
top_val <- round_any(x = max(stl_subset$value, na.rm = TRUE), accuracy = 2, f = ceiling)

## plot
p <- ggplot() +
  geom_line(stl_subset, mapping = aes(x = report_date, y = value, color = category), size = 2) +
  geom_line(avg_line, mapping = aes(x = report_date, y = value), color = cols[1], size = 2) +
  scale_colour_manual(values = cols, name = "Measure") +
  scale_x_date(date_breaks = "1 month", date_labels = "%b") +
  scale_y_continuous(limits = c(0, top_val), breaks = seq(0, top_val, by = 2)) +
  facet_wrap(vars(facet), nrow = 3) +
  labs(
    title = "COVID-19 Pediatric ICU Patients in Metro St. Louis",
    subtitle = paste0("St. Louis Metropolitan Pandemic Task Force Hospitals\n", min(stl_subset$report_date), " through ", as.character(hosp_date)),
    x = "Date",
    y = "Pediatric ICU Patients",
    # fixed typo in the user-facing caption: 'Parademic' -> 'Pandemic'
    caption = "Plot by Christopher Prener, Ph.D.\nData via the St. Louis Metro Pandemic Task Force"
  ) +
  sequoia_theme(base_size = 22, background = "white") +
  theme(axis.text.x = element_text(angle = x_angle))

## save plot
save_plots(filename = "results/high_res/stl_metro/t_icu_peds.png", plot = p, preset = "lg")
save_plots(filename = "results/low_res/stl_metro/t_icu_peds.png", plot = p, preset = "lg", dpi = 72)

# =============================================================================
# clean-up — drop section-local objects before the next section runs
rm(stl_hosp, stl_subset, hosp_points, avg_line, hosp_date)
rm(top_val, p, cols, pal)
|
# Code extracted (purled) from the mlbstatsR package vignette.
# Each "## ----" marker below delimits one knitr chunk from the Rmd source;
# the repeated library() calls mirror the per-chunk setup in the vignette.
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(mlbstatsR)
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
# Player-level stats by season/category/table — presumably fetched from
# Baseball-Reference; confirm data source in the package documentation.
get_reference_players_mlb(1945,"batting", "value")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
get_reference_players_mlb(1965,"pitching", "ratio")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
get_reference_players_mlb(2002,"fielding", "appearances")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
# Team-level stats, same season/category/table pattern.
get_reference_team_mlb(2021,"batting", "advanced")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
get_reference_team_mlb(1980,"pitching", "battingagainst")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
get_reference_team_mlb(1980,"fielding", "centerfield")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
# Season standings for the given year.
get_reference_team_standings(1999)
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
# ESPN-sourced stats; last argument selects regular season vs. playoffs.
espn_player_stats(2015, "pitching", "regular")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
espn_player_stats(2004, "batting", "playoffs")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
espn_team_stats(2021, "fielding", "regular")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
espn_team_stats(2011, "fielding", "playoffs")
|
/inst/doc/mlbstatsR.R
|
no_license
|
cran/mlbstatsR
|
R
| false
| false
| 1,906
|
r
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(mlbstatsR)
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
get_reference_players_mlb(1945,"batting", "value")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
get_reference_players_mlb(1965,"pitching", "ratio")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
get_reference_players_mlb(2002,"fielding", "appearances")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
get_reference_team_mlb(2021,"batting", "advanced")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
get_reference_team_mlb(1980,"pitching", "battingagainst")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
get_reference_team_mlb(1980,"fielding", "centerfield")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
get_reference_team_standings(1999)
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
espn_player_stats(2015, "pitching", "regular")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
espn_player_stats(2004, "batting", "playoffs")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
espn_team_stats(2021, "fielding", "regular")
## ----echo=FALSE---------------------------------------------------------------
library(mlbstatsR)
espn_team_stats(2011, "fielding", "playoffs")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chart_amCandlestick.R
\name{amCandlestick}
\alias{amCandlestick}
\title{Plotting candlestick chart using rAmCharts}
\usage{
amCandlestick(data, xlab = "", ylab = "", horiz = FALSE,
positiveColor = "#7f8da9", negativeColor = "#db4c3c", names = c("low",
"open", "close", "high"), dataDateFormat = NULL,
minPeriod = ifelse(!is.null(dataDateFormat), "DD", ""), ...)
}
\arguments{
\item{data}{\code{data.frame}, dataframe with at least 5 columns:
category, open (numeric), close (numeric), low (numeric),
high (numeric). See \link{data_candleStick1} and \link{data_candleStick2}.}
\item{xlab}{\code{character}, label for x-axis.}
\item{ylab}{\code{character}, label for y-axis.}
\item{horiz}{\code{logical}, TRUE for a horizontal chart, FALSE for a vertical one.}
\item{positiveColor}{\code{character}, color for positive values (in hexadecimal).}
\item{negativeColor}{\code{character}, color for negative values (in hexadecimal).}
\item{names}{\code{character}, names for the tooltip. Default set to c("low", "open", "close", "high").}
\item{dataDateFormat}{\code{character}, default set to NULL. Even if your chart parses dates,
you can pass them as strings in your dataframe -
all you need to do is to set data date format and the chart will parse dates to date objects.
Check this page for available formats.
Please note that two-digit years (YY) as well as literal month names (MMM) are NOT supported in this setting.}
\item{minPeriod}{\code{character}, minPeriod Specifies the shortest period of your data.
This should be set only if dataDateFormat is not NULL.
Possible period values:
fff - milliseconds, ss - seconds, mm - minutes, hh - hours, DD - days, MM - months, YYYY - years.
It's also possible to supply a number for increments, i.e. '15mm'
which will instruct the chart that your data is supplied in 15 minute increments.}
\item{...}{see \code{\link{amOptions}} for more options.}
}
\description{
amCandlestick computes a candlestick chart of the given value.
}
\examples{
data("data_candleStick2")
amCandlestick(data = data_candleStick2)
\donttest{
# Change colors
amCandlestick(data = data_candleStick2, positiveColor = "black", negativeColor = "green")
# Naming the axes
amCandlestick(data = data_candleStick2, xlab = "categories", ylab = "values")
# Rotate the labels for x axis
amCandlestick(data = data_candleStick2, labelRotation = 90)
# Change names
amCandlestick(data = data_candleStick2, names = c("min", "begin", "end", "max"))
# Horizontal chart :
amCandlestick(data = data_candleStick2, horiz = TRUE)
# Parse date
amCandlestick(data = data_candleStick2, dataDateFormat = "YYYY-MM-DD")
# Data spanning several months
data_candleStick2$category <- c("2015-01-01", "2015-02-01", "2015-03-01",
"2015-04-01", "2015-05-01", "2015-06-01",
"2015-07-01", "2015-08-01", "2015-09-01",
"2015-10-01", "2015-11-01", "2015-12-01")
amCandlestick(data = data_candleStick2, dataDateFormat = "YYYY-MM-DD", minPeriod = "MM")
# Decimal precision
require(pipeR)
amCandlestick(data = data_candleStick2, horiz = TRUE) \%>>\%
setProperties(precision = 2)
}
}
\references{
See online documentation \url{https://datastorm-open.github.io/introduction_ramcharts/}
and \link{amChartsAPI}
}
\seealso{
\link{amOptions}, \link{amBarplot}, \link{amBoxplot}, \link{amHist}, \link{amPie},
\link{amPlot}, \link{amTimeSeries}, \link{amStockMultiSet}, \link{amBullet}, \link{amRadar},
\link{amWind}, \link{amFunnel}, \link{amAngularGauge}, \link{amSolidGauge}, \link{amMekko},
\link{amCandlestick}, \link{amFloatingBar}, \link{amOHLC}, \link{amWaterfall}
}
|
/man/amCandlestick.Rd
|
no_license
|
aghozlane/rAmCharts
|
R
| false
| true
| 3,847
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chart_amCandlestick.R
\name{amCandlestick}
\alias{amCandlestick}
\title{Plotting candlestick chart using rAmCharts}
\usage{
amCandlestick(data, xlab = "", ylab = "", horiz = FALSE,
positiveColor = "#7f8da9", negativeColor = "#db4c3c", names = c("low",
"open", "close", "high"), dataDateFormat = NULL,
minPeriod = ifelse(!is.null(dataDateFormat), "DD", ""), ...)
}
\arguments{
\item{data}{\code{data.frame}, dataframe with at least 5 columns:
category, open (numeric), close (numeric), low (numeric),
high (numeric). See \link{data_candleStick1} and \link{data_candleStick2}.}
\item{xlab}{\code{character}, label for x-axis.}
\item{ylab}{\code{character}, label for y-axis.}
\item{horiz}{\code{logical}, TRUE for a horizontal chart, FALSE for a vertical one.}
\item{positiveColor}{\code{character}, color for positive values (in hexadecimal).}
\item{negativeColor}{\code{character}, color for negative values (in hexadecimal).}
\item{names}{\code{character}, names for the tooltip. Default set to c("low", "open", "close", "high").}
\item{dataDateFormat}{\code{character}, default set to NULL. Even if your chart parses dates,
you can pass them as strings in your dataframe -
all you need to do is to set data date format and the chart will parse dates to date objects.
Check this page for available formats.
Please note that two-digit years (YY) as well as literal month names (MMM) are NOT supported in this setting.}
\item{minPeriod}{\code{character}, minPeriod Specifies the shortest period of your data.
This should be set only if dataDateFormat is not NULL.
Possible period values:
fff - milliseconds, ss - seconds, mm - minutes, hh - hours, DD - days, MM - months, YYYY - years.
It's also possible to supply a number for increments, i.e. '15mm'
which will instruct the chart that your data is supplied in 15 minute increments.}
\item{...}{see \code{\link{amOptions}} for more options.}
}
\description{
amCandlestick computes a candlestick chart of the given value.
}
\examples{
data("data_candleStick2")
amCandlestick(data = data_candleStick2)
\donttest{
# Change colors
amCandlestick(data = data_candleStick2, positiveColor = "black", negativeColor = "green")
# Naming the axes
amCandlestick(data = data_candleStick2, xlab = "categories", ylab = "values")
# Rotate the labels for x axis
amCandlestick(data = data_candleStick2, labelRotation = 90)
# Change names
amCandlestick(data = data_candleStick2, names = c("min", "begin", "end", "max"))
# Horizontal chart :
amCandlestick(data = data_candleStick2, horiz = TRUE)
# Parse date
amCandlestick(data = data_candleStick2, dataDateFormat = "YYYY-MM-DD")
# Data spanning several months
data_candleStick2$category <- c("2015-01-01", "2015-02-01", "2015-03-01",
"2015-04-01", "2015-05-01", "2015-06-01",
"2015-07-01", "2015-08-01", "2015-09-01",
"2015-10-01", "2015-11-01", "2015-12-01")
amCandlestick(data = data_candleStick2, dataDateFormat = "YYYY-MM-DD", minPeriod = "MM")
# Decimal precision
require(pipeR)
amCandlestick(data = data_candleStick2, horiz = TRUE) \%>>\%
setProperties(precision = 2)
}
}
\references{
See online documentation \url{https://datastorm-open.github.io/introduction_ramcharts/}
and \link{amChartsAPI}
}
\seealso{
\link{amOptions}, \link{amBarplot}, \link{amBoxplot}, \link{amHist}, \link{amPie},
\link{amPlot}, \link{amTimeSeries}, \link{amStockMultiSet}, \link{amBullet}, \link{amRadar},
\link{amWind}, \link{amFunnel}, \link{amAngularGauge}, \link{amSolidGauge}, \link{amMekko},
\link{amCandlestick}, \link{amFloatingBar}, \link{amOHLC}, \link{amWaterfall}
}
|
# plot3.R — plot the three energy sub-metering series for 2007-02-01 and
# 2007-02-02 from the UCI household power consumption data set.
library(dplyr)
library(lubridate)

# This data set marks missing values with "?"; declaring na.strings lets
# read.csv parse the sub-metering columns cleanly instead of relying on
# as.numeric() coercion warnings downstream.
mydata <- read.csv("household_power_consumption.txt", sep=";", na.strings = "?")

startDate <- ymd("2007-02-01")
endDate <- ymd("2007-02-03")  # exclusive upper bound: keeps Feb 1-2 only

# Combine the Date and Time columns into a single POSIXct timestamp,
# then restrict to the two-day window of interest.
mydata <- mutate(mydata, DateTime = dmy_hms(paste(as.character(Date), " ", as.character(Time))))
mydata <- filter(mydata, DateTime >= startDate)
mydata <- filter(mydata, DateTime < endDate)

# DateTime is already POSIXct, so it can be used on the x-axis directly;
# the previous strptime() round-trip through character was redundant.
x <- mydata$DateTime

png("plot3.png", width=480, height=480)
plot(x, as.numeric(as.character(mydata$Sub_metering_1)), type="l", xlab="", ylab="Energy sub metering", col="black")
# lines() takes no xlab/ylab arguments (they were silently producing
# "not a graphical parameter" warnings), so they are omitted here.
lines(x, as.numeric(as.character(mydata$Sub_metering_2)), col="red")
lines(x, as.numeric(as.character(mydata$Sub_metering_3)), col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), col=c("black","red","blue"), cex=0.6)
dev.off()
|
/plot3.R
|
no_license
|
lcheeme1/ExData_Plotting1
|
R
| false
| false
| 914
|
r
|
library(dplyr)
library(lubridate)
mydata <- read.csv("household_power_consumption.txt", sep=";")
startDate <- ymd("2007-02-01")
endDate <- ymd("2007-02-03")
mydata <- mutate(mydata, DateTime = dmy_hms(paste(as.character(Date), " ", as.character(Time))))
mydata <- filter(mydata, DateTime >= startDate)
mydata <- filter(mydata, DateTime < endDate)
x <- strptime(mydata$DateTime,"%Y-%m-%d %H:%M:%S")
png("plot3.png", width=480, height=480)
plot(x, as.numeric(as.character(mydata$Sub_metering_1)), type="l", xlab="", ylab="Energy sub metering", col="black")
lines(x, as.numeric(as.character(mydata$Sub_metering_2)), xlab="", ylab="Energy sub metering", col="red")
lines(x, as.numeric(as.character(mydata$Sub_metering_3)), xlab="", ylab="Energy sub metering", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), col=c("black","red","blue"), cex=0.6)
dev.off()
|
#### Week 2: Statistical Linear Regression Models
## Least squares is an estimation tool.
## Consider developing a probabilistic model for linear regression:
##   Yi = b0 + b1*Xi + Ei
## The errors Ei are assumed iid N(0, sigma^2).
## The error term may be thought of as capturing variables missing from the model.
## Note:
##   E[Yi|Xi=xi]  = mu_i = b0 + b1*xi
##   Var(Yi|Xi=xi) = sigma^2
|
/Week 2/1_StatisticalLinearRegressionModel.R
|
no_license
|
hd1812/Regression_Models
|
R
| false
| false
| 344
|
r
|
#### Week 2: Statistical Linear Regression Models
## Least squares is an estimation tool.
## Consider developing a probabilistic model for linear regression:
##   Yi = b0 + b1*Xi + Ei
## The errors Ei are assumed iid N(0, sigma^2).
## The error term may be thought of as capturing variables missing from the model.
## Note:
##   E[Yi|Xi=xi]  = mu_i = b0 + b1*xi
##   Var(Yi|Xi=xi) = sigma^2
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biopaxToCytoscape.R
\name{getPublicationRefs}
\alias{getPublicationRefs}
\title{getPublicationRefs()}
\usage{
getPublicationRefs(df, tdf)
}
\arguments{
\item{df}{the main biopax data frame}
\item{tdf}{a subsegment of the biopax data frame from which citations and citation dates are to be found. Must have columns "property", "property_value" and "id".}
}
\value{
data frame with columns "id" "citation" "date"
}
\description{
Finds publication citations and citation dates for entries in a BioPAX data
frame, returning one row per id with its citation and date.
}
|
/man/getPublicationRefs.Rd
|
no_license
|
biodev/packageDir
|
R
| false
| true
| 535
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biopaxToCytoscape.R
\name{getPublicationRefs}
\alias{getPublicationRefs}
\title{getPublicationRefs()}
\usage{
getPublicationRefs(df, tdf)
}
\arguments{
\item{df}{the main biopax data frame}
\item{tdf}{a subsegment of the biopax data frame from which citations and citation dates are to be found. Must have columns "property", "property_value" and "id".}
}
\value{
data frame with columns "id" "citation" "date"
}
\description{
Finds publication citations and citation dates for entries in a BioPAX data
frame, returning one row per id with its citation and date.
}
|
# Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of finalWoo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Execute the Study
#'
#' @details
#' This function executes the finalWoo Study.
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#' DatabaseConnector package.
#' @param cdmDatabaseSchema Schema name where your patient-level data in OMOP CDM format resides.
#' Note that for SQL Server, this should include both the database and
#' schema name, for example 'cdm_data.dbo'.
#' @param cdmDatabaseName Shareable name of the database
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#'                             write privileges in this schema. Note that for SQL Server, this should
#'                             include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable The name of the table that will be created in the work database schema.
#' This table will hold the target population cohorts used in this
#' study.
#' @param oracleTempSchema Should be used in Oracle to specify a schema where the user has write
#'                         privileges for storing temporary tables.
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#' (/). Do not use a folder on a network drive since this greatly impacts
#' performance.
#' @param createProtocol Creates a protocol based on the analyses specification
#' @param createCohorts Create the cohortTable table with the target population and outcome cohorts?
#' @param runAnalyses Run the model development
#' @param createResultsDoc Create a document containing the results of each prediction
#' @param createValidationPackage Create a package for sharing the models
#' @param analysesToValidate A vector of analysis ids (e.g., c(1,3,10)) specifying which analyses to export into the validation package. Default is NULL and all are exported.
#' @param packageResults Should results be packaged for later sharing?
#' @param minCellCount The minimum number of subjects contributing to a count before it can be included
#' in packaged results.
#' @param createShiny Create a shiny app with the results
#' @param createJournalDocument Do you want to create a template journal document populated with results?
#' @param analysisIdDocument Which Analysis_id do you want to create the document for?
#' @param verbosity Sets the level of the verbosity. If the log level is at or higher in priority than the logger threshold, a message will print. The levels are:
#' \itemize{
#' \item{DEBUG}{Highest verbosity showing all debug statements}
#' \item{TRACE}{Showing information about start and end of steps}
#' \item{INFO}{Show informative information (Default)}
#' \item{WARN}{Show warning messages}
#' \item{ERROR}{Show error messages}
#' \item{FATAL}{Be silent except for fatal errors}
#' }
#' @param cdmVersion The version of the common data model
#'
#' @examples
#' \dontrun{
#' connectionDetails <- createConnectionDetails(dbms = "postgresql",
#' user = "joe",
#' password = "secret",
#' server = "myserver")
#'
#' execute(connectionDetails,
#' cdmDatabaseSchema = "cdm_data",
#' cdmDatabaseName = 'shareable name of the database'
#' cohortDatabaseSchema = "study_results",
#' cohortTable = "cohort",
#' oracleTempSchema = NULL,
#' outputFolder = "c:/temp/study_results",
#' createProtocol = T,
#' createCohorts = T,
#' runAnalyses = T,
#' createResultsDoc = T,
#' createValidationPackage = T,
#' packageResults = F,
#' minCellCount = 5,
#' createShiny = F,
#' verbosity = "INFO",
#' cdmVersion = 5)
#' }
#'
#' @export
execute <- function(connectionDetails,
                    cdmDatabaseSchema,
                    cdmDatabaseName = 'friendly database name',
                    cohortDatabaseSchema = cdmDatabaseSchema,
                    cohortTable = "cohort",
                    oracleTempSchema = cohortDatabaseSchema,
                    outputFolder,
                    createProtocol = F,
                    createCohorts = F,
                    runAnalyses = F,
                    createResultsDoc = F,
                    createValidationPackage = F,
                    analysesToValidate = NULL,
                    packageResults = F,
                    minCellCount= 5,
                    createShiny = F,
                    createJournalDocument = F,
                    analysisIdDocument = 1,
                    verbosity = "INFO",
                    cdmVersion = 5) {

  # Each logical flag below enables one independent stage of the study
  # pipeline; stages are executed in order when their flag is TRUE.
  # NOTE(review): the logical arguments createCohorts / createValidationPackage /
  # createJournalDocument share names with the package functions they gate.
  # R resolves the calls to the functions (non-function locals are skipped in
  # call position), but renaming the flags would be clearer.

  # Ensure the output folder exists and send all logging to a file inside it.
  if (!file.exists(outputFolder))
    dir.create(outputFolder, recursive = TRUE)

  ParallelLogger::addDefaultFileLogger(file.path(outputFolder, "log.txt"))

  # Stage 1: generate the study protocol document.
  if(createProtocol){
    createPlpProtocol(outputFolder)
  }

  # Stage 2: instantiate the target and outcome cohorts in the database.
  if (createCohorts) {
    ParallelLogger::logInfo("Creating cohorts")
    createCohorts(connectionDetails = connectionDetails,
                  cdmDatabaseSchema = cdmDatabaseSchema,
                  cohortDatabaseSchema = cohortDatabaseSchema,
                  cohortTable = cohortTable,
                  oracleTempSchema = oracleTempSchema,
                  outputFolder = outputFolder)
  }

  # Stage 3: run the prediction analyses. The analysis specification ships
  # with the package as JSON; the connection/schema arguments supplied to
  # execute() are injected into the loaded settings before running.
  if(runAnalyses){
    ParallelLogger::logInfo("Running predictions")
    predictionAnalysisListFile <- system.file("settings",
                                              "predictionAnalysisList.json",
                                              package = "finalWoo")
    predictionAnalysisList <- PatientLevelPrediction::loadPredictionAnalysisList(predictionAnalysisListFile)
    predictionAnalysisList$connectionDetails = connectionDetails
    predictionAnalysisList$cdmDatabaseSchema = cdmDatabaseSchema
    predictionAnalysisList$cdmDatabaseName = cdmDatabaseName
    predictionAnalysisList$oracleTempSchema = oracleTempSchema
    predictionAnalysisList$cohortDatabaseSchema = cohortDatabaseSchema
    predictionAnalysisList$cohortTable = cohortTable
    predictionAnalysisList$outcomeDatabaseSchema = cohortDatabaseSchema
    predictionAnalysisList$outcomeTable = cohortTable
    predictionAnalysisList$cdmVersion = cdmVersion
    predictionAnalysisList$outputFolder = outputFolder
    predictionAnalysisList$verbosity = verbosity
    result <- do.call(PatientLevelPrediction::runPlpAnalyses, predictionAnalysisList)
  }

  # Stage 4: bundle results for sharing, suppressing small cell counts.
  if (packageResults) {
    ParallelLogger::logInfo("Packaging results")
    packageResults(outputFolder = outputFolder,
                   minCellCount = minCellCount)
  }

  # Stage 5: render a results document from the analyses and the protocol.
  if(createResultsDoc){
    createMultiPlpReport(analysisLocation=outputFolder,
                         protocolLocation = file.path(outputFolder,'protocol.docx'),
                         includeModels = F)
  }

  # Stage 6: build a standalone validation package. The package name in the
  # JSON settings is rewritten with a 'Validation' suffix before hydration.
  if(createValidationPackage){
    predictionAnalysisListFile <- system.file("settings",
                                              "predictionAnalysisList.json",
                                              package = "finalWoo")
    jsonSettings <- tryCatch({Hydra::loadSpecifications(file=predictionAnalysisListFile)},
                             error=function(cond) {
                               stop('Issue with json file...')
                             })
    pn <- jsonlite::fromJSON(jsonSettings)$packageName
    jsonSettings <- gsub(pn,paste0(pn,'Validation'),jsonSettings)
    jsonSettings <- gsub('PatientLevelPredictionStudy','PatientLevelPredictionValidationStudy',jsonSettings)
    createValidationPackage(modelFolder = outputFolder,
                            outputFolder = file.path(outputFolder, paste0(pn,'Validation')),
                            minCellCount = minCellCount,
                            databaseName = cdmDatabaseName,
                            jsonSettings = jsonSettings,
                            analysisIds = analysesToValidate)
  }

  # Stage 7: populate a shiny app with the (count-suppressed) results.
  if (createShiny) {
    populateShinyApp(resultDirectory = outputFolder,
                     minCellCount = minCellCount,
                     databaseName = cdmDatabaseName)
  }

  # Stage 8: draft a journal-article template for one chosen analysis.
  if(createJournalDocument){
    predictionAnalysisListFile <- system.file("settings",
                                              "predictionAnalysisList.json",
                                              package = "finalWoo")
    jsonSettings <- tryCatch({Hydra::loadSpecifications(file=predictionAnalysisListFile)},
                             error=function(cond) {
                               stop('Issue with json file...')
                             })
    pn <- jsonlite::fromJSON(jsonSettings)
    createJournalDocument(resultDirectory = outputFolder,
                          analysisId = analysisIdDocument,
                          includeValidation = T,
                          cohortIds = pn$cohortDefinitions$id,
                          cohortNames = pn$cohortDefinitions$name)
  }

  # Called for side effects only; return NULL invisibly.
  invisible(NULL)
}
|
/finalWoo/R/Main.R
|
no_license
|
OHDSI/StudyProtocols
|
R
| false
| false
| 10,539
|
r
|
# Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of finalWoo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Execute the Study
#'
#' @details
#' This function executes the finalWoo Study.
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#' DatabaseConnector package.
#' @param cdmDatabaseSchema Schema name where your patient-level data in OMOP CDM format resides.
#' Note that for SQL Server, this should include both the database and
#' schema name, for example 'cdm_data.dbo'.
#' @param cdmDatabaseName Shareable name of the database
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#' write priviliges in this schema. Note that for SQL Server, this should
#' include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable The name of the table that will be created in the work database schema.
#' This table will hold the target population cohorts used in this
#' study.
#' @param oracleTempSchema Should be used in Oracle to specify a schema where the user has write
#' priviliges for storing temporary tables.
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#' (/). Do not use a folder on a network drive since this greatly impacts
#' performance.
#' @param createProtocol Creates a protocol based on the analyses specification
#' @param createCohorts Create the cohortTable table with the target population and outcome cohorts?
#' @param runAnalyses Run the model development
#' @param createResultsDoc Create a document containing the results of each prediction
#' @param createValidationPackage Create a package for sharing the models
#' @param analysesToValidate A vector of analysis ids (e.g., c(1,3,10)) specifying which analysese to export into validation package. Default is NULL and all are exported.
#' @param packageResults Should results be packaged for later sharing?
#' @param minCellCount The minimum number of subjects contributing to a count before it can be included
#' in packaged results.
#' @param createShiny Create a shiny app with the results
#' @param createJournalDocument Do you want to create a template journal document populated with results?
#' @param analysisIdDocument Which Analysis_id do you want to create the document for?
#' @param verbosity Sets the level of the verbosity. If the log level is at or higher in priority than the logger threshold, a message will print. The levels are:
#' \itemize{
#' \item{DEBUG}{Highest verbosity showing all debug statements}
#' \item{TRACE}{Showing information about start and end of steps}
#' \item{INFO}{Show informative information (Default)}
#' \item{WARN}{Show warning messages}
#' \item{ERROR}{Show error messages}
#' \item{FATAL}{Be silent except for fatal errors}
#' }
#' @param cdmVersion The version of the common data model
#'
#' @examples
#' \dontrun{
#' connectionDetails <- createConnectionDetails(dbms = "postgresql",
#' user = "joe",
#' password = "secret",
#' server = "myserver")
#'
#' execute(connectionDetails,
#' cdmDatabaseSchema = "cdm_data",
#' cdmDatabaseName = 'shareable name of the database'
#' cohortDatabaseSchema = "study_results",
#' cohortTable = "cohort",
#' oracleTempSchema = NULL,
#' outputFolder = "c:/temp/study_results",
#' createProtocol = T,
#' createCohorts = T,
#' runAnalyses = T,
#' createResultsDoc = T,
#' createValidationPackage = T,
#' packageResults = F,
#' minCellCount = 5,
#' createShiny = F,
#' verbosity = "INFO",
#' cdmVersion = 5)
#' }
#'
#' @export
# Orchestrates the finalWoo study end to end. Every step below is gated by
# its own logical flag argument, so steps can be run independently or
# re-run without repeating earlier ones. Parameters are documented in the
# roxygen block above.
execute <- function(connectionDetails,
cdmDatabaseSchema,
cdmDatabaseName = 'friendly database name',
cohortDatabaseSchema = cdmDatabaseSchema,
cohortTable = "cohort",
oracleTempSchema = cohortDatabaseSchema,
outputFolder,
createProtocol = F,
createCohorts = F,
runAnalyses = F,
createResultsDoc = F,
createValidationPackage = F,
analysesToValidate = NULL,
packageResults = F,
minCellCount= 5,
createShiny = F,
createJournalDocument = F,
analysisIdDocument = 1,
verbosity = "INFO",
cdmVersion = 5) {
# Make sure the output folder exists and log everything to <outputFolder>/log.txt.
if (!file.exists(outputFolder))
dir.create(outputFolder, recursive = TRUE)
ParallelLogger::addDefaultFileLogger(file.path(outputFolder, "log.txt"))
# Step: generate the study protocol document into outputFolder.
if(createProtocol){
createPlpProtocol(outputFolder)
}
# Step: instantiate the target and outcome cohorts in the cohort table.
# (The local logical flag does not shadow the createCohorts() function:
# R's function-call lookup skips non-function bindings.)
if (createCohorts) {
ParallelLogger::logInfo("Creating cohorts")
createCohorts(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
oracleTempSchema = oracleTempSchema,
outputFolder = outputFolder)
}
# Step: run the prediction analyses described by the package's bundled
# predictionAnalysisList.json, overriding its connection/schema settings
# with the arguments supplied to this function before dispatching to
# PatientLevelPrediction::runPlpAnalyses via do.call().
if(runAnalyses){
ParallelLogger::logInfo("Running predictions")
predictionAnalysisListFile <- system.file("settings",
"predictionAnalysisList.json",
package = "finalWoo")
predictionAnalysisList <- PatientLevelPrediction::loadPredictionAnalysisList(predictionAnalysisListFile)
predictionAnalysisList$connectionDetails = connectionDetails
predictionAnalysisList$cdmDatabaseSchema = cdmDatabaseSchema
predictionAnalysisList$cdmDatabaseName = cdmDatabaseName
predictionAnalysisList$oracleTempSchema = oracleTempSchema
predictionAnalysisList$cohortDatabaseSchema = cohortDatabaseSchema
predictionAnalysisList$cohortTable = cohortTable
# Outcomes live in the same schema/table as the target cohorts.
predictionAnalysisList$outcomeDatabaseSchema = cohortDatabaseSchema
predictionAnalysisList$outcomeTable = cohortTable
predictionAnalysisList$cdmVersion = cdmVersion
predictionAnalysisList$outputFolder = outputFolder
predictionAnalysisList$verbosity = verbosity
result <- do.call(PatientLevelPrediction::runPlpAnalyses, predictionAnalysisList)
}
# Step: export a shareable results bundle, censoring counts below minCellCount.
if (packageResults) {
ParallelLogger::logInfo("Packaging results")
packageResults(outputFolder = outputFolder,
minCellCount = minCellCount)
}
# Step: build a Word report of the prediction results. Assumes
# <outputFolder>/protocol.docx exists (produced by the createProtocol step).
if(createResultsDoc){
createMultiPlpReport(analysisLocation=outputFolder,
protocolLocation = file.path(outputFolder,'protocol.docx'),
includeModels = F)
}
# Step: generate a standalone validation package. The package name inside the
# JSON specification is rewritten with a 'Validation' suffix by plain text
# substitution on the raw JSON string.
if(createValidationPackage){
predictionAnalysisListFile <- system.file("settings",
"predictionAnalysisList.json",
package = "finalWoo")
jsonSettings <- tryCatch({Hydra::loadSpecifications(file=predictionAnalysisListFile)},
error=function(cond) {
stop('Issue with json file...')
})
pn <- jsonlite::fromJSON(jsonSettings)$packageName
# NOTE(review): pn is used as a regex pattern here; fine for plain package
# names, but would misbehave if the name contained regex metacharacters.
jsonSettings <- gsub(pn,paste0(pn,'Validation'),jsonSettings)
jsonSettings <- gsub('PatientLevelPredictionStudy','PatientLevelPredictionValidationStudy',jsonSettings)
createValidationPackage(modelFolder = outputFolder,
outputFolder = file.path(outputFolder, paste0(pn,'Validation')),
minCellCount = minCellCount,
databaseName = cdmDatabaseName,
jsonSettings = jsonSettings,
analysisIds = analysesToValidate)
}
# Step: populate a shiny app with the (censored) study results.
if (createShiny) {
populateShinyApp(resultDirectory = outputFolder,
minCellCount = minCellCount,
databaseName = cdmDatabaseName)
}
# Step: create a journal-article template for a single analysis, pulling the
# cohort ids and names from the JSON specification.
if(createJournalDocument){
predictionAnalysisListFile <- system.file("settings",
"predictionAnalysisList.json",
package = "finalWoo")
jsonSettings <- tryCatch({Hydra::loadSpecifications(file=predictionAnalysisListFile)},
error=function(cond) {
stop('Issue with json file...')
})
pn <- jsonlite::fromJSON(jsonSettings)
createJournalDocument(resultDirectory = outputFolder,
analysisId = analysisIdDocument,
includeValidation = T,
cohortIds = pn$cohortDefinitions$id,
cohortNames = pn$cohortDefinitions$name)
}
# Called for its side effects; return nothing.
invisible(NULL)
}
|
#load in lubridate
library(lubridate)
#read in streamflow data
datH <- read.csv("activity5/stream_flow_data.csv",
na.strings = c("Eqp"))
head(datH)
#read in precipitation data
#hourly precipitation is in mm
datP <- read.csv("activity5/2049867.csv")
head(datP)
#only use most reliable measurements
datD <- datH[datH$discharge.flag == "A",]
#### define time for streamflow #####
#convert date and time
datesD <- as.Date(datD$date, "%m/%d/%Y")
#get day of year
datD$doy <- yday(datesD)
#calculate year
datD$year <- year(datesD)
#define time
timesD <- hm(datD$time)
#### define time for precipitation #####
dateP <- ymd_hm(datP$DATE)
#get day of year
datP$doy <- yday(dateP)
#get year
datP$year <- year(dateP)
#### get decimal formats #####
#convert time from a string to a more usable format
#with a decimal hour
datD$hour <- hour(timesD ) + (minute(timesD )/60)
#get full decimal time
datD$decDay <- datD$doy + (datD$hour/24)
#calculate a decimal year, but account for leap year
#(bug fix: the code previously divided by 365 unconditionally, contradicting
# this comment and the leap-year handling applied to datP just below)
datD$decYear <- ifelse(leap_year(datD$year), datD$year + (datD$doy-1)/366,
                       datD$year + (datD$doy-1)/365)
#calculate times for datP
datP$hour <- hour(dateP ) + (minute(dateP )/60)
#get full decimal time
datP$decDay <- datP$doy + (datP$hour/24)
#calculate a decimal year, but account for leap year
datP$decYear <- ifelse(leap_year(datP$year),datP$year + (datP$decDay/366),
datP$year + (datP$decDay/365))
#plot discharge
plot(datD$decYear, datD$discharge, type="l", xlab="Year", ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")))
# number of observations
nrow(datD)
nrow(datP)
#basic formatting
aveF <- aggregate(datD$discharge, by=list(datD$doy), FUN="mean")
colnames(aveF) <- c("doy","dailyAve")
sdF <- aggregate(datD$discharge, by=list(datD$doy), FUN="sd")
colnames(sdF) <- c("doy","dailySD")
#start new plot
dev.new(width=8,height=8)
#bigger margins
par(mai=c(1,1,1,1))
#make plot
plot(aveF$doy,aveF$dailyAve,
type="l",
xlab="Year",
ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")),
lwd=2,
ylim=c(0,90),
xaxs="i", yaxs ="i",#remove gaps from axes
axes=FALSE)#no axes
polygon(c(aveF$doy, rev(aveF$doy)),#x coordinates
c(aveF$dailyAve-sdF$dailySD,rev(aveF$dailyAve+sdF$dailySD)),#ycoord
col=rgb(0.392, 0.584, 0.929,.2), #color that is semi-transparent
border=NA#no border
)
axis(1, seq(0,360, by=40), #tick intervals
lab=seq(0,360, by=40)) #tick labels
axis(2, seq(0,80, by=20),
seq(0,80, by=20),
las = 2)#show ticks at 90 degree angle
legend("topright", c("mean","1 standard deviation"), #legend items
lwd=c(2,NA),#lines
col=c("black",rgb(0.392, 0.584, 0.929,.2)),#colors
pch=c(NA,15),#symbols
bty="n")#no legend border
##### QUESTION 5
library(dplyr)
#bigger margins
par(mai=c(1,1,1,1))
#make plot
plot(aveF$doy,aveF$dailyAve,
type="l",
xlab="Month",
ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")),
lwd=2,
ylim=c(0,170),
xaxs="i", yaxs ="i",#remove gaps from axes
axes=FALSE)#no axes
lines(filter(datD,year==2017)$doy,filter(datD,year==2017)$discharge,col="red") # add
polygon(c(aveF$doy, rev(aveF$doy)),#x coordinates
c(aveF$dailyAve-sdF$dailySD,rev(aveF$dailyAve+sdF$dailySD)),#ycoord
col=rgb(0.392, 0.584, 0.929,.2), #color that is semi-transparent
border=NA#no border
)
axis(1, seq(0,360, by=30), #tick intervals
lab=seq(0,12, by=1)) #tick labels
axis(2, seq(0,160, by=20),
seq(0,160, by=20),
las = 2)#show ticks at 90 degree angle
legend("topright", c("mean","1 standard deviation","2017"), #legend items
lwd=c(2,NA,2),#lines
col=c("black",rgb(0.392, 0.584, 0.929,.2),"red"),#colors
pch=c(NA,15,NA),#symbols
bty="n")#no legend border
##### what days have full precip
datP2 <- datP %>%
group_by(doy, year) %>%
add_count() %>%
mutate(fullPrecip = n >= 24)
fullDays <- datP2 %>%
select(doy,year,fullPrecip) %>%
distinct()
datD2 <- left_join(datD,fullDays)
# make plot
par(mai=c(1,1,1,1))
plot(datD2$decYear, datD2$discharge, type="l", xlab="Year", ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")))
points(filter(datD2,fullPrecip)$decYear,filter(datD2,fullPrecip)$discharge,col="red",pch=20)
legend("topright", c("full precip. available"), #legend items
lwd=c(NA),#lines
col=c("red"),#colors
pch=c(20),#symbols
bty="n")#no legend border
#subset discharge and precipitation within range of interest
hydroD <- datD[datD$doy >= 248 & datD$doy < 250 & datD$year == 2011,]
hydroP <- datP[datP$doy >= 248 & datP$doy < 250 & datP$year == 2011,]
min(hydroD$discharge)
#get minimum and maximum range of discharge to plot
#go outside of the range so that it's easy to see high/low values
#floor rounds down the integer
yl <- floor(min(hydroD$discharge))-1
#ceiling rounds up to the integer
yh <- ceiling(max(hydroD$discharge))+1
#minimum and maximum range of precipitation to plot
pl <- 0
pm <- ceiling(max(hydroP$HPCP))+.5
#scale precipitation to fit on the discharge axis
hydroP$pscale <- (((yh-yl)/(pm-pl)) * hydroP$HPCP) + yl
par(mai=c(1,1,1,1))
#make plot of discharge
plot(hydroD$decDay,
hydroD$discharge,
type="l",
ylim=c(yl,yh),
lwd=2,
xlab="Day of year",
ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")))
#add bars to indicate precipitation
for(i in 1:nrow(hydroP)){
polygon(c(hydroP$decDay[i]-0.017,hydroP$decDay[i]-0.017,
hydroP$decDay[i]+0.017,hydroP$decDay[i]+0.017),
c(yl,hydroP$pscale[i],hydroP$pscale[i],yl),
col=rgb(0.392, 0.584, 0.929,.2), border=NA)
}
## look for winter days with full precip
datD2 %>% filter(fullPrecip,year==2012) %>% arrange(-doy)
## 2012, doy = 362 looks like good option
#subset discharge and precipitation within range of interest
hydroD <- datD[datD$doy >= 361 & datD$doy < 363 & datD$year == 2012,]
hydroP <- datP[datP$doy >= 361 & datP$doy < 363 & datP$year == 2012,]
min(hydroD$discharge)
#get minimum and maximum range of discharge to plot
#go outside of the range so that it's easy to see high/low values
#floor rounds down the integer
yl <- floor(min(hydroD$discharge))-1
#ceiling rounds up to the integer
yh <- ceiling(max(hydroD$discharge))+1
#minimum and maximum range of precipitation to plot
pl <- 0
pm <- ceiling(max(hydroP$HPCP))+.5
#scale precipitation to fit on the discharge axis
hydroP$pscale <- (((yh-yl)/(pm-pl)) * hydroP$HPCP) + yl
par(mai=c(1,1,1,1))
#make plot of discharge
plot(hydroD$decDay,
hydroD$discharge,
type="l",
ylim=c(yl,yh),
lwd=2,
xlab="Day of year",
ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")))
#add bars to indicate precipitation
for(i in 1:nrow(hydroP)){
polygon(c(hydroP$decDay[i]-0.017,hydroP$decDay[i]-0.017,
hydroP$decDay[i]+0.017,hydroP$decDay[i]+0.017),
c(yl,hydroP$pscale[i],hydroP$pscale[i],yl),
col=rgb(0.392, 0.584, 0.929,.2), border=NA)
}
library(ggplot2)
#specify year as a factor
datD$yearPlot <- as.factor(datD$year)
#make a boxplot
ggplot(data= datD, aes(yearPlot,discharge)) +
geom_boxplot()
#make a violin plot
ggplot(data= datD, aes(yearPlot,discharge)) +
geom_violin()
## seasons plot
### WINTER - DEC. 1ST TO FEBRUARY 28TH
### SPRING - MARCH 1ST TO MAY 31ST
### SUMMER - JUNE 1ST TO AUGUST 31ST
### AUTUMN - SEPTEMBER 1ST TO NOVEMBER 30TH
library(tidyr)
datD3 <- datD2 %>%
separate(date,into=c("month","day","year"),sep="/") %>%
mutate(season = case_when(month %in% c("12","1","2") ~ "WINTER",
month %in% c("3","4","5") ~ "SPRING",
month %in% c("6","7","8") ~ "SUMMER",
month %in% c("9","10","11") ~ "AUTUMN"
))
datD3 %>%
filter(year %in% c(2016,2017)) %>%
ggplot(aes(x=season,y=discharge)) +
geom_violin(aes(fill=season,color=season)) +
facet_wrap(~year) +
theme_grey() +
labs(x = "Season",
y = expression(paste("Discharge ft"^"3 ","sec"^"-1")),
fill = "Season",
color = "Season"
)
|
/activity5/activity5_script.R
|
no_license
|
CaioBrighenti/GEOG331
|
R
| false
| false
| 8,289
|
r
|
#load in lubridate
library(lubridate)
#read in streamflow data
datH <- read.csv("activity5/stream_flow_data.csv",
na.strings = c("Eqp"))
head(datH)
#read in precipitation data
#hourly precipitation is in mm
datP <- read.csv("activity5/2049867.csv")
head(datP)
#only use most reliable measurements
datD <- datH[datH$discharge.flag == "A",]
#### define time for streamflow #####
#convert date and time
datesD <- as.Date(datD$date, "%m/%d/%Y")
#get day of year
datD$doy <- yday(datesD)
#calculate year
datD$year <- year(datesD)
#define time
timesD <- hm(datD$time)
#### define time for precipitation #####
dateP <- ymd_hm(datP$DATE)
#get day of year
datP$doy <- yday(dateP)
#get year
datP$year <- year(dateP)
#### get decimal formats #####
#convert time from a string to a more usable format
#with a decimal hour
datD$hour <- hour(timesD ) + (minute(timesD )/60)
#get full decimal time
datD$decDay <- datD$doy + (datD$hour/24)
#calculate a decimal year, but account for leap year
#(bug fix: the code previously divided by 365 unconditionally, contradicting
# this comment and the leap-year handling applied to datP just below)
datD$decYear <- ifelse(leap_year(datD$year), datD$year + (datD$doy-1)/366,
                       datD$year + (datD$doy-1)/365)
#calculate times for datP
datP$hour <- hour(dateP ) + (minute(dateP )/60)
#get full decimal time
datP$decDay <- datP$doy + (datP$hour/24)
#calculate a decimal year, but account for leap year
datP$decYear <- ifelse(leap_year(datP$year),datP$year + (datP$decDay/366),
datP$year + (datP$decDay/365))
#plot discharge
plot(datD$decYear, datD$discharge, type="l", xlab="Year", ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")))
# number of observations
nrow(datD)
nrow(datP)
#basic formatting
aveF <- aggregate(datD$discharge, by=list(datD$doy), FUN="mean")
colnames(aveF) <- c("doy","dailyAve")
sdF <- aggregate(datD$discharge, by=list(datD$doy), FUN="sd")
colnames(sdF) <- c("doy","dailySD")
#start new plot
dev.new(width=8,height=8)
#bigger margins
par(mai=c(1,1,1,1))
#make plot
plot(aveF$doy,aveF$dailyAve,
type="l",
xlab="Year",
ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")),
lwd=2,
ylim=c(0,90),
xaxs="i", yaxs ="i",#remove gaps from axes
axes=FALSE)#no axes
polygon(c(aveF$doy, rev(aveF$doy)),#x coordinates
c(aveF$dailyAve-sdF$dailySD,rev(aveF$dailyAve+sdF$dailySD)),#ycoord
col=rgb(0.392, 0.584, 0.929,.2), #color that is semi-transparent
border=NA#no border
)
axis(1, seq(0,360, by=40), #tick intervals
lab=seq(0,360, by=40)) #tick labels
axis(2, seq(0,80, by=20),
seq(0,80, by=20),
las = 2)#show ticks at 90 degree angle
legend("topright", c("mean","1 standard deviation"), #legend items
lwd=c(2,NA),#lines
col=c("black",rgb(0.392, 0.584, 0.929,.2)),#colors
pch=c(NA,15),#symbols
bty="n")#no legend border
##### QUESTION 5
library(dplyr)
#bigger margins
par(mai=c(1,1,1,1))
#make plot
plot(aveF$doy,aveF$dailyAve,
type="l",
xlab="Month",
ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")),
lwd=2,
ylim=c(0,170),
xaxs="i", yaxs ="i",#remove gaps from axes
axes=FALSE)#no axes
lines(filter(datD,year==2017)$doy,filter(datD,year==2017)$discharge,col="red") # add
polygon(c(aveF$doy, rev(aveF$doy)),#x coordinates
c(aveF$dailyAve-sdF$dailySD,rev(aveF$dailyAve+sdF$dailySD)),#ycoord
col=rgb(0.392, 0.584, 0.929,.2), #color that is semi-transparent
border=NA#no border
)
axis(1, seq(0,360, by=30), #tick intervals
lab=seq(0,12, by=1)) #tick labels
axis(2, seq(0,160, by=20),
seq(0,160, by=20),
las = 2)#show ticks at 90 degree angle
legend("topright", c("mean","1 standard deviation","2017"), #legend items
lwd=c(2,NA,2),#lines
col=c("black",rgb(0.392, 0.584, 0.929,.2),"red"),#colors
pch=c(NA,15,NA),#symbols
bty="n")#no legend border
##### what days have full precip
datP2 <- datP %>%
group_by(doy, year) %>%
add_count() %>%
mutate(fullPrecip = n >= 24)
fullDays <- datP2 %>%
select(doy,year,fullPrecip) %>%
distinct()
datD2 <- left_join(datD,fullDays)
# make plot
par(mai=c(1,1,1,1))
plot(datD2$decYear, datD2$discharge, type="l", xlab="Year", ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")))
points(filter(datD2,fullPrecip)$decYear,filter(datD2,fullPrecip)$discharge,col="red",pch=20)
legend("topright", c("full precip. available"), #legend items
lwd=c(NA),#lines
col=c("red"),#colors
pch=c(20),#symbols
bty="n")#no legend border
#subset discharge and precipitation within range of interest
hydroD <- datD[datD$doy >= 248 & datD$doy < 250 & datD$year == 2011,]
hydroP <- datP[datP$doy >= 248 & datP$doy < 250 & datP$year == 2011,]
min(hydroD$discharge)
#get minimum and maximum range of discharge to plot
#go outside of the range so that it's easy to see high/low values
#floor rounds down the integer
yl <- floor(min(hydroD$discharge))-1
#ceiling rounds up to the integer
yh <- ceiling(max(hydroD$discharge))+1
#minimum and maximum range of precipitation to plot
pl <- 0
pm <- ceiling(max(hydroP$HPCP))+.5
#scale precipitation to fit on the discharge axis
hydroP$pscale <- (((yh-yl)/(pm-pl)) * hydroP$HPCP) + yl
par(mai=c(1,1,1,1))
#make plot of discharge
plot(hydroD$decDay,
hydroD$discharge,
type="l",
ylim=c(yl,yh),
lwd=2,
xlab="Day of year",
ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")))
#add bars to indicate precipitation
for(i in 1:nrow(hydroP)){
polygon(c(hydroP$decDay[i]-0.017,hydroP$decDay[i]-0.017,
hydroP$decDay[i]+0.017,hydroP$decDay[i]+0.017),
c(yl,hydroP$pscale[i],hydroP$pscale[i],yl),
col=rgb(0.392, 0.584, 0.929,.2), border=NA)
}
## look for winter days with full precip
datD2 %>% filter(fullPrecip,year==2012) %>% arrange(-doy)
## 2012, doy = 362 looks like good option
#subset discharge and precipitation within range of interest
hydroD <- datD[datD$doy >= 361 & datD$doy < 363 & datD$year == 2012,]
hydroP <- datP[datP$doy >= 361 & datP$doy < 363 & datP$year == 2012,]
min(hydroD$discharge)
#get minimum and maximum range of discharge to plot
#go outside of the range so that it's easy to see high/low values
#floor rounds down the integer
yl <- floor(min(hydroD$discharge))-1
#ceiling rounds up to the integer
yh <- ceiling(max(hydroD$discharge))+1
#minimum and maximum range of precipitation to plot
pl <- 0
pm <- ceiling(max(hydroP$HPCP))+.5
#scale precipitation to fit on the discharge axis
hydroP$pscale <- (((yh-yl)/(pm-pl)) * hydroP$HPCP) + yl
par(mai=c(1,1,1,1))
#make plot of discharge
plot(hydroD$decDay,
hydroD$discharge,
type="l",
ylim=c(yl,yh),
lwd=2,
xlab="Day of year",
ylab=expression(paste("Discharge ft"^"3 ","sec"^"-1")))
#add bars to indicate precipitation
for(i in 1:nrow(hydroP)){
polygon(c(hydroP$decDay[i]-0.017,hydroP$decDay[i]-0.017,
hydroP$decDay[i]+0.017,hydroP$decDay[i]+0.017),
c(yl,hydroP$pscale[i],hydroP$pscale[i],yl),
col=rgb(0.392, 0.584, 0.929,.2), border=NA)
}
library(ggplot2)
#specify year as a factor
datD$yearPlot <- as.factor(datD$year)
#make a boxplot
ggplot(data= datD, aes(yearPlot,discharge)) +
geom_boxplot()
#make a violin plot
ggplot(data= datD, aes(yearPlot,discharge)) +
geom_violin()
## seasons plot
### WINTER - DEC. 1ST TO FEBRUARY 28TH
### SPRING - MARCH 1ST TO MAY 31ST
### SUMMER - JUNE 1ST TO AUGUST 31ST
### AUTUMN - SEPTEMBER 1ST TO NOVEMBER 30TH
library(tidyr)
datD3 <- datD2 %>%
separate(date,into=c("month","day","year"),sep="/") %>%
mutate(season = case_when(month %in% c("12","1","2") ~ "WINTER",
month %in% c("3","4","5") ~ "SPRING",
month %in% c("6","7","8") ~ "SUMMER",
month %in% c("9","10","11") ~ "AUTUMN"
))
datD3 %>%
filter(year %in% c(2016,2017)) %>%
ggplot(aes(x=season,y=discharge)) +
geom_violin(aes(fill=season,color=season)) +
facet_wrap(~year) +
theme_grey() +
labs(x = "Season",
y = expression(paste("Discharge ft"^"3 ","sec"^"-1")),
fill = "Season",
color = "Season"
)
|
## Create a "cache-aware" vector: a list of four closures that share this
## call's environment, which holds the data (`x`) and its cached mean.
##   set(y)      -- replace the stored vector and clear the cached mean
##   get()       -- return the stored vector
##   setmean(m)  -- store a computed mean in the cache
##   getmean()   -- return the cached mean (NULL until setmean() is called)
makeVector <- function(x = numeric()) {
  cached_mean <- NULL
  list(
    set = function(new_data) {
      x <<- new_data
      cached_mean <<- NULL  # new data invalidates the cached mean
    },
    get = function() x,
    setmean = function(mean) cached_mean <<- mean,
    getmean = function() cached_mean
  )
}
## Return the mean of the data held in a makeVector() object, computing it
## only once: the first call calculates the mean (extra arguments are
## forwarded to mean()) and stores it in the object's cache; later calls
## emit a message and return the cached value.
cachemean <- function(x, ...) {
  cached <- x$getmean()
  if (is.null(cached)) {
    cached <- mean(x$get(), ...)
    x$setmean(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/example.R
|
no_license
|
ittegrat/RProgramming_Assignment2
|
R
| false
| false
| 499
|
r
|
## Constructor for a vector with a cached mean.
##
## Returns a list of four closures that share this call's environment:
##   set(y)      -- replace the stored vector and clear the cached mean
##   get()       -- return the stored vector
##   setmean(m)  -- store a computed mean in the cache
##   getmean()   -- return the cached mean (NULL until setmean() is called)
makeVector <- function(x = numeric()) {
m <- NULL
# Replacing the data invalidates the cached mean.
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setmean <- function(mean) m <<- mean
getmean <- function() m
list(
set = set,
get = get,
setmean = setmean,
getmean = getmean
)
}
## Compute the mean of the data held in a makeVector() object, with caching.
##
## On the first call the mean is computed (extra arguments are forwarded to
## mean()) and stored back into the object via setmean(); subsequent calls
## return the cached value without recomputation.
cachemean <- function(x, ...) {
m <- x$getmean()
# Cache hit: announce and return the stored value immediately.
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- mean(data, ...)
x$setmean(m)
m
}
|
# Regenerate reference test fixtures for cancereffectsizeR's multi-stage LUAD
# analysis. Runs inside the installed package's tests/test_data directory and
# writes .rds fixtures there; the original working directory is restored at
# the end. NOTE(review): assumes the cancereffectsizeR source/data files
# (luad.hg19.maf.txt, lung_pca) are present in that directory -- confirm.
prev_dir = setwd(system.file("tests/test_data/", package = "cancereffectsizeR"))
# Build a four-stage CES analysis from the LUAD MAF and cache it.
luad = CESAnalysis(genome = "hg19", progression_order = 1:4)
luad = load_maf(luad, maf = "luad.hg19.maf.txt", sample_col = "sample_id",
tumor_allele_col = "Tumor_Seq_Allele2", progression_col = "fake_stage")
luad = calc_baseline_mutation_rates(luad, covariate_file = "lung_pca")
saveRDS(luad, "cesa_for_snv_multi.rds")
# Run SNV effect-size estimation on a fixed gene panel and save the results.
test_genes = c("TTN", "KRAS", "RYR2", "EGFR", "TP53", "ASXL3","IFITM2")
luad = ces_snv(luad, genes = test_genes)
saveRDS(luad@selection_results, "multi_stage_snv_results.rds")
# repeat with subset of data for dndscv testing
dndscv_samples = c("sample-1", "sample-106", "sample-108", "sample-11", "sample-31", "sample-33", "sample-35",
"sample-40", "sample-46", "sample-6", "sample-67", "sample-68", "sample-7", "sample-71",
"sample-73", "sample-74", "sample-77", "sample-82", "sample-83", "sample-95", "sample-99")
maf_for_dndscv = data.table::fread("luad.hg19.maf.txt")
maf_for_dndscv = maf_for_dndscv[sample_id %in% dndscv_samples]
for_dndscv = load_maf(cesa = CESAnalysis(genome="hg19", progression_order = 1:4), maf = maf_for_dndscv, sample_col = "sample_id",
tumor_allele_col = "Tumor_Seq_Allele2", progression_col = "fake_stage")
for_dndscv = trinucleotide_mutation_weights(for_dndscv)
saveRDS(for_dndscv, "cesa_for_multi_dndscv.rds")
# long tests will actually run dNdScv; short tests will just make sure internal preprocess/postprocess functions behave as expected
dndscv_input = cancereffectsizeR:::dndscv_preprocess(cesa = for_dndscv, covariate_file = "lung_pca")
saveRDS(dndscv_input, "dndscv_input_multi.rds")
dndscv_raw_output = lapply(dndscv_input, function(x) do.call(dndscv::dndscv, x))
# a few attributes are huge (>1 GB); drop these
dndscv_raw_output = lapply(dndscv_raw_output, function(x) { x$nbreg$terms = NULL; x$nbreg$model = NULL; x$poissmodel = NULL; return(x)})
saveRDS(dndscv_raw_output, "dndscv_raw_output_multi.rds")
# Post-process the raw dNdScv output and save the per-stage sel_cv tables,
# mutation rates, and the annotated MAF as separate fixtures.
dndscv_out = dndscv_postprocess(cesa = for_dndscv, dndscv_raw_output = dndscv_raw_output)
sel_cv = lapply(dndscv_out@dndscv_out_list, function(x) x$sel_cv)
saveRDS(sel_cv, "sel_cv_multi.rds")
saveRDS(dndscv_out@mutrates_list, "mutrates_multi.rds")
anno_out = annotate_gene_maf(dndscv_out)
saveRDS(anno_out@annotated.snv.maf, "multi_annotated_maf_df.rds")
# Restore the caller's working directory.
setwd(prev_dir)
|
/tests/generate_test_data/generate_luad_cesa_multi.R
|
no_license
|
chriscross11/cancereffectsizeR
|
R
| false
| false
| 2,404
|
r
|
# Regenerate reference test fixtures for cancereffectsizeR's multi-stage LUAD
# analysis. Runs inside the installed package's tests/test_data directory and
# writes .rds fixtures there; the original working directory is restored at
# the end. NOTE(review): assumes the cancereffectsizeR source/data files
# (luad.hg19.maf.txt, lung_pca) are present in that directory -- confirm.
prev_dir = setwd(system.file("tests/test_data/", package = "cancereffectsizeR"))
# Build a four-stage CES analysis from the LUAD MAF and cache it.
luad = CESAnalysis(genome = "hg19", progression_order = 1:4)
luad = load_maf(luad, maf = "luad.hg19.maf.txt", sample_col = "sample_id",
tumor_allele_col = "Tumor_Seq_Allele2", progression_col = "fake_stage")
luad = calc_baseline_mutation_rates(luad, covariate_file = "lung_pca")
saveRDS(luad, "cesa_for_snv_multi.rds")
# Run SNV effect-size estimation on a fixed gene panel and save the results.
test_genes = c("TTN", "KRAS", "RYR2", "EGFR", "TP53", "ASXL3","IFITM2")
luad = ces_snv(luad, genes = test_genes)
saveRDS(luad@selection_results, "multi_stage_snv_results.rds")
# repeat with subset of data for dndscv testing
dndscv_samples = c("sample-1", "sample-106", "sample-108", "sample-11", "sample-31", "sample-33", "sample-35",
"sample-40", "sample-46", "sample-6", "sample-67", "sample-68", "sample-7", "sample-71",
"sample-73", "sample-74", "sample-77", "sample-82", "sample-83", "sample-95", "sample-99")
maf_for_dndscv = data.table::fread("luad.hg19.maf.txt")
maf_for_dndscv = maf_for_dndscv[sample_id %in% dndscv_samples]
for_dndscv = load_maf(cesa = CESAnalysis(genome="hg19", progression_order = 1:4), maf = maf_for_dndscv, sample_col = "sample_id",
tumor_allele_col = "Tumor_Seq_Allele2", progression_col = "fake_stage")
for_dndscv = trinucleotide_mutation_weights(for_dndscv)
saveRDS(for_dndscv, "cesa_for_multi_dndscv.rds")
# long tests will actually run dNdScv; short tests will just make sure internal preprocess/postprocess functions behave as expected
dndscv_input = cancereffectsizeR:::dndscv_preprocess(cesa = for_dndscv, covariate_file = "lung_pca")
saveRDS(dndscv_input, "dndscv_input_multi.rds")
dndscv_raw_output = lapply(dndscv_input, function(x) do.call(dndscv::dndscv, x))
# a few attributes are huge (>1 GB); drop these
dndscv_raw_output = lapply(dndscv_raw_output, function(x) { x$nbreg$terms = NULL; x$nbreg$model = NULL; x$poissmodel = NULL; return(x)})
saveRDS(dndscv_raw_output, "dndscv_raw_output_multi.rds")
# Post-process the raw dNdScv output and save the per-stage sel_cv tables,
# mutation rates, and the annotated MAF as separate fixtures.
dndscv_out = dndscv_postprocess(cesa = for_dndscv, dndscv_raw_output = dndscv_raw_output)
sel_cv = lapply(dndscv_out@dndscv_out_list, function(x) x$sel_cv)
saveRDS(sel_cv, "sel_cv_multi.rds")
saveRDS(dndscv_out@mutrates_list, "mutrates_multi.rds")
anno_out = annotate_gene_maf(dndscv_out)
saveRDS(anno_out@annotated.snv.maf, "multi_annotated_maf_df.rds")
# Restore the caller's working directory.
setwd(prev_dir)
|
## Pearson-style goodness-of-fit score: sum over all elements of
## (Z - R)^2 / Z, where Z holds the expected values and R the
## observed/reproduced ones. Z and R must be conformable and Z nonzero.
scoreF <- function(Z, R) {
  residual <- Z - R
  sum(residual * residual / Z)
}
|
/pkg/R/scoreF.R
|
no_license
|
r-forge/polrep
|
R
| false
| false
| 47
|
r
|
## Pearson-style goodness-of-fit score: sum over all elements of
## (Z - R)^2 / Z, where Z holds the expected values and R the
## observed/reproduced ones. Z and R must be conformable and Z nonzero.
scoreF <- function(Z,R) {
sum((Z-R)^2/Z)
}
|
# Load the gapminder demo data and reshape it to one row per country with one
# GDP-per-capita column per year, rescaled to [0.06, 0.1] (presumably the
# value range expected by the downstream map widget -- confirm against caller).
data(gapminder, package = "gapminder")
dataset <- gapminder %>%
select(-continent, -lifeExp, -pop) %>%
mutate(country = as.character(country)) %>%
tidyr::pivot_wider(
names_from = year,
values_from = gdpPercap
) %>%
mutate_if(is.numeric, scales::rescale, to = c(.06, .1)) %>%
# Harmonize this country name with the naming used by the map polygons.
mutate(
country = case_when(
country == "United States" ~ "United States of America",
TRUE ~ country
)
)
# Year column names: every column except the leading `country` column.
years <- names(dataset)[2:length(names(dataset))]
# For every numeric (year) column, add a parallel "color_<year>" column that
# maps the rescaled GDP value onto a blue-green-yellow palette, then join the
# color columns back onto the original values by country.
add_color <- function(dataset){
scl <- scales::col_numeric(c("#2c7fb8", "#7fcdbb", "#edf8b1"), c(.06, .1))
nms <- names(dataset)
nms <- nms[2:length(nms)]
nms <- paste0("color_", nms)
colors <- dataset %>%
mutate_if(is.numeric, scl) %>%
purrr::set_names(c("country", nms))
left_join(dataset, colors, by = "country")
}
dataset <- add_color(dataset)
# All column names (values plus color columns), used elsewhere in the app.
all_vars <- names(dataset)
|
/data/preprocess.R
|
no_license
|
JohnCoene/gdp-app
|
R
| false
| false
| 881
|
r
|
# Load the gapminder demo data and reshape it to one row per country with one
# GDP-per-capita column per year, rescaled to [0.06, 0.1] (presumably the
# value range expected by the downstream map widget -- confirm against caller).
data(gapminder, package = "gapminder")
dataset <- gapminder %>%
select(-continent, -lifeExp, -pop) %>%
mutate(country = as.character(country)) %>%
tidyr::pivot_wider(
names_from = year,
values_from = gdpPercap
) %>%
mutate_if(is.numeric, scales::rescale, to = c(.06, .1)) %>%
# Harmonize this country name with the naming used by the map polygons.
mutate(
country = case_when(
country == "United States" ~ "United States of America",
TRUE ~ country
)
)
# Year column names: every column except the leading `country` column.
years <- names(dataset)[2:length(names(dataset))]
# For every numeric (year) column, add a parallel "color_<year>" column that
# maps the rescaled GDP value onto a blue-green-yellow palette, then join the
# color columns back onto the original values by country.
add_color <- function(dataset){
scl <- scales::col_numeric(c("#2c7fb8", "#7fcdbb", "#edf8b1"), c(.06, .1))
nms <- names(dataset)
nms <- nms[2:length(nms)]
nms <- paste0("color_", nms)
colors <- dataset %>%
mutate_if(is.numeric, scl) %>%
purrr::set_names(c("country", nms))
left_join(dataset, colors, by = "country")
}
dataset <- add_color(dataset)
# All column names (values plus color columns), used elsewhere in the app.
all_vars <- names(dataset)
|
# Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly. The
# following two functions are used to cache the inverse of a matrix.
# makeCacheMatrix creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
## makeCacheMatrix creates a special matrix object, and then cacheSolve
## calculates the inverse of the matrix.
## If the matrix inverse has already been calculated, it will instead
## find it in the cache and return it, and not calculate it again.
## Wrap a matrix in a list of closures that can cache its inverse.
## Interface (names preserved for cacheSolve()):
##   set(y)           -- replace the matrix and clear the cached inverse
##   get()            -- return the matrix
##   setinverse(inv)  -- store a computed inverse in the cache
##   getinverse()     -- return the cached inverse (NULL until set)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(new_matrix) {
      x <<- new_matrix
      cached_inverse <<- NULL  # new data invalidates the cached inverse
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## The function cacheSolve returns the inverse of a matrix A created with
## the makeCacheMatrix function.
## If the cached inverse is available, cacheSolve retrieves it, while if
## not, it computes, caches, and returns it.
## cacheSolve returns the inverse of the special "matrix" object created by
## makeCacheMatrix. The inverse is computed at most once: the first call
## calculates it with solve() and stores it in the object's cache; later
## calls return the cached value (announced via a message).
##
## x   : object produced by makeCacheMatrix
## ... : extra arguments forwarded to solve() (e.g. tol)
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    inv_x <- x$getinverse()
    if (!is.null(inv_x)) {
        message("getting cached inverse matrix")
        return(inv_x)
    }
    ## Fix: forward ... to solve() so caller-supplied solve() options are
    ## honoured (the signature accepted ... but silently dropped it).
    inv_x <- solve(x$get(), ...)
    x$setinverse(inv_x)
    inv_x
}
## Sample run:
## > x = rbind(c(1, -1/4), c(-1/4, 1))
## > m = makeCacheMatrix(x)
## > m$get()
## [,1] [,2]
## [1,] 1.00 -0.25
## [2,] -0.25 1.00
## No cache in the first run
## > cacheSolve(m)
## [,1] [,2]
## [1,] 1.0666667 0.2666667
## [2,] 0.2666667 1.0666667
## Retrieving from the cache in the second run
## > cacheSolve(m)
## getting cached data.
## [,1] [,2]
## [1,] 1.0666667 0.2666667
## [2,] 0.2666667 1.0666667
## >
|
/cachematrix.R
|
no_license
|
neelamsingh/ProgrammingAssignment2
|
R
| false
| false
| 2,046
|
r
|
# Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly. The
# following two functions are used to cache the inverse of a matrix.
# makeCacheMatrix creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
## makeCacheMatrix creates a special matrix object, and then cacheSolve
## calculates the inverse of the matrix.
## If the matrix inverse has already been calculated, it will instead
## find it in the cache and return it, and not calculate it again.
## makeCacheMatrix builds a special "matrix" object: a list of four closures
## that share one environment, holding the matrix and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
    inv_x <- NULL                        # cached inverse; NULL until computed
    set <- function(y) {
        x <<- y
        inv_x <<- NULL                   # invalidate cache when matrix changes
    }
    get <- function() x
    setinverse<- function(inverse) inv_x <<-inverse
    getinverse <- function() inv_x
    # Expose the four accessors as a named list.
    list(set = set, get = get,
        setinverse = setinverse,
        getinverse = getinverse)
}
## The function cacheSolve returns the inverse of a matrix A created with
## the makeCacheMatrix function.
## If the cached inverse is available, cacheSolve retrieves it, while if
## not, it computes, caches, and returns it.
## cacheSolve returns the inverse of the special "matrix" object created by
## makeCacheMatrix. If the cached inverse is available it is retrieved;
## otherwise the inverse is computed with solve(), cached, and returned.
##
## x   : object produced by makeCacheMatrix
## ... : extra arguments forwarded to solve() (e.g. tol)
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    inv_x <- x$getinverse()
    if (!is.null(inv_x)) {
        message("getting cached inverse matrix")
        return(inv_x)
    } else {
        ## Fix: forward ... to solve() so caller-supplied solve() options
        ## are honoured (the signature accepted ... but silently dropped it).
        inv_x <- solve(x$get(), ...)
        x$setinverse(inv_x)
        return(inv_x)
    }
}
## Sample run:
## > x = rbind(c(1, -1/4), c(-1/4, 1))
## > m = makeCacheMatrix(x)
## > m$get()
## [,1] [,2]
## [1,] 1.00 -0.25
## [2,] -0.25 1.00
## No cache in the first run
## > cacheSolve(m)
## [,1] [,2]
## [1,] 1.0666667 0.2666667
## [2,] 0.2666667 1.0666667
## Retrieving from the cache in the second run
## > cacheSolve(m)
## getting cached data.
## [,1] [,2]
## [1,] 1.0666667 0.2666667
## [2,] 0.2666667 1.0666667
## >
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chart.Drawdown.R
\name{chart.Drawdown}
\alias{chart.Drawdown}
\title{Time series chart of drawdowns through time}
\usage{
chart.Drawdown(
R,
geometric = TRUE,
legend.loc = NULL,
colorset = (1:12),
plot.engine = "default",
...
)
}
\arguments{
\item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of
asset returns}
\item{geometric}{utilize geometric chaining (TRUE) or simple/arithmetic chaining (FALSE) to aggregate returns,
default TRUE}
\item{legend.loc}{places a legend into one of nine locations on the chart:
bottomright, bottom, bottomleft, left, topleft, top, topright, right, or
center.}
\item{colorset}{color palette to use, set by default to rational choices}
\item{plot.engine}{choose the plot engine you wish to use:
ggplot2, plotly, dygraph, googleVis, or default}
\item{\dots}{any other passthru parameters}
}
\description{
A time series chart demonstrating drawdowns from peak equity attained
through time, calculated from periodic returns.
}
\details{
Any time the cumulative returns dips below the maximum cumulative returns,
it's a drawdown. Drawdowns are measured as a percentage of that maximum
cumulative return, in effect, measured from peak equity.
}
\examples{
data(edhec)
chart.Drawdown(edhec[,c(1,2)],
main="Drawdown from Peak Equity Attained",
legend.loc="bottomleft")
}
\references{
Bacon, C. \emph{Practical Portfolio Performance Measurement and
Attribution}. Wiley. 2004. p. 88 \cr
}
\seealso{
\code{\link{plot}} \cr
\code{\link{chart.TimeSeries}} \cr
\code{\link{findDrawdowns}} \cr
\code{\link{sortDrawdowns}} \cr
\code{\link{maxDrawdown}} \cr
\code{\link{table.Drawdowns}} \cr
\code{\link{table.DownsideRisk}}
}
\author{
Peter Carl
}
|
/man/chart.Drawdown.Rd
|
no_license
|
braverock/PerformanceAnalytics
|
R
| false
| true
| 1,787
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chart.Drawdown.R
\name{chart.Drawdown}
\alias{chart.Drawdown}
\title{Time series chart of drawdowns through time}
\usage{
chart.Drawdown(
R,
geometric = TRUE,
legend.loc = NULL,
colorset = (1:12),
plot.engine = "default",
...
)
}
\arguments{
\item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of
asset returns}
\item{geometric}{utilize geometric chaining (TRUE) or simple/arithmetic chaining (FALSE) to aggregate returns,
default TRUE}
\item{legend.loc}{places a legend into one of nine locations on the chart:
bottomright, bottom, bottomleft, left, topleft, top, topright, right, or
center.}
\item{colorset}{color palette to use, set by default to rational choices}
\item{plot.engine}{choose the plot engine you wish to use:
ggplot2, plotly, dygraph, googleVis, or default}
\item{\dots}{any other passthru parameters}
}
\description{
A time series chart demonstrating drawdowns from peak equity attained
through time, calculated from periodic returns.
}
\details{
Any time the cumulative returns dips below the maximum cumulative returns,
it's a drawdown. Drawdowns are measured as a percentage of that maximum
cumulative return, in effect, measured from peak equity.
}
\examples{
data(edhec)
chart.Drawdown(edhec[,c(1,2)],
main="Drawdown from Peak Equity Attained",
legend.loc="bottomleft")
}
\references{
Bacon, C. \emph{Practical Portfolio Performance Measurement and
Attribution}. Wiley. 2004. p. 88 \cr
}
\seealso{
\code{\link{plot}} \cr
\code{\link{chart.TimeSeries}} \cr
\code{\link{findDrawdowns}} \cr
\code{\link{sortDrawdowns}} \cr
\code{\link{maxDrawdown}} \cr
\code{\link{table.Drawdowns}} \cr
\code{\link{table.DownsideRisk}}
}
\author{
Peter Carl
}
|
# TASK 4 ------------------------------------------------------------------
# Build an integer sequence from 5 to 14 and print it.
a <- (5:14)
a
# TASK 5 ------------------------------------------------------------------
# Inspect the first and seventh elements, then store them together in b.
a[1]
a[7]
a[1]
a[7]
b <- c(a[1],a[7])
b
# TASK 6 ------------------------------------------------------------------
# Element-wise comparisons; b (length 2) is recycled against a (length 10).
a<b
b>a
a>=b
# TASK 7 ------------------------------------------------------------------
# Extract three scalars from a, then combine them arithmetically.
x <- a[1]
x
y <- a[6]
y
z <- a[9]
z
((z+x)*(z+y))/2
10*(x-y)
# TASK 8 ------------------------------------------------------------------
# The R operator for 'not' is '!'
# TASK 9
|
/R test 1.R
|
no_license
|
MinaCarnero/R-Test-1
|
R
| false
| false
| 567
|
r
|
# TASK 4 ------------------------------------------------------------------
# Build an integer sequence from 5 to 14 and print it.
a <- (5:14)
a
# TASK 5 ------------------------------------------------------------------
# Inspect the first and seventh elements, then store them together in b.
a[1]
a[7]
a[1]
a[7]
b <- c(a[1],a[7])
b
# TASK 6 ------------------------------------------------------------------
# Element-wise comparisons; b (length 2) is recycled against a (length 10).
a<b
b>a
a>=b
# TASK 7 ------------------------------------------------------------------
# Extract three scalars from a, then combine them arithmetically.
x <- a[1]
x
y <- a[6]
y
z <- a[9]
z
((z+x)*(z+y))/2
10*(x-y)
# TASK 8 ------------------------------------------------------------------
# The R operator for 'not' is '!'
# TASK 9
|
# S3 generic: dispatches forecast() to a class-specific method (e.g. a
# forecast.<class> defined elsewhere in the package) based on class(obj).
forecast <- function(obj, ...) UseMethod("forecast")
|
/R/generics.r
|
no_license
|
lnsongxf/bvar
|
R
| false
| false
| 53
|
r
|
# S3 generic: dispatches forecast() to a class-specific method (e.g. a
# forecast.<class> defined elsewhere in the package) based on class(obj).
forecast <- function(obj, ...) UseMethod("forecast")
|
# -*- tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
# vi: set ts=2 noet:
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
source("../../plots/hbonds/hbond_geo_dim_scales.R")
# Register the "BAH_chem_type_comparison" analysis: compares the distribution
# of the hydrogen-bond base-acceptor-hydrogen (BAH) angle across sample
# sources -- overall and stratified by donor chemical type, acceptor chemical
# type, acceptor hybridization, and donor x acceptor chemical type.
feature_analyses <- c(feature_analyses, methods::new("FeaturesAnalysis",
id = "BAH_chem_type_comparison",
author = "Matthew O'Meara",
brief_description = "",
feature_reporter_dependencies = c("HBondFeatures"),
run=function(self, sample_sources, output_dir, output_formats){
# SQL: pull cosBAH plus donor/acceptor chemical types for hydrogen bonds
# whose heavy atoms are well ordered (B-factor < 30). acc_hybrid buckets
# the acceptor chemical types into ring/sp2/sp3 hybridization classes.
sele <-"
SELECT
	geom.cosBAH,
	acc.HBChemType AS acc_chem_type,
	don.HBChemType AS don_chem_type,
	CASE acc.HBChemType
		WHEN 'hbacc_IMD' THEN 'ring' WHEN 'hbacc_IME' THEN 'ring'
		WHEN 'hbacc_AHX' THEN 'sp3' WHEN 'hbacc_HXL' THEN 'sp3'
		WHEN 'hbacc_CXA' THEN 'sp2' WHEN 'hbacc_CXL' THEN 'sp2'
		WHEN 'hbacc_PBA' THEN 'sp2' END AS acc_hybrid
FROM
	hbond_geom_coords AS geom,
	hbonds AS hb,
	hbond_sites_pdb AS don_pdb,
	hbond_sites_pdb AS acc_pdb,
	hbond_sites AS don,
	hbond_sites AS acc
WHERE
	hb.struct_id = geom.struct_id AND
	hb.hbond_id = geom.hbond_id AND
	hb.struct_id = don.struct_id AND
	hb.don_id = don.site_id AND
	hb.struct_id = acc.struct_id AND
	hb.acc_id = acc.site_id AND
	don_pdb.struct_id = hb.struct_id AND don_pdb.site_id = hb.don_id AND
	don_pdb.heavy_atom_temperature < 30 AND
	acc_pdb.struct_id = hb.struct_id AND acc_pdb.site_id = hb.acc_id AND
	acc_pdb.heavy_atom_temperature < 30;";
f <- query_sample_sources(sample_sources, sele)
# Convert the stored cosine to an angle and attach readable type labels.
f$BAH <- acos(f$cosBAH)
f$don_chem_type_name <- don_chem_type_name_linear(f$don_chem_type)
f$acc_chem_type_name <- acc_chem_type_name_linear(f$acc_chem_type)
f <- na.omit(f, method="r")
# Distribution-comparison statistics computed for every stratification.
tests <- c("kolmogorov_smirnov_test", "histogram_kl_divergence")
# 1) Overall comparison, no stratification.
comp_stats <- comparison_statistics(
	sample_sources, f, c(), "BAH", tests)
table_id <- "BAH_chem_type_comparison"
table_title <- "H-Bond BAH Angle Distribution Comparison, B-Factor < 30"
save_tables(self,
	comp_stats, table_id,
	sample_sources, output_dir, output_formats,
	caption=table_title, caption.placement="top")
# 2) Stratified by donor chemical type.
comp_stats <- comparison_statistics(
	sample_sources, f, c("don_chem_type_name"), "BAH", tests)
table_id <- paste("BAH_chem_type_comparison", "by_don_chem_type", sep="_")
table_title <- "H-Bond BAH Angle by Donor Chemical Type\nDistribution Comparison, B-Factor < 30"
save_tables(self,
	comp_stats, table_id, sample_sources, output_dir, output_formats,
	caption=table_title, caption.placement="top")
# 3) Stratified by acceptor chemical type.
comp_stats <- comparison_statistics(
	sample_sources, f, c("acc_chem_type_name"), "BAH", tests)
table_id <- paste("BAH_chem_type_comparison", "by_acc_chem_type", sep="_")
table_title <- "H-Bond BAH Angle by Acceptor Chemical Type\nDistribution Comparison, B-Factor < 30"
save_tables(self,
	comp_stats, table_id, sample_sources, output_dir, output_formats,
	caption=table_title, caption.placement="top")
# 4) Stratified by acceptor hybridization (ring/sp2/sp3).
comp_stats <- comparison_statistics(
	sample_sources, f, c("acc_hybrid"), "BAH", tests)
table_id <- paste("BAH_chem_type_comparison", "by_acc_hybrid", sep="_")
table_title <- "H-Bond BAH Angle by Acceptor Hybrid\nDistribution Comparison, B-Factor < 30"
save_tables(self,
	comp_stats, table_id, sample_sources, output_dir, output_formats,
	caption=table_title, caption.placement="top")
# 5) Stratified by donor x acceptor chemical type.
comp_stats <- comparison_statistics(
	sample_sources, f, c("don_chem_type_name", "acc_chem_type_name"), "BAH", tests)
table_id <- paste("BAH_chem_type_comparison", "by_don_chem_type_acc_chem_type", sep="_")
table_title <- "H-Bond BAH Angle by Donor and Acceptor Chemical Types\nDistribution Comparison, B-Factor < 30"
save_tables(self,
	comp_stats, table_id, sample_sources, output_dir, output_formats,
	caption=table_title, caption.placement="top")
})) # end FeaturesAnalysis
|
/inst/scripts/analysis/statistics/hbonds/BAH_chem_type_comparison.R
|
no_license
|
momeara/RosettaFeatures
|
R
| false
| false
| 4,081
|
r
|
# -*- tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
# vi: set ts=2 noet:
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
source("../../plots/hbonds/hbond_geo_dim_scales.R")
feature_analyses <- c(feature_analyses, methods::new("FeaturesAnalysis",
id = "BAH_chem_type_comparison",
author = "Matthew O'Meara",
brief_description = "",
feature_reporter_dependencies = c("HBondFeatures"),
run=function(self, sample_sources, output_dir, output_formats){
sele <-"
SELECT
geom.cosBAH,
acc.HBChemType AS acc_chem_type,
don.HBChemType AS don_chem_type,
CASE acc.HBChemType
WHEN 'hbacc_IMD' THEN 'ring' WHEN 'hbacc_IME' THEN 'ring'
WHEN 'hbacc_AHX' THEN 'sp3' WHEN 'hbacc_HXL' THEN 'sp3'
WHEN 'hbacc_CXA' THEN 'sp2' WHEN 'hbacc_CXL' THEN 'sp2'
WHEN 'hbacc_PBA' THEN 'sp2' END AS acc_hybrid
FROM
hbond_geom_coords AS geom,
hbonds AS hb,
hbond_sites_pdb AS don_pdb,
hbond_sites_pdb AS acc_pdb,
hbond_sites AS don,
hbond_sites AS acc
WHERE
hb.struct_id = geom.struct_id AND
hb.hbond_id = geom.hbond_id AND
hb.struct_id = don.struct_id AND
hb.don_id = don.site_id AND
hb.struct_id = acc.struct_id AND
hb.acc_id = acc.site_id AND
don_pdb.struct_id = hb.struct_id AND don_pdb.site_id = hb.don_id AND
don_pdb.heavy_atom_temperature < 30 AND
acc_pdb.struct_id = hb.struct_id AND acc_pdb.site_id = hb.acc_id AND
acc_pdb.heavy_atom_temperature < 30;";
f <- query_sample_sources(sample_sources, sele)
f$BAH <- acos(f$cosBAH)
f$don_chem_type_name <- don_chem_type_name_linear(f$don_chem_type)
f$acc_chem_type_name <- acc_chem_type_name_linear(f$acc_chem_type)
f <- na.omit(f, method="r")
tests <- c("kolmogorov_smirnov_test", "histogram_kl_divergence")
comp_stats <- comparison_statistics(
sample_sources, f, c(), "BAH", tests)
table_id <- "BAH_chem_type_comparison"
table_title <- "H-Bond BAH Angle Distribution Comparison, B-Factor < 30"
save_tables(self,
comp_stats, table_id,
sample_sources, output_dir, output_formats,
caption=table_title, caption.placement="top")
comp_stats <- comparison_statistics(
sample_sources, f, c("don_chem_type_name"), "BAH", tests)
table_id <- paste("BAH_chem_type_comparison", "by_don_chem_type", sep="_")
table_title <- "H-Bond BAH Angle by Donor Chemical Type\nDistribution Comparison, B-Factor < 30"
save_tables(self,
comp_stats, table_id, sample_sources, output_dir, output_formats,
caption=table_title, caption.placement="top")
comp_stats <- comparison_statistics(
sample_sources, f, c("acc_chem_type_name"), "BAH", tests)
table_id <- paste("BAH_chem_type_comparison", "by_acc_chem_type", sep="_")
table_title <- "H-Bond BAH Angle by Acceptor Chemical Type\nDistribution Comparison, B-Factor < 30"
save_tables(self,
comp_stats, table_id, sample_sources, output_dir, output_formats,
caption=table_title, caption.placement="top")
comp_stats <- comparison_statistics(
sample_sources, f, c("acc_hybrid"), "BAH", tests)
table_id <- paste("BAH_chem_type_comparison", "by_acc_hybrid", sep="_")
table_title <- "H-Bond BAH Angle by Acceptor Hybrid\nDistribution Comparison, B-Factor < 30"
save_tables(self,
comp_stats, table_id, sample_sources, output_dir, output_formats,
caption=table_title, caption.placement="top")
comp_stats <- comparison_statistics(
sample_sources, f, c("don_chem_type_name", "acc_chem_type_name"), "BAH", tests)
table_id <- paste("BAH_chem_type_comparison", "by_don_chem_type_acc_chem_type", sep="_")
table_title <- "H-Bond BAH Angle by Donor and Acceptor Chemical Types\nDistribution Comparison, B-Factor < 30"
save_tables(self,
comp_stats, table_id, sample_sources, output_dir, output_formats,
caption=table_title, caption.placement="top")
})) # end FeaturesAnalysis
|
#' AMARETTO_Download
#'
#' Downloads the TCGA data needed for an AMARETTO analysis: gene expression
#' (RNASeq2GeneNorm via curatedTCGAData, saved as an .rds file) and copy
#' number variation (GISTIC2 Level 4 via the Broad Firehose).
#' @param CancerSite TCGA cancer code for data download
#' @param TargetDirectory Directory path to download data. NOTE(review): the
#'   default `TargetDirectory = TargetDirectory` is self-referential and
#'   errors when the argument is omitted -- callers must always supply it.
#'   It is concatenated with paste0 below, so it should end in "/" -- TODO
#'   confirm against callers.
#' @param downloadData TRUE. NOTE(review): stale tag -- this function has no
#'   `downloadData` argument.
#' @return A list with the cancer code plus the directories holding the
#'   expression data (MAdirectory) and the GISTIC2 copy-number data
#'   (CNVdirectory).
#' @importFrom curatedTCGAData curatedTCGAData
#' @importFrom httr GET stop_for_status
#' @importFrom limma strsplit2
#' @importFrom BiocFileCache BiocFileCache bfcadd bfcquery
#' @importFrom doParallel registerDoParallel
#' @importFrom dplyr everything mutate select
#' @importFrom foreach foreach
#' @import grDevices
#' @importFrom parallel makeCluster stopCluster
#' @importFrom readr write_tsv
#' @importFrom tibble rownames_to_column
#' @importFrom utils untar zip
#' @export
#' @examples
#' TargetDirectory <- file.path(getwd(),"Downloads/");dir.create(TargetDirectory)
#' CancerSite <- 'CHOL'
#' DataSetDirectories <- AMARETTO_Download(CancerSite,TargetDirectory = TargetDirectory)
AMARETTO_Download <- function(CancerSite = "CHOL",
    TargetDirectory = TargetDirectory) {
    ori.dir <- getwd()
    message("Downloading Gene Expression and Copy Number Variation data for: ",
        CancerSite, "\n")
    # Cohorts the pipeline has been exercised on; others merely warn.
    Cancers = c("BLCA", "BRCA", "LUAD", "LUSC", "COADREAD",
        "HNSC", "KIRC", "GBM", "OV", "LAML", "UCEC",
        "COAD", "READ")
    if (!(CancerSite %in% Cancers)) {
        message("This TCGA cancer site/type was not tested, continue at your own risk.\n")
    }
    if (!file.exists(TargetDirectory))
        dir.create(TargetDirectory, showWarnings = FALSE)
    TCGA_acronym_uppercase = toupper(CancerSite)
    # Expression: fetch the MultiAssayExperiment and cache it as .rds.
    assays <- c("RNASeq2GeneNorm")
    MAEO <- suppressMessages(curatedTCGAData::curatedTCGAData(CancerSite,
        assays, FALSE))
    saveRDS(MAEO, file = paste0(TargetDirectory, CancerSite,
        "_RNASeq_MAEO.rds"))
    # Copy number: GISTIC2 Level 4 output from the Firehose "analyses" runs.
    dataType = "analyses"
    dataFileTag = "CopyNumber_Gistic2.Level_4"
    message("Searching CNV data for:", CancerSite,
        "\n")
    CNVdirectory = get_firehoseData(saveDir = TargetDirectory,
        TCGA_acronym_uppercase = TCGA_acronym_uppercase,
        dataType = dataType, dataFileTag = dataFileTag)
    # Restore the caller's working directory on exit (defensive; nothing
    # above visibly changes it -- TODO confirm).
    on.exit(setwd(ori.dir))
    return(list(CancerSite = CancerSite, MAdirectory = TargetDirectory,
        CNVdirectory = CNVdirectory))
}
#' get_firehoseData
#'
#' Download a TCGA archive from the Broad GDAC Firehose. The function walks
#' the Firehose runs listing, resolves the most recent run date that actually
#' provides the requested data file, downloads the archive through a
#' BiocFileCache, untars it into `saveDir`, and returns the path of the
#' extracted directory.
#'
#' @param downloadData Unused; kept for backward compatibility.
#' @param saveDir Directory the archive is cached in and extracted into.
#' @param TCGA_acronym_uppercase TCGA cohort acronym, e.g. "LUAD".
#' @param dataType Firehose run type, e.g. "stddata" or "analyses".
#' @param dataFileTag Tag identifying the wanted data file inside the run.
#' @param FFPE If TRUE keep only FFPE archives, otherwise drop them.
#' @param fileType Archive extension appended to the resolved file name.
#' @param gdacURL Base URL of the Firehose runs listing.
#' @param untarUngzip Unused; kept for backward compatibility.
#' @param printDisease_abbr If TRUE, print the known disease acronyms.
#'
#' @return Path of the extracted data directory; NA (with a warning) when no
#'   matching file is found. Nothing useful is returned for unknown acronyms.
#' @keywords internal
#' @examples
get_firehoseData <- function(downloadData = TRUE, saveDir = "./",
    TCGA_acronym_uppercase = "LUAD", dataType = "stddata",
    dataFileTag = "mRNAseq_Preprocess.Level_3", FFPE = FALSE,
    fileType = "tar.gz", gdacURL = "http://gdac.broadinstitute.org/runs/",
    untarUngzip = TRUE, printDisease_abbr = FALSE) {
    ori.dir <- getwd()
    cancers <- c("Acute Myeloid Leukemia [LAML] \n",
        "Adrenocortical carcinoma [ACC]\t\n", "Bladder Urothelial Carcinoma [BLCA] \n",
        "Brain Lower Grade Glioma [LGG] \n", "Breast invasive carcinoma [BRCA] \n",
        "Cervical squamous cell carcinoma and endocervical adenocarcinoma [CESC] \n",
        "Cholangiocarcinoma [CHOL] \n", "Colon adenocarcinoma [COAD] \n",
        "Esophageal carcinoma [ESCA] \n", "Glioblastoma multiforme [GBM] \n",
        "Head and Neck squamous cell carcinoma [HNSC]\t\n",
        "Kidney Chromophobe [KICH]\t\n", "Kidney renal clear cell carcinoma [KIRC]\t\n",
        "Kidney renal papillary cell carcinoma [KIRP]\t\n",
        "Liver hepatocellular carcinoma [LIHC]\t\n",
        "Lung adenocarcinoma [LUAD]\t\n", "Lung squamous cell carcinoma [LUSC] \n",
        "Lymphoid Neoplasm Diffuse Large B-cell Lymphoma [DLBC]\t\n",
        "Mesothelioma [MESO] \n", "Ovarian serous cystadenocarcinoma [OV]\t\n",
        "Pancreatic adenocarcinoma [PAAD]\t\n", "Pheochromocytoma and Paraganglioma [PCPG] \n",
        "Prostate adenocarcinoma [PRAD] \n", "Rectum adenocarcinoma [READ]\t\n",
        "Sarcoma [SARC]\t\n", "Skin Cutaneous Melanoma [SKCM]\t\n",
        "Stomach adenocarcinoma [STAD] \n", "Testicular Germ Cell Tumors [TGCT] \n",
        "Thymoma [THYM] \n", "Thyroid carcinoma [THCA]\t\n",
        "Uterine Carcinosarcoma [UCS]\t \n", "Uterine Corpus Endometrial Carcinoma [UCEC]\t\n",
        "Uveal Melanoma [UVM] \n")
    cancers_acronyms <- c("LAML", "ACC", "BLCA", "LGG",
        "BRCA", "CESC", "CHOL", "COAD", "ESCA", "GBM",
        "HNSC", "KICH", "KIRC", "LIHC", "LUAD", "LUSC",
        "DLBC", "MESO", "OV", "PAAD", "PCPG", "PRAD",
        "READ", "SARC", "SKCM", "STAD", "TGCT", "THYM",
        "THCA", "UCS", "UCEC", "UVM")
    if (printDisease_abbr) {
        message(cat("Here are the possible TCGA database disease acronyms. \nRe-run this function with printDisease_abbr=FALSE to then run an actual query.\n\n",
            cancers))
    }
    if (TCGA_acronym_uppercase %in% cancers_acronyms) {
        gdacURL_orig <- gdacURL
        # Scrape the runs listing and collect the dates of runs of the
        # requested type.
        urlData <- web.lnk <- httr::GET(gdacURL)
        urlData <- limma::strsplit2(urlData, paste(dataType,
            "__", sep = ""))
        urlData <- urlData[, 2:dim(urlData)[2]]
        urlData <- limma::strsplit2(urlData, "/")
        urlData <- urlData[, 1]
        urlData <- as.POSIXct(strptime(urlData, "%Y_%m_%d"))
        dateData <- as.Date(as.character(urlData[which(!is.na(urlData))]))
        # Start from the most recent run date.
        lastDate <- dateData[match(summary(dateData)[which(names(summary(dateData)) ==
            "Max.")], dateData)]
        lastDate <- gsub("-", "_", as.character(lastDate))
        lastDateCompress <- gsub("_", "", lastDate)
        gdacURL <- paste(gdacURL, dataType, "__", lastDate,
            "/data/", TCGA_acronym_uppercase, "/",
            lastDateCompress, "/", sep = "")
        urlData <- web.lnk <- httr::GET(gdacURL)
        urlData <- limma::strsplit2(urlData, "href=\\\"")
        # Fall back to earlier run dates until one is actually published.
        while (length(grep("was not found", urlData)) >
            0) {
            message(paste0("\tNOTE: the TCGA run dated ",
                lastDate, " for ", TCGA_acronym_uppercase,
                " isn't available for download yet. \n"))
            message("\tTaking the run dated just before this one.\n")
            dateData <- dateData[-which(dateData ==
                (summary(dateData)[which(names(summary(dateData)) ==
                    "Max.")]))]
            lastDate <- dateData[match(summary(dateData)[which(names(summary(dateData)) ==
                "Max.")], dateData)]
            lastDate <- gsub("-", "_", as.character(lastDate))
            lastDateCompress <- gsub("_", "", lastDate)
            gdacURL <- paste(gdacURL_orig, dataType,
                "__", lastDate, "/data/", TCGA_acronym_uppercase,
                "/", lastDateCompress, "/", sep = "")
            urlData <- web.lnk <- httr::GET(gdacURL)
            urlData <- limma::strsplit2(urlData, "href=\\\"")
            if (length(dateData) <= 1) {
                break
            }
        }
        # Fix: typo in the failure message ("FALIED" -> "FAILED").
        httr::stop_for_status(web.lnk, task = "FAILED to download input TCGA data type")
        if (FFPE) {
            urlData <- urlData[grep("FFPE", urlData)]
            if (length(urlData) == 0) {
                stop("\nNo FFPE data found for this query. Try FFPE=FALSE.\n")
            }
        } else {
            if (length(grep("FFPE", urlData)) > 0) {
                urlData <- urlData[-grep("FFPE", urlData)]
            }
            if (length(urlData) == 0) {
                stop("\nNo non-FFPE data found for this query. Try FFPE=TRUE.\n")
            }
        }
        fileName <- urlData[grep(dataFileTag, urlData)]
        if (length(fileName) == 0) {
            warnMessage <- paste0("\nNot returning any viable url data paths after searching by date for disease ",
                TCGA_acronym_uppercase, " \tfor data type ",
                dataFileTag, ".No data was downloaded.\n")
            warning(warnMessage)
            return(NA)
        }
        fileName <- limma::strsplit2(fileName, "tar.gz")[1,
            1]
        fileName <- paste(fileName, fileType, sep = "")
        gdacURL <- paste(gdacURL, fileName, sep = "")
        cancer_url <- computeGisticURL(url = gdacURL)
        # Fix: cache under saveDir instead of relying on a global
        # `TargetDirectory` through cacheResource()'s default argument.
        cache_target <- cacheResource(cache = BiocFileCache::BiocFileCache(saveDir),
            resource = cancer_url)
        # Fix: extract into saveDir (previously referenced the caller's
        # global `TargetDirectory`) and locate the extracted directory by
        # this function's own acronym argument (previously referenced the
        # caller's global `CancerSite`).
        utils::untar(cache_target$rpath, exdir = saveDir)
        DownloadedFile <- list.dirs(saveDir,
            full.names = TRUE)[grep(TCGA_acronym_uppercase, list.dirs(saveDir,
            full.names = TRUE))]
        DownloadedFile <- paste0(DownloadedFile, "/")
        return(DownloadedFile)
    }
    on.exit(setwd(ori.dir))
}
#' AMARETTO_ExportResults
#'
#' Retrieve a download of all the data linked with the run (including
#' heatmaps): writes per-module pdf heatmaps (optionally), the RData objects,
#' GCT/TSV tables, and finally zips the time-stamped output directory.
#' @param AMARETTOinit AMARETTO initialize output
#' @param AMARETTOresults AMARETTO results output
#' @param data_address Directory to save data folder (must already exist)
#' @param Heatmaps Output heatmaps as pdf
#' @param CNV_matrix CNV_matrix (forwarded to AMARETTO_VisualizeModule)
#' @param MET_matrix MET_matrix (forwarded to AMARETTO_VisualizeModule)
#' @return result
#' @export
#'
#' @examples
#' data('ProcessedDataLIHC')
#' TargetDirectory <- file.path(getwd(),"Downloads/");dir.create(TargetDirectory)
#' AMARETTOinit <- AMARETTO_Initialize(ProcessedData = ProcessedDataLIHC,
#'                                     NrModules = 2, VarPercentage = 50)
#'
#' AMARETTOresults <- AMARETTO_Run(AMARETTOinit)
#' AMARETTO_ExportResults(AMARETTOinit,AMARETTOresults,TargetDirectory,Heatmaps = FALSE)
AMARETTO_ExportResults <- function(AMARETTOinit, AMARETTOresults,
    data_address, Heatmaps = TRUE, CNV_matrix = NULL,
    MET_matrix = NULL) {
    if (!dir.exists(data_address)) {
        stop("Output directory is not existing.")
    }
    # add a date stamp to the output directory
    output_dir <- paste0("AMARETTOresults_", gsub("-|:",
        "", gsub(" ", "_", Sys.time())))
    dir.create(file.path(data_address, output_dir))
    NrCores <- AMARETTOinit$NrCores
    NrModules <- AMARETTOresults$NrModules
    # parallelize the heatmap production
    # NOTE(review): `1:NrModules` iterates c(1, 0) when NrModules == 0;
    # seq_len(NrModules) would be safer -- confirm NrModules >= 1 always.
    cluster <- parallel::makeCluster(c(rep("localhost",
        NrCores)), type = "SOCK")
    doParallel::registerDoParallel(cluster, cores = NrCores)
    if (Heatmaps == TRUE) {
        foreach::foreach(ModuleNr = 1:NrModules, .packages = c("AMARETTO")) %dopar%
            {
                # One pdf per module holding its heatmap visualisation.
                pdf(file = file.path(data_address,
                    output_dir, paste0("Module_", as.character(ModuleNr),
                        ".pdf")))
                AMARETTO_VisualizeModule(AMARETTOinit,
                    AMARETTOresults, CNV_matrix, MET_matrix,
                    ModuleNr = ModuleNr)
                dev.off()
            }
    }
    parallel::stopCluster(cluster)
    # save rdata files for AMARETTO_Run and
    # AMARETTO_Initialize output
    save(AMARETTOresults, file = file.path(data_address,
        output_dir, "/amarettoResults.RData"))
    save(AMARETTOinit, file = file.path(data_address,
        output_dir, "/amarettoInit.RData"))
    # save some tables that might be useful for further
    # analysis
    write_gct(AMARETTOresults$ModuleData, file.path(data_address,
        output_dir, "/ModuleData_amaretto.gct"))
    write_gct(AMARETTOresults$ModuleMembership, file.path(data_address,
        output_dir, "/ModuleMembership_amaretto.gct"))
    write_gct(AMARETTOresults$RegulatoryProgramData,
        file.path(data_address, output_dir, "/RegulatoryProgramData_amaretto.gct"))
    write_gct(AMARETTOresults$RegulatoryPrograms, file.path(data_address,
        output_dir, "/RegulatoryPrograms_amaretto.gct"))
    readr::write_tsv(as.data.frame(AMARETTOresults$AllGenes),
        file.path(data_address, output_dir, "/AllGenes_amaretto.tsv"))
    readr::write_tsv(as.data.frame(AMARETTOresults$AllRegulators),
        file.path(data_address, output_dir, "/AllRegulators_amaretto.tsv"))
    readr::write_tsv(as.data.frame(AMARETTOresults$NrModules),
        file.path(data_address, output_dir, "/NrModules_amaretto.tsv"))
    # zip the file
    utils::zip(zipfile = file.path(data_address, output_dir),
        files = file.path(data_address, output_dir))
}
#' write_gct
#'
#' Write a matrix/data.frame to disk in GCT format: a "#1.2" version line,
#' a rows/columns dimension line, then a TSV body whose first two columns
#' (Name, Description) both carry the row names.
#'
#' @param data_in Matrix or data.frame to export; row names become the
#'   Name/Description columns.
#' @param file_address Path of the .gct file to write (overwritten).
#'
#' @return result
#' @keywords internal
#' @examples
write_gct <- function(data_in, file_address) {
    # GCT preamble: format version followed by "nrow<TAB>ncol".
    gct_header <- paste0("#1.2\n", nrow(data_in), "\t",
        ncol(data_in))
    # Promote row names into Name/Description columns ahead of the data.
    gct_body <- tibble::rownames_to_column(as.data.frame(data_in), "Name")
    gct_body <- dplyr::mutate(gct_body, Description = Name)
    gct_body <- dplyr::select(gct_body, Name, Description, dplyr::everything())
    write(gct_header, file = file_address, append = FALSE)
    readr::write_tsv(gct_body, file_address, append = TRUE,
        col_names = TRUE)
}
#' computeGisticURL
#'
#' Build the Firehose download URL for the 2016-01-28 GISTIC2 Level 4
#' copy-number archive of a TCGA cohort, unless an explicit URL is supplied.
#'
#' @param url Optional pre-built URL; returned unchanged when non-NULL.
#' @param acronym TCGA cohort acronym substituted into the URL template.
#'
#' @return result
#' @keywords internal
#' @examples
computeGisticURL <- function(url = NULL, acronym = "CHOL") {
    # An explicit URL wins over the template.
    if (is.null(url)) {
        url <- sprintf("http://gdac.broadinstitute.org/runs/analyses__2016_01_28/data/%s/20160128/gdac.broadinstitute.org_%s-TP.CopyNumber_Gistic2.Level_4.2016012800.0.0.tar.gz",
            acronym, acronym)
    }
    url
}
#' cacheResource
#'
#' Look up `resource` in a BiocFileCache; on a cache miss, download and add
#' it. Returns the bfcquery() result row(s) describing the cached file.
#'
#' @param cache A BiocFileCache object. NOTE(review): the default evaluates
#'   a global `TargetDirectory` from the calling environment -- fragile;
#'   pass `cache` explicitly or confirm the global is always in scope.
#' @param resource URL of the resource to cache.
#'
#' @return result
#' @keywords internal
#' @examples
cacheResource <- function(cache = BiocFileCache::BiocFileCache(TargetDirectory),
    resource) {
    # bfcquery is brought into scope via the package's @importFrom.
    chk = bfcquery(cache, resource)
    if (nrow(chk) == 0) {
        # Cache miss: fetch the resource, then re-query for its record.
        message("downloading ", resource)
        BiocFileCache::bfcadd(cache, resource)
        return(bfcquery(cache, resource))
    }
    chk
}
|
/R/amaretto_download.R
|
permissive
|
vjcitn/AMARETTO
|
R
| false
| false
| 13,759
|
r
|
#' AMARETTO_Download
#'
#' Downloads the TCGA data needed for an AMARETTO analysis: gene expression
#' (RNASeq2GeneNorm via curatedTCGAData, saved as an .rds file) and copy
#' number variation (GISTIC2 Level 4 via the Broad Firehose).
#' @param CancerSite TCGA cancer code for data download
#' @param TargetDirectory Directory path to download data. NOTE(review): the
#'   default `TargetDirectory = TargetDirectory` is self-referential and
#'   errors when the argument is omitted -- callers must always supply it.
#'   It is concatenated with paste0 below, so it should end in "/" -- TODO
#'   confirm against callers.
#' @param downloadData TRUE. NOTE(review): stale tag -- this function has no
#'   `downloadData` argument.
#' @return A list with the cancer code plus the directories holding the
#'   expression data (MAdirectory) and the GISTIC2 copy-number data
#'   (CNVdirectory).
#' @importFrom curatedTCGAData curatedTCGAData
#' @importFrom httr GET stop_for_status
#' @importFrom limma strsplit2
#' @importFrom BiocFileCache BiocFileCache bfcadd bfcquery
#' @importFrom doParallel registerDoParallel
#' @importFrom dplyr everything mutate select
#' @importFrom foreach foreach
#' @import grDevices
#' @importFrom parallel makeCluster stopCluster
#' @importFrom readr write_tsv
#' @importFrom tibble rownames_to_column
#' @importFrom utils untar zip
#' @export
#' @examples
#' TargetDirectory <- file.path(getwd(),"Downloads/");dir.create(TargetDirectory)
#' CancerSite <- 'CHOL'
#' DataSetDirectories <- AMARETTO_Download(CancerSite,TargetDirectory = TargetDirectory)
AMARETTO_Download <- function(CancerSite = "CHOL",
    TargetDirectory = TargetDirectory) {
    ori.dir <- getwd()
    message("Downloading Gene Expression and Copy Number Variation data for: ",
        CancerSite, "\n")
    # Cohorts the pipeline has been exercised on; others merely warn.
    Cancers = c("BLCA", "BRCA", "LUAD", "LUSC", "COADREAD",
        "HNSC", "KIRC", "GBM", "OV", "LAML", "UCEC",
        "COAD", "READ")
    if (!(CancerSite %in% Cancers)) {
        message("This TCGA cancer site/type was not tested, continue at your own risk.\n")
    }
    if (!file.exists(TargetDirectory))
        dir.create(TargetDirectory, showWarnings = FALSE)
    TCGA_acronym_uppercase = toupper(CancerSite)
    # Expression: fetch the MultiAssayExperiment and cache it as .rds.
    assays <- c("RNASeq2GeneNorm")
    MAEO <- suppressMessages(curatedTCGAData::curatedTCGAData(CancerSite,
        assays, FALSE))
    saveRDS(MAEO, file = paste0(TargetDirectory, CancerSite,
        "_RNASeq_MAEO.rds"))
    # Copy number: GISTIC2 Level 4 output from the Firehose "analyses" runs.
    dataType = "analyses"
    dataFileTag = "CopyNumber_Gistic2.Level_4"
    message("Searching CNV data for:", CancerSite,
        "\n")
    CNVdirectory = get_firehoseData(saveDir = TargetDirectory,
        TCGA_acronym_uppercase = TCGA_acronym_uppercase,
        dataType = dataType, dataFileTag = dataFileTag)
    # Restore the caller's working directory on exit (defensive; nothing
    # above visibly changes it -- TODO confirm).
    on.exit(setwd(ori.dir))
    return(list(CancerSite = CancerSite, MAdirectory = TargetDirectory,
        CNVdirectory = CNVdirectory))
}
#' get_firehoseData
#'
#' Downloading TCGA dataset via firehose
#' @param downloadData
#' @param saveDir
#' @param TCGA_acronym_uppercase
#' @param dataType
#' @param dataFileTag
#' @param FFPE
#' @param fileType
#' @param gdacURL
#' @param untarUngzip
#' @param printDisease_abbr
#'
#' @return result
#' @keywords internal
#' @examples
get_firehoseData <- function(downloadData = TRUE, saveDir = "./",
    TCGA_acronym_uppercase = "LUAD", dataType = "stddata",
    dataFileTag = "mRNAseq_Preprocess.Level_3", FFPE = FALSE,
    fileType = "tar.gz", gdacURL = "http://gdac.broadinstitute.org/runs/",
    untarUngzip = TRUE, printDisease_abbr = FALSE) {
    # Downloads a Broad GDAC Firehose archive for one TCGA disease, unpacks
    # it, and returns the path of the unpacked data directory.  Returns NA
    # when no matching data file is found for the query, and NULL when the
    # acronym is not recognized.
    ori.dir <- getwd()
    # BUGFIX: register the working-directory restore up front.  In the
    # original code this on.exit() was the very last statement of the
    # function, after every return(), so it could never be registered.
    on.exit(setwd(ori.dir), add = TRUE)
    # BUGFIX: the download section below referenced the globals
    # `TargetDirectory` and `CancerSite`, which were never defined in this
    # function; bind them to the corresponding arguments.
    TargetDirectory <- saveDir
    CancerSite <- TCGA_acronym_uppercase
    # Cases Shipped by BCR # Cases with Data* Date Last
    # Updated (mm/dd/yy)
    cancers <- c("Acute Myeloid Leukemia [LAML] \n",
        "Adrenocortical carcinoma [ACC]\t\n", "Bladder Urothelial Carcinoma [BLCA] \n",
        "Brain Lower Grade Glioma [LGG] \n", "Breast invasive carcinoma [BRCA] \n",
        "Cervical squamous cell carcinoma and endocervical adenocarcinoma [CESC] \n",
        "Cholangiocarcinoma [CHOL] \n", "Colon adenocarcinoma [COAD] \n",
        "Esophageal carcinoma [ESCA] \n", "Glioblastoma multiforme [GBM] \n",
        "Head and Neck squamous cell carcinoma [HNSC]\t\n",
        "Kidney Chromophobe [KICH]\t\n", "Kidney renal clear cell carcinoma [KIRC]\t\n",
        "Kidney renal papillary cell carcinoma [KIRP]\t\n",
        "Liver hepatocellular carcinoma [LIHC]\t\n",
        "Lung adenocarcinoma [LUAD]\t\n", "Lung squamous cell carcinoma [LUSC] \n",
        "Lymphoid Neoplasm Diffuse Large B-cell Lymphoma [DLBC]\t\n",
        "Mesothelioma [MESO] \n", "Ovarian serous cystadenocarcinoma [OV]\t\n",
        "Pancreatic adenocarcinoma [PAAD]\t\n", "Pheochromocytoma and Paraganglioma [PCPG] \n",
        "Prostate adenocarcinoma [PRAD] \n", "Rectum adenocarcinoma [READ]\t\n",
        "Sarcoma [SARC]\t\n", "Skin Cutaneous Melanoma [SKCM]\t\n",
        "Stomach adenocarcinoma [STAD] \n", "Testicular Germ Cell Tumors [TGCT] \n",
        "Thymoma [THYM] \n", "Thyroid carcinoma [THCA]\t\n",
        "Uterine Carcinosarcoma [UCS]\t \n", "Uterine Corpus Endometrial Carcinoma [UCEC]\t\n",
        "Uveal Melanoma [UVM] \n")
    # BUGFIX: "KIRP" (listed in `cancers` above) was missing from this
    # vector, so KIRP queries silently did nothing.
    cancers_acronyms <- c("LAML", "ACC", "BLCA", "LGG",
        "BRCA", "CESC", "CHOL", "COAD", "ESCA", "GBM",
        "HNSC", "KICH", "KIRC", "KIRP", "LIHC", "LUAD",
        "LUSC", "DLBC", "MESO", "OV", "PAAD", "PCPG",
        "PRAD", "READ", "SARC", "SKCM", "STAD", "TGCT",
        "THYM", "THCA", "UCS", "UCEC", "UVM")
    if (printDisease_abbr) {
        # BUGFIX: was message(cat(...)), which prints through cat() and then
        # messages cat()'s NULL return value.  Build the string explicitly.
        message("Here are the possible TCGA database disease acronyms. \nRe-run this function with printDisease_abbr=FALSE to then run an actual query.\n\n",
            paste(cancers, collapse = " "))
    }
    if (TCGA_acronym_uppercase %in% cancers_acronyms) {
        gdacURL_orig <- gdacURL
        # Scrape the run index page for the dates of available <dataType> runs.
        urlData <- web.lnk <- httr::GET(gdacURL)
        urlData <- limma::strsplit2(urlData, paste(dataType,
            "__", sep = ""))
        urlData <- urlData[, 2:dim(urlData)[2]]
        urlData <- limma::strsplit2(urlData, "/")
        urlData <- urlData[, 1]
        urlData <- as.POSIXct(strptime(urlData, "%Y_%m_%d"))
        dateData <- as.Date(as.character(urlData[which(!is.na(urlData))]))
        # Most recent run date, kept both as "2016_01_28" and "20160128"
        # because the GDAC URL scheme uses both formats.
        lastDate <- dateData[match(summary(dateData)[which(names(summary(dateData)) ==
            "Max.")], dateData)]
        lastDate <- gsub("-", "_", as.character(lastDate))
        lastDateCompress <- gsub("_", "", lastDate)
        gdacURL <- paste(gdacURL, dataType, "__", lastDate,
            "/data/", TCGA_acronym_uppercase, "/",
            lastDateCompress, "/", sep = "")
        urlData <- web.lnk <- httr::GET(gdacURL)
        urlData <- limma::strsplit2(urlData, "href=\\\"")
        # The newest run may not be posted for this disease yet; walk
        # backwards through the run dates until one resolves.
        while (length(grep("was not found", urlData)) >
            0) {
            message(paste0("\tNOTE: the TCGA run dated ",
                lastDate, " for ", TCGA_acronym_uppercase,
                " isn't available for download yet. \n"))
            message("\tTaking the run dated just before this one.\n")
            dateData <- dateData[-which(dateData ==
                (summary(dateData)[which(names(summary(dateData)) ==
                  "Max.")]))]
            lastDate <- dateData[match(summary(dateData)[which(names(summary(dateData)) ==
                "Max.")], dateData)]
            lastDate <- gsub("-", "_", as.character(lastDate))
            lastDateCompress <- gsub("_", "", lastDate)
            gdacURL <- paste(gdacURL_orig, dataType,
                "__", lastDate, "/data/", TCGA_acronym_uppercase,
                "/", lastDateCompress, "/", sep = "")
            urlData <- web.lnk <- httr::GET(gdacURL)
            urlData <- limma::strsplit2(urlData, "href=\\\"")
            if (length(dateData) <= 1) {
                break
            }
        }
        # BUGFIX: typo "FALIED" -> "FAILED" in the error message.
        httr::stop_for_status(web.lnk, task = "FAILED to download input TCGA data type")
        # Keep (or drop) FFPE samples according to the FFPE flag.
        if (FFPE) {
            urlData <- urlData[grep("FFPE", urlData)]
            if (length(urlData) == 0) {
                stop("\nNo FFPE data found for this query. Try FFPE=FALSE.\n")
            }
        } else {
            if (length(grep("FFPE", urlData)) > 0) {
                urlData <- urlData[-grep("FFPE", urlData)]
            }
            if (length(urlData) == 0) {
                stop("\nNo non-FFPE data found for this query. Try FFPE=TRUE.\n")
            }
        }
        fileName <- urlData[grep(dataFileTag, urlData)]
        if (length(fileName) == 0) {
            warnMessage <- paste0("\nNot returning any viable url data paths after searching by date for disease ",
                TCGA_acronym_uppercase, " \tfor data type ",
                dataFileTag, ".No data was downloaded.\n")
            warning(warnMessage)
            return(NA)
        }
        fileName <- limma::strsplit2(fileName, "tar.gz")[1,
            1]
        fileName <- paste(fileName, fileType, sep = "")
        gdacURL <- paste(gdacURL, fileName, sep = "")
        # Fetch through BiocFileCache and unpack next to the save directory.
        # NOTE(review): `untarUngzip` is not consulted here -- the archive is
        # always unpacked; confirm whether that argument should gate this.
        cancer_url <- computeGisticURL(url = gdacURL)
        cache_target <- cacheResource(resource = cancer_url)
        utils::untar(cache_target$rpath, exdir = TargetDirectory)
        DownloadedFile <- list.dirs(TargetDirectory,
            full.names = TRUE)[grep(CancerSite, list.dirs(TargetDirectory,
            full.names = TRUE))]
        DownloadedFile <- paste0(DownloadedFile, "/")
        return(DownloadedFile)
    }
}
#' AMARETTO_ExportResults
#'
#' Retrieve a download of all the data linked with the run (including heatmaps)
#' @param AMARETTOinit AMARETTO initialize output
#' @param AMARETTOresults AMARETTO results output
#' @param data_address Directory to save data folder
#' @param Heatmaps Output heatmaps as pdf
#' @param CNV_matrix CNV_matrix
#' @param MET_matrix MET_matrix
#' @return result
#' @export
#'
#' @examples
#' data('ProcessedDataLIHC')
#' TargetDirectory <- file.path(getwd(),"Downloads/");dir.create(TargetDirectory)
#' AMARETTOinit <- AMARETTO_Initialize(ProcessedData = ProcessedDataLIHC,
#' NrModules = 2, VarPercentage = 50)
#'
#' AMARETTOresults <- AMARETTO_Run(AMARETTOinit)
#' AMARETTO_ExportResults(AMARETTOinit,AMARETTOresults,TargetDirectory,Heatmaps = FALSE)
AMARETTO_ExportResults <- function(AMARETTOinit, AMARETTOresults,
    data_address, Heatmaps = TRUE, CNV_matrix = NULL,
    MET_matrix = NULL) {
    # Writes all AMARETTO outputs (RData objects, GCT/TSV tables and,
    # optionally, one heatmap PDF per module) into a time-stamped
    # sub-directory of `data_address`, then zips that directory.
    if (!dir.exists(data_address)) {
        stop("Output directory is not existing.")
    }
    # Time-stamped output directory so repeated exports never collide.
    output_dir <- paste0("AMARETTOresults_", gsub("-|:",
        "", gsub(" ", "_", Sys.time())))
    dir.create(file.path(data_address, output_dir))
    NrCores <- AMARETTOinit$NrCores
    NrModules <- AMARETTOresults$NrModules
    if (Heatmaps == TRUE) {
        # Parallelize heatmap production.  BUGFIX: the cluster is now only
        # created when heatmaps are requested (the original always created
        # one), and it is shut down via on.exit() so it is not leaked when
        # a module visualization errors out mid-loop.
        cluster <- parallel::makeCluster(c(rep("localhost",
            NrCores)), type = "SOCK")
        doParallel::registerDoParallel(cluster, cores = NrCores)
        on.exit(parallel::stopCluster(cluster), add = TRUE)
        foreach::foreach(ModuleNr = 1:NrModules, .packages = c("AMARETTO")) %dopar%
            {
                pdf(file = file.path(data_address,
                  output_dir, paste0("Module_", as.character(ModuleNr),
                    ".pdf")))
                AMARETTO_VisualizeModule(AMARETTOinit,
                  AMARETTOresults, CNV_matrix, MET_matrix,
                  ModuleNr = ModuleNr)
                dev.off()
            }
    }
    # Save rdata files for AMARETTO_Run and AMARETTO_Initialize output.
    # (Leading "/" removed from the file-name components: file.path()
    # already inserts the separator, so the originals produced "//".)
    save(AMARETTOresults, file = file.path(data_address,
        output_dir, "amarettoResults.RData"))
    save(AMARETTOinit, file = file.path(data_address,
        output_dir, "amarettoInit.RData"))
    # Save tables that are useful for further analysis.
    write_gct(AMARETTOresults$ModuleData, file.path(data_address,
        output_dir, "ModuleData_amaretto.gct"))
    write_gct(AMARETTOresults$ModuleMembership, file.path(data_address,
        output_dir, "ModuleMembership_amaretto.gct"))
    write_gct(AMARETTOresults$RegulatoryProgramData,
        file.path(data_address, output_dir, "RegulatoryProgramData_amaretto.gct"))
    write_gct(AMARETTOresults$RegulatoryPrograms, file.path(data_address,
        output_dir, "RegulatoryPrograms_amaretto.gct"))
    readr::write_tsv(as.data.frame(AMARETTOresults$AllGenes),
        file.path(data_address, output_dir, "AllGenes_amaretto.tsv"))
    readr::write_tsv(as.data.frame(AMARETTOresults$AllRegulators),
        file.path(data_address, output_dir, "AllRegulators_amaretto.tsv"))
    readr::write_tsv(as.data.frame(AMARETTOresults$NrModules),
        file.path(data_address, output_dir, "NrModules_amaretto.tsv"))
    # Bundle the whole output directory into a zip archive.
    utils::zip(zipfile = file.path(data_address, output_dir),
        files = file.path(data_address, output_dir))
}
#' write_gct
#'
#' @param data_in matrix or data frame to write; row names become the Name/Description columns.
#' @param file_address path of the output \code{.gct} file.
#'
#' @return result
#' @keywords internal
#' @examples
write_gct <- function(data_in, file_address) {
    # Write `data_in` to `file_address` in GCT format: a "#1.2" version
    # line, a dimensions line, then a TSV body whose first two columns are
    # Name and Description (both taken from the row names).
    header_gct <- paste0("#1.2\n", nrow(data_in), "\t",
        ncol(data_in))
    body <- tibble::rownames_to_column(as.data.frame(data_in), "Name")
    body <- dplyr::mutate(body, Description = Name)
    body <- dplyr::select(body, Name, Description, dplyr::everything())
    # Header first (overwriting any existing file), then the data rows.
    write(header_gct, file = file_address, append = FALSE)
    readr::write_tsv(body, file_address, append = TRUE,
        col_names = TRUE)
}
#' computeGisticURL
#'
#' @param url optional explicit URL; if non-\code{NULL} it is returned unchanged.
#' @param acronym TCGA disease acronym used to construct the GISTIC2 archive URL.
#'
#' @return result
#' @keywords internal
#' @examples
computeGisticURL <- function(url = NULL, acronym = "CHOL") {
    # An explicitly supplied URL always wins over the constructed one.
    if (!is.null(url)) {
        return(url)
    }
    # Build the GDAC Firehose GISTIC2 archive URL for the 2016-01-28
    # analyses run of the given disease acronym.
    base <- "http://gdac.broadinstitute.org/runs/analyses__2016_01_28/data"
    archive <- sprintf("gdac.broadinstitute.org_%s-TP.CopyNumber_Gistic2.Level_4.2016012800.0.0.tar.gz",
        acronym)
    paste(base, acronym, "20160128", archive, sep = "/")
}
#' cacheResource
#'
#' @param cache
#' @param resource
#'
#' @return result
#' @keywords internal
#' @examples
cacheResource <- function(cache = BiocFileCache::BiocFileCache(TargetDirectory),
    resource) {
    # Look up `resource` in the BiocFileCache; on a miss, download and add
    # it.  Returns the bfcquery() result row(s) describing the cached copy
    # (its `rpath` column is the local file path).
    # NOTE(review): the default `cache` reads the global `TargetDirectory`,
    # which must exist in the calling environment -- confirm upstream.
    # BUGFIX: bfcquery() was called without its namespace prefix; it is
    # only guaranteed to be found when BiocFileCache is attached.
    chk <- BiocFileCache::bfcquery(cache, resource)
    if (nrow(chk) == 0) {
        message("downloading ", resource)
        BiocFileCache::bfcadd(cache, resource)
        return(BiocFileCache::bfcquery(cache, resource))
    }
    chk
}
|
library(rlist)
M <- m <- 20
N <- 19
phi <- function(t1.index, t2.index, m) {
  # Coefficient for the (t1, t2) entry: lag-1 neighbours get a quadratic
  # function of the scaled time index, every other pair gets zero.
  if (t1.index - t2.index == 1) {
    2 * (t1.index / m)^2 - 0.5
  } else {
    0
  }
}
grid <- expand.grid(t=1:M,s=1:M) %>% subset(.,t>s) %>%
transform(.,l=t-s,
m=(t+s)/2)
# true_phi <- sapply(1:nrow(grid),function(row.i){
# phi(grid$t[row.i],grid$s[row.i],m=m)
# })
# grid <- transform(grid,true_phi=true_phi)
#
#
#
# indices_of_nonzeros <- as.matrix(expand.grid(t=(1:m),s=(1:m)) %>% subset(.,(t-s)==1))
# nonzero_phis <- (2*((2:m)/m)^2)-0.5
#
# T_mat <- diag(rep(1,m))
# phis <- as.vector(rep(0,sum(lower.tri(T_mat))))
# T_mat[indices_of_nonzeros] <- -nonzero_phis
# phis <- -T_mat[lower.tri(T_mat)]
phi <- function(t1.index, t2.index, m) {
  # Entry of the true Cholesky factor T: unit diagonal, exponential decay
  # in the lag below the diagonal, zero above it.
  lag <- t1.index - t2.index
  if (lag > 0) {
    -exp(-2 * lag / (m - 1))
  } else if (lag == 0) {
    1
  } else {
    0
  }
}
# Evaluate the true Cholesky factor over the full (t, s) grid, ordered
# row-by-row so that matrix(..., byrow = TRUE) below fills T correctly.
full_grid <- expand.grid(t=1:m,s=1:m) %>% orderBy(~t+s,.)
true_T <- sapply(1:nrow(full_grid),function(row.i){
  phi(full_grid$t[row.i],full_grid$s[row.i],m=m)
}) %>%unlist
T_mat <- matrix(data=true_T,nrow=m,ncol=m,byrow=TRUE)
T_mat
# Simulate N draws of an m-vector satisfying T y = e with e ~ N(0, 0.3^2 I),
# i.e. y has covariance Sigma = T^{-1} T^{-T}; rows of y are subjects.
y <- t(solve(T_mat)%*%matrix(data=rnorm(N*m,mean=0,sd=0.3),
             nrow=m,
             ncol=N))
true_Sigma <- solve(T_mat)%*%t(solve(T_mat))
true_Omega <- t(T_mat)%*%T_mat
# Stack responses y_t (t = 2..M) for all subjects into one vector and build
# the design matrix X: the row for (subject, t) holds that subject's earlier
# responses y_1..y_{t-1}, the regressors for the below-diagonal coefficients.
y_vec <- as.vector(t(y[,-1]))
X <- matrix(data=0,nrow=N*(M-1),ncol=choose(M,2))
no.skip <- 0
for (t in 2:M){
  X[((0:(N-1))*(M-1)) + t-1,(no.skip+1):(no.skip+t-1)] <- y[,1:(t-1)]
  no.skip <- no.skip + t - 1
}
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
## define basis functions, representers
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
# Scaled Bernoulli-polynomial basis functions on [0, 1], used to build the
# smoothing-spline reproducing kernel below.
k0 <- function(x) rep(1, length(x))

k1 <- function(x) x - 0.5

k2 <- function(x) ((x - 0.5)^2 - 1 / 12) / 2

k4 <- function(x) {
  u <- x - 0.5
  (u^4 - u^2 / 2 + 7 / 240) / 24
}
R1 <- function(l1, l2, m) {
  # Reproducing kernel on [0, 1] built from the Bernoulli-polynomial basis:
  # order m = 1 includes the linear term k1*k1, order m = 2 drops it.
  shared <- k2(l1) * k2(l2) - k4(abs(l1 - l2))
  if (m == 1) {
    representer <- k1(l1) * k1(l2) + shared
  }
  if (m == 2) {
    representer <- shared
  }
  representer
}
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
## construct B, K
#-----------------------------------------------------------------------------------------
R1_l <- function(l1,l2){R1(l1,l2,m=1)}
R1_L <- outer(grid$l/max(grid$l), grid$l/max(grid$l), "R1_l")
R1_m <- function(m1,m2){R1(m1,m2,m=1)}
R1_M <- outer(grid$m/max(grid$m), grid$m/max(grid$m), "R1_m")
R1_LM <- R1_L*R1_M + outer(k1(grid$l/max(grid$l)),k1(grid$l/max(grid$l)))*R1_M
K <- R1_L + R1_M + R1_LM
# B <- matrix(data=c(rep(1,nrow(grid)),k1(grid$l/max(grid$l))*k1(grid$m/max(grid$m))),
# nrow=nrow(grid),ncol=2,byrow=FALSE)
B <- matrix(data=c(rep(1,nrow(grid))),ncol=1,nrow=nrow(grid),byrow=FALSE)
QR_B <- qr(B,complete=TRUE)
Q_B <- qr.Q(QR_B,complete=TRUE)
Q2_B <- Q_B[,(ncol(B)+1):ncol(Q_B)]
Q1_B <- Q_B[,1:ncol(B)]
R_B.big <- qr.R(QR_B,complete=TRUE)
R_B <- R_B.big[1:ncol(B),]
R_Binv <- solve(R_B)
Dinv <- diag(rep(1,length(y_vec)))
QR_X <- qr(X,complete=TRUE)
Q_X <- qr.Q(QR_X,complete=TRUE)
Q2_X <- Q_X[,(ncol(B)+1):ncol(Q_X)]
Q1_X <- Q_X[,1:ncol(X)]
R_X.big <- qr.R(QR_X,complete=TRUE)
R_X <- R_X.big[1:ncol(X),]
R_Xinv <- solve(R_X)
#-----------------------------------------------------------------------------------------
## Build solutions
#-----------------------------------------------------------------------------------------
lambdas <- as.list(exp(seq(-1,5,length.out=100)))
P <- solve(t(X)%*%Dinv%*%X)
Ms <- lapply(lambdas,function(l){
M <- solve( t(Q2_B)%*%(K + l*P)%*%Q2_B )
M
})
c <- lapply(Ms,function(mat){
Q2_B%*%mat%*%t(Q2_B)%*% P %*%t(X)%*%Dinv%*%y_vec
})
d <- lapply(list.zip(lam=lambdas,c=c),function(l){
d <- R_Binv%*%t(Q1_B)%*%( P%*%t(X)%*%Dinv%*%y_vec - ( K + l$lam*P )%*%l$c )
})
cholesky <- lapply(list.zip(c=c,d=d),function(l){
Phi <- B%*%l$d + K%*%l$c
T_hat <- diag(rep(1,m))
T_hat[lower.tri(T_hat)] <- -Phi
list(phi=Phi,T_mat=T_hat,omega=t(T_hat)%*%T_hat)
})
entropy_loss <- function(trueSigma, omegaHat) {
  # Stein / entropy loss: tr(S %*% Omega) - log det(S %*% Omega) - p,
  # which is zero exactly when omegaHat = solve(trueSigma).
  prod_mat <- trueSigma %*% omegaHat
  sum(diag(prod_mat)) - log(det(prod_mat)) - ncol(omegaHat)
}
lapply(cholesky,function(l){
entropy_loss(true_Sigma,l$omega)
}) %>% unlist %>% plot(x=log(unlist(lambdas)),
y=.,
type="l",
ylab=expression(Delta[1]),
xlab=expression(log(lambda)))
quadratic_loss <- function(trueSigma, omegaHat) {
  # Quadratic loss: sum of squared diagonal deviations of S %*% Omega
  # from the identity matrix.
  p <- ncol(omegaHat)
  deviations <- diag(trueSigma %*% omegaHat - diag(1, p))
  sum(deviations^2)
}
lapply(cholesky,function(l){
quadratic_loss(true_Sigma,l$omega)
}) %>% unlist %>% plot(x=log(unlist(lambdas)),
y=.,
type="l",
ylab=expression(Delta[2]),
xlab=expression(log(lambda)))
Rl_gg <- sapply(grid$l/max(grid$l),function(grid_l) {sapply(seq(0,1,length.out=200),function(pred_l){R1_l(pred_l,grid_l)})})
Bl_gg <- matrix(data=c( rep(1,200), k1(seq(0,1,length.out=200))),nrow=200,ncol=2,byrow=FALSE)
l_smooth <- list.zip(c=c,d=d) %>% lapply(.,function(l){
Rl_gg%*%l$c #+ Bl_gg%*%l$d
})
Rm_gg <- sapply(grid$m/max(grid$m),function(grid_m) {sapply(seq(0,1,length.out=200),function(pred_m){R1_m(pred_m,grid_m)})})
Bm_gg <- matrix(data=c( rep(1,200), k1(seq(0,1,length.out=200))),nrow=200,ncol=2,byrow=FALSE)
m_smooth <- list.zip(c=c,d=d) %>% lapply(.,function(l){
Rm_gg%*%l$c #+Bm_gg%*%l$d
})
l_smooth <- list.cbind(l_smooth)
m_smooth <- list.cbind(m_smooth)
matplot(seq(0,1,length.out=200),l_smooth,
col=terrain.colors(100,alpha=0.7),type="l",
xlab="l",
ylab= expression(phi[l]))
matplot(seq(0,1,length.out=200),m_smooth,
col=terrain.colors(100,alpha=0.7),type="l",
xlab="m",
ylab= expression(phi[m]))
gg <- expand.grid(l=seq(0,1,length.out=200),
m=seq(0,1,length.out=200))
Rl_gg <- sapply(grid$l/max(grid$l),
function(grid_l){
sapply(gg$l,
function(pred_l){
R1_l(pred_l,grid_l)
})})
Rm_gg <- sapply(grid$m/max(grid$m),
function(grid_m){
sapply(gg$m,
function(pred_m){
R1_m(pred_m,grid_m)
})})
lm_smooth <- lapply(c,function(coef){
as.vector((Rl_gg*Rm_gg)%*%coef)
})
jet.colors <- colorRampPalette( c("deepskyblue2","green") )
nbcol <- 100
color <- jet.colors(nbcol)
lm_smooth[[lapply(cholesky,function(l){quadratic_loss(true_Sigma,l$omega)}) %>% unlist %>%which.min]] %>%
data.frame(gg,phi_lm=.) %>% wireframe(phi_lm~l+m,
data=.,
screen=list(z=20,x=-75),
light.source = c(5,20,10),
col="grey",
scales = list(arrows = FALSE),
drape=FALSE,
cex=0.15,
colorkey=FALSE,
par.settings = list(axis.line = list(col = "transparent")))
lm_smooth <- list.cbind(lm_smooth) %>% as.vector
library(ggplot2)
library(ggthemes)
best_phi_lm <- lm_smooth[[lapply(cholesky,function(l){quadratic_loss(true_Sigma,l$omega)}) %>% unlist %>%which.min]]
best_phi_m <- m_smooth[[lapply(cholesky,function(l){quadratic_loss(true_Sigma,l$omega)}) %>% unlist %>%which.min]]
true_phi_m <- data.frame(m=seq(0,1,length.out=200),
phi=2*(seq(0,1,length.out=200)^2 + seq(0,1,length.out=200) ) )
library(doBy)
data.frame(lambda=expand.grid(1:40000,lam=unlist(lambdas))$lam,
phi=lm_smooth,
l=rep(gg$l,length(lambdas)),
m=rep(gg$m,length(lambdas))) %>%
ggplot(.,aes(x=m,y=phi,group=lambda)) + geom_line(aes(colour=lambda)) +
scale_color_continuous_tableau(palette = "Green") +
theme_minimal()
best_phi_lm <- lm_smooth[[lapply(cholesky,function(l){quadratic_loss(true_Sigma,l$omega)}) %>% unlist %>%which.min]]
best_phi_m <- m_smooth[[lapply(cholesky,function(l){quadratic_loss(true_Sigma,l$omega)}) %>% unlist %>%which.min]]
true_phi_m <- data.frame(m=seq(0,1,length.out=200),
phi=2*(seq(0,1,length.out=200)^2 + seq(0,1,length.out=200) ) )
data.frame(phi=best_phi_m+best_phi_lm,gg) %>%
subset(.,l%in%seq(0,1,length.out=200)[c(1:5,seq(25,200,by=25))]) %>%
ggplot(.,aes(x=m,y=phi,group=l)) + geom_line(aes(colour=l)) +
scale_color_continuous_tableau(palette = "Green") +
theme_minimal() +
geom_line(data=true_phi_m,
aes(x=m,y=phi),
inherit.aes = FALSE,
colour="red")
|
/code/simulations/smoothing_spline_cholesky.R
|
no_license
|
taylerablake/Dissertation
|
R
| false
| false
| 9,859
|
r
|
library(rlist)
M <- m <- 20
N <- 19
phi <- function(t1.index, t2.index, m) {
  # Coefficient for the (t1, t2) entry: lag-1 neighbours get a quadratic
  # function of the scaled time index, every other pair gets zero.
  if (t1.index - t2.index == 1) {
    2 * (t1.index / m)^2 - 0.5
  } else {
    0
  }
}
grid <- expand.grid(t=1:M,s=1:M) %>% subset(.,t>s) %>%
transform(.,l=t-s,
m=(t+s)/2)
# true_phi <- sapply(1:nrow(grid),function(row.i){
# phi(grid$t[row.i],grid$s[row.i],m=m)
# })
# grid <- transform(grid,true_phi=true_phi)
#
#
#
# indices_of_nonzeros <- as.matrix(expand.grid(t=(1:m),s=(1:m)) %>% subset(.,(t-s)==1))
# nonzero_phis <- (2*((2:m)/m)^2)-0.5
#
# T_mat <- diag(rep(1,m))
# phis <- as.vector(rep(0,sum(lower.tri(T_mat))))
# T_mat[indices_of_nonzeros] <- -nonzero_phis
# phis <- -T_mat[lower.tri(T_mat)]
phi <- function(t1.index, t2.index, m) {
  # Entry of the true Cholesky factor T: unit diagonal, exponential decay
  # in the lag below the diagonal, zero above it.
  lag <- t1.index - t2.index
  if (lag > 0) {
    -exp(-2 * lag / (m - 1))
  } else if (lag == 0) {
    1
  } else {
    0
  }
}
full_grid <- expand.grid(t=1:m,s=1:m) %>% orderBy(~t+s,.)
true_T <- sapply(1:nrow(full_grid),function(row.i){
phi(full_grid$t[row.i],full_grid$s[row.i],m=m)
}) %>%unlist
T_mat <- matrix(data=true_T,nrow=m,ncol=m,byrow=TRUE)
T_mat
y <- t(solve(T_mat)%*%matrix(data=rnorm(N*m,mean=0,sd=0.3),
nrow=m,
ncol=N))
true_Sigma <- solve(T_mat)%*%t(solve(T_mat))
true_Omega <- t(T_mat)%*%T_mat
y_vec <- as.vector(t(y[,-1]))
X <- matrix(data=0,nrow=N*(M-1),ncol=choose(M,2))
no.skip <- 0
for (t in 2:M){
X[((0:(N-1))*(M-1)) + t-1,(no.skip+1):(no.skip+t-1)] <- y[,1:(t-1)]
no.skip <- no.skip + t - 1
}
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
## define basis functions, representers
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
# Scaled Bernoulli-polynomial basis functions on [0, 1], used to build the
# smoothing-spline reproducing kernel below.
k0 <- function(x) rep(1, length(x))

k1 <- function(x) x - 0.5

k2 <- function(x) ((x - 0.5)^2 - 1 / 12) / 2

k4 <- function(x) {
  u <- x - 0.5
  (u^4 - u^2 / 2 + 7 / 240) / 24
}
R1 <- function(l1, l2, m) {
  # Reproducing kernel on [0, 1] built from the Bernoulli-polynomial basis:
  # order m = 1 includes the linear term k1*k1, order m = 2 drops it.
  shared <- k2(l1) * k2(l2) - k4(abs(l1 - l2))
  if (m == 1) {
    representer <- k1(l1) * k1(l2) + shared
  }
  if (m == 2) {
    representer <- shared
  }
  representer
}
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
## construct B, K
#-----------------------------------------------------------------------------------------
R1_l <- function(l1,l2){R1(l1,l2,m=1)}
R1_L <- outer(grid$l/max(grid$l), grid$l/max(grid$l), "R1_l")
R1_m <- function(m1,m2){R1(m1,m2,m=1)}
R1_M <- outer(grid$m/max(grid$m), grid$m/max(grid$m), "R1_m")
R1_LM <- R1_L*R1_M + outer(k1(grid$l/max(grid$l)),k1(grid$l/max(grid$l)))*R1_M
K <- R1_L + R1_M + R1_LM
# B <- matrix(data=c(rep(1,nrow(grid)),k1(grid$l/max(grid$l))*k1(grid$m/max(grid$m))),
# nrow=nrow(grid),ncol=2,byrow=FALSE)
B <- matrix(data=c(rep(1,nrow(grid))),ncol=1,nrow=nrow(grid),byrow=FALSE)
QR_B <- qr(B,complete=TRUE)
Q_B <- qr.Q(QR_B,complete=TRUE)
Q2_B <- Q_B[,(ncol(B)+1):ncol(Q_B)]
Q1_B <- Q_B[,1:ncol(B)]
R_B.big <- qr.R(QR_B,complete=TRUE)
R_B <- R_B.big[1:ncol(B),]
R_Binv <- solve(R_B)
Dinv <- diag(rep(1,length(y_vec)))
QR_X <- qr(X,complete=TRUE)
Q_X <- qr.Q(QR_X,complete=TRUE)
Q2_X <- Q_X[,(ncol(B)+1):ncol(Q_X)]
Q1_X <- Q_X[,1:ncol(X)]
R_X.big <- qr.R(QR_X,complete=TRUE)
R_X <- R_X.big[1:ncol(X),]
R_Xinv <- solve(R_X)
#-----------------------------------------------------------------------------------------
## Build solutions
#-----------------------------------------------------------------------------------------
lambdas <- as.list(exp(seq(-1,5,length.out=100)))
P <- solve(t(X)%*%Dinv%*%X)
Ms <- lapply(lambdas,function(l){
M <- solve( t(Q2_B)%*%(K + l*P)%*%Q2_B )
M
})
c <- lapply(Ms,function(mat){
Q2_B%*%mat%*%t(Q2_B)%*% P %*%t(X)%*%Dinv%*%y_vec
})
d <- lapply(list.zip(lam=lambdas,c=c),function(l){
d <- R_Binv%*%t(Q1_B)%*%( P%*%t(X)%*%Dinv%*%y_vec - ( K + l$lam*P )%*%l$c )
})
cholesky <- lapply(list.zip(c=c,d=d),function(l){
Phi <- B%*%l$d + K%*%l$c
T_hat <- diag(rep(1,m))
T_hat[lower.tri(T_hat)] <- -Phi
list(phi=Phi,T_mat=T_hat,omega=t(T_hat)%*%T_hat)
})
entropy_loss <- function(trueSigma, omegaHat) {
  # Stein / entropy loss: tr(S %*% Omega) - log det(S %*% Omega) - p,
  # which is zero exactly when omegaHat = solve(trueSigma).
  prod_mat <- trueSigma %*% omegaHat
  sum(diag(prod_mat)) - log(det(prod_mat)) - ncol(omegaHat)
}
lapply(cholesky,function(l){
entropy_loss(true_Sigma,l$omega)
}) %>% unlist %>% plot(x=log(unlist(lambdas)),
y=.,
type="l",
ylab=expression(Delta[1]),
xlab=expression(log(lambda)))
quadratic_loss <- function(trueSigma, omegaHat) {
  # Quadratic loss: sum of squared diagonal deviations of S %*% Omega
  # from the identity matrix.
  p <- ncol(omegaHat)
  deviations <- diag(trueSigma %*% omegaHat - diag(1, p))
  sum(deviations^2)
}
lapply(cholesky,function(l){
quadratic_loss(true_Sigma,l$omega)
}) %>% unlist %>% plot(x=log(unlist(lambdas)),
y=.,
type="l",
ylab=expression(Delta[2]),
xlab=expression(log(lambda)))
Rl_gg <- sapply(grid$l/max(grid$l),function(grid_l) {sapply(seq(0,1,length.out=200),function(pred_l){R1_l(pred_l,grid_l)})})
Bl_gg <- matrix(data=c( rep(1,200), k1(seq(0,1,length.out=200))),nrow=200,ncol=2,byrow=FALSE)
l_smooth <- list.zip(c=c,d=d) %>% lapply(.,function(l){
Rl_gg%*%l$c #+ Bl_gg%*%l$d
})
Rm_gg <- sapply(grid$m/max(grid$m),function(grid_m) {sapply(seq(0,1,length.out=200),function(pred_m){R1_m(pred_m,grid_m)})})
Bm_gg <- matrix(data=c( rep(1,200), k1(seq(0,1,length.out=200))),nrow=200,ncol=2,byrow=FALSE)
m_smooth <- list.zip(c=c,d=d) %>% lapply(.,function(l){
Rm_gg%*%l$c #+Bm_gg%*%l$d
})
l_smooth <- list.cbind(l_smooth)
m_smooth <- list.cbind(m_smooth)
matplot(seq(0,1,length.out=200),l_smooth,
col=terrain.colors(100,alpha=0.7),type="l",
xlab="l",
ylab= expression(phi[l]))
matplot(seq(0,1,length.out=200),m_smooth,
col=terrain.colors(100,alpha=0.7),type="l",
xlab="m",
ylab= expression(phi[m]))
gg <- expand.grid(l=seq(0,1,length.out=200),
m=seq(0,1,length.out=200))
Rl_gg <- sapply(grid$l/max(grid$l),
function(grid_l){
sapply(gg$l,
function(pred_l){
R1_l(pred_l,grid_l)
})})
Rm_gg <- sapply(grid$m/max(grid$m),
function(grid_m){
sapply(gg$m,
function(pred_m){
R1_m(pred_m,grid_m)
})})
lm_smooth <- lapply(c,function(coef){
as.vector((Rl_gg*Rm_gg)%*%coef)
})
jet.colors <- colorRampPalette( c("deepskyblue2","green") )
nbcol <- 100
color <- jet.colors(nbcol)
lm_smooth[[lapply(cholesky,function(l){quadratic_loss(true_Sigma,l$omega)}) %>% unlist %>%which.min]] %>%
data.frame(gg,phi_lm=.) %>% wireframe(phi_lm~l+m,
data=.,
screen=list(z=20,x=-75),
light.source = c(5,20,10),
col="grey",
scales = list(arrows = FALSE),
drape=FALSE,
cex=0.15,
colorkey=FALSE,
par.settings = list(axis.line = list(col = "transparent")))
lm_smooth <- list.cbind(lm_smooth) %>% as.vector
library(ggplot2)
library(ggthemes)
best_phi_lm <- lm_smooth[[lapply(cholesky,function(l){quadratic_loss(true_Sigma,l$omega)}) %>% unlist %>%which.min]]
best_phi_m <- m_smooth[[lapply(cholesky,function(l){quadratic_loss(true_Sigma,l$omega)}) %>% unlist %>%which.min]]
true_phi_m <- data.frame(m=seq(0,1,length.out=200),
phi=2*(seq(0,1,length.out=200)^2 + seq(0,1,length.out=200) ) )
library(doBy)
data.frame(lambda=expand.grid(1:40000,lam=unlist(lambdas))$lam,
phi=lm_smooth,
l=rep(gg$l,length(lambdas)),
m=rep(gg$m,length(lambdas))) %>%
ggplot(.,aes(x=m,y=phi,group=lambda)) + geom_line(aes(colour=lambda)) +
scale_color_continuous_tableau(palette = "Green") +
theme_minimal()
best_phi_lm <- lm_smooth[[lapply(cholesky,function(l){quadratic_loss(true_Sigma,l$omega)}) %>% unlist %>%which.min]]
best_phi_m <- m_smooth[[lapply(cholesky,function(l){quadratic_loss(true_Sigma,l$omega)}) %>% unlist %>%which.min]]
true_phi_m <- data.frame(m=seq(0,1,length.out=200),
phi=2*(seq(0,1,length.out=200)^2 + seq(0,1,length.out=200) ) )
data.frame(phi=best_phi_m+best_phi_lm,gg) %>%
subset(.,l%in%seq(0,1,length.out=200)[c(1:5,seq(25,200,by=25))]) %>%
ggplot(.,aes(x=m,y=phi,group=l)) + geom_line(aes(colour=l)) +
scale_color_continuous_tableau(palette = "Green") +
theme_minimal() +
geom_line(data=true_phi_m,
aes(x=m,y=phi),
inherit.aes = FALSE,
colour="red")
|
## Trace of a square matrix: the sum of its diagonal elements.
matrix.trace <- function(A) {
  # BUGFIX/idiom: the original accumulated A[i, i] in a `for (i in 1:r)`
  # loop, which errors on a 0-row matrix (1:0 yields c(1, 0)).  diag() is
  # vectorized and handles the empty case, returning 0.
  sum(diag(A))
}
#' Batched FSM for sequential experiments
#'
#' @description
#' Extension of the FSM to cases where units arrive sequentially in batches.
#' @param data_frame Data frame containing a column of unit indices (optional) and covariates (or transformations thereof).
#' @param data_frame_past A data frame of units already allocated to treatment groups.
#' Data frame contains a column of unit indices (optional), columns of covariates (or transformations thereof),
#' and a column for treatment indicator.
#' @param t_ind column name containing the treatment indicator in \code{data_frame_past}.
#' @param SOM Selection Order Matrix.
#' @param s_function Specifies a selection function, a string among \code{'constant'}, \code{'Dopt'},
#' \code{'Aopt'}, \code{'max pc'}, \code{'min pc'}, \code{'Dopt pc'}, \code{'max average'}, \code{'min average'},
#' \code{'Dopt average'}. \code{'constant'} selection function puts a constant value on every unselected unit.
#' \code{'Dopt'} use the D-optimality criteria based on the full set of covariates to select units.
#' \code{'Aopt'} uses the A-optimality criteria. \code{'max pc'} (respectively, \code{'min pc'}) selects that
#' unit that has the maximum (respectively, minimum) value of the first principal component.
#' \code{'Dopt pc'} uses the D-optimality criteria on the first principal component, \code{'max average'}
#' (respectively, \code{'min average'}) selects that unit that has the maximum (respectively, minimum)
#' value of the simple average of the covariates. \code{'Dopt average'} uses the D-optimality criteria on the
#' simple average of the covariates.
#' @param Q_initial A (optional) non-singular matrix (called 'initial matrix') that is added the \eqn{(X^T X)}
#' matrix of the choosing treatment group at any stage, when the \eqn{(X^T X)} matrix of that treatment group
#' at that stage is non-invertible. If \code{FALSE}, the \eqn{(X^T X)} matrix for the full set of observations is used
#' as the non-singular matrix. Applicable if \code{s_function = 'Dopt'} or \code{'Aopt'}.
#' @param eps Proportionality constant for \code{Q_initial}, the default value is 0.001.
#' @param ties Specifies how to deal with ties in the values of the selection function. If \code{ties = 'random'},
#' a unit is selected randomly from the set of candidate units. If \code{ties = 'smallest'}, the unit
#' that appears earlier in the data frame, i.e. the unit with the smallest index gets selected.
#' @param intercept if \code{TRUE}, the design matrix of each treatment group includes a column of intercepts.
#' @param index_col_past \code{TRUE} if column of unit indices is present in \code{data_frame_past}.
#' @param standardize if \code{TRUE}, the columns of the \eqn{X} matrix other than the column for the intercept (if any),
#' are standardized.
#' @param units_print if \code{TRUE}, the function automatically prints the candidate units at each step of selection.
#' @param index_col if \code{TRUE}, data_frame contains a column of unit indices.
#' @param Pol_mat Policy matrix. Applicable only when \code{s_function = 'Aopt'}.
#' @param w_pol A vector of policy weights. Applicable only when \code{s_function = 'Aopt'}.
#' @export
#' @return A list containing the following items.
#'
#' \code{data_frame_allocated}: The original data frame augmented with the column of the treatment indicator.
#'
#' \code{som_appended}: The SOM with augmented columns for the indices and covariate values for units selected.
#'
#' \code{som_split}: som_appended, split by the levels of the treatment.
#'
#' \code{data_frame_allocated_augmented}: data frame combining \code{data_frame_allocated} and \code{data_frame_past}.
#' @author Ambarish Chattopadhyay, Carl N. Morris and Jose R. Zubizarreta
#' @references
#' Chattopadhyay, A., Morris, C. N., and Zubizarreta, J. R. (2020), ``Randomized and Balanced Allocation of Units into Treatment Groups Using the Finite Selection Model for \code{R}'.
#' @examples
#' # Consider N=18, number of treatments = 2, n1 = n2 = 9, batch sizes = 6,6,6.
#' # Get data frame for the first batch.
#' df_sample_1 = data.frame(index = 1:6, age = c(20,30,40,40,50,60))
#' # Obtain SOM for all the 12 units.
#' som_gen = som(data_frame = NULL, n_treat = 2, treat_sizes = c(9,9),
#' include_discard = FALSE, method = 'SCOMARS', marginal_treat = rep((9/18),18), control = FALSE)
#' # Assign the first batch.
#' f1 = fsm(data_frame = df_sample_1, SOM = som_gen[1:6,], s_function = 'Dopt',
#' eps = 0.0001, ties = 'random', intercept = TRUE, standardize = TRUE, units_print = TRUE)
#' f1_app = f1$data_frame_allocated
#' # Get data frame for the second batch.
#' df_sample_2 = data.frame(index = 7:12, age = c(20,30,40,40,50,60))
#' # Assign the second batch.
#' f2 = fsm_batch(data_frame = df_sample_2, SOM = som_gen[7:12,], s_function = 'Dopt',
#' eps = 0.0001, ties = 'random', intercept = TRUE, standardize = TRUE, units_print = TRUE,
#' data_frame_past = f1_app, t_ind = 'Treat', index_col_past = TRUE)
#' f2_app = f2$data_frame_allocated_augmented
#' # Get data frame for the third batch.
#' df_sample_3 = data.frame(index = 13:18, age = c(20,30,40,40,50,60))
#' # Assign the third batch.
#' f3 = fsm_batch(data_frame = df_sample_3, SOM = som_gen[13:18,], s_function = 'Dopt',
#' eps = 0.0001, ties = 'random', intercept = TRUE, standardize = TRUE, units_print = TRUE,
#' data_frame_past = f2_app, t_ind = 'Treat', index_col_past = TRUE)
#' f3_app = f3$data_frame_allocated_augmented
fsm_batch = function(data_frame, data_frame_past, t_ind, SOM, s_function = 'Dopt',
                     Q_initial = NULL, eps = 0.001, ties = 'random', intercept = TRUE,
                     index_col_past = TRUE, standardize = TRUE, units_print = TRUE,
                     index_col = TRUE, Pol_mat = NULL, w_pol = NULL)
{
  # names of all possible selection functions
  sf.names = c('constant', 'Dopt', 'Aopt', 'negative Dopt',
               'max pc', 'min pc', 'Dopt pc', 'max average', 'min average', 'Dopt average',
               'marginal var sum')
  # validate the selection function up front instead of failing mid-allocation
  if((s_function %in% sf.names) == FALSE)
  {
    stop('Invalid selection function')
  }
  if(ncol(SOM)>1)
  {
    som_order = SOM[['Treat']] # treatments should be labelled 1,2,...,g or 0,1,...,g-1
  }
  if(ncol(SOM)==1)
  {
    som_order = SOM[,1] # treatments should be labelled 1,2,...,g or 0,1,...,g-1
  }
  if(index_col == TRUE)
  {
    unit.identity = data_frame[['Index']]
  }
  unit.index = 1:nrow(data_frame)
  g = length(table(som_order)) # no. of treatments
  n = as.vector(table(som_order)) # vector of treatment group sizes
  N = sum(n) # total no. of units in the new batch
  ## build-up phase
  units.selected = rep(0,N)
  if(index_col == TRUE)
  {
    X_cov = as.matrix(data_frame[,-1]) # covariates of the new units (index column dropped)
  }
  if(index_col == FALSE)
  {
    X_cov = as.matrix(data_frame) # covariates of the new units
  }
  k = ncol(X_cov) # no. of covariates
  # total number of past units
  N_past = nrow(data_frame_past)
  # treatment indicator for past units
  Z_past = data_frame_past[,t_ind]
  if(index_col_past == TRUE)
  {
    # drop the index and treatment columns from the past data
    X_cov_past = as.matrix(data_frame_past[,-c(1, which(colnames(data_frame_past) == t_ind))])
  }
  # BUG FIX: this branch previously tested index_col instead of index_col_past,
  # leaving X_cov_past undefined (or wrongly overwritten) whenever the two
  # flags disagreed.
  if(index_col_past == FALSE)
  {
    X_cov_past = as.matrix(data_frame_past[,-which(colnames(data_frame_past) == t_ind)])
  }
  if(standardize == TRUE)
  {
    # standardize each covariate with the location/scale of the augmented
    # (past + new) sample so both design matrices share a common scale
    for(j in 1:ncol(X_cov))
    {
      loc = mean(c(X_cov_past[,j], X_cov[,j]))
      scale = sd(c(X_cov_past[,j], X_cov[,j]))
      X_cov[,j] = (X_cov[,j] - loc)/scale
      X_cov_past[,j] = (X_cov_past[,j] - loc)/scale
    }
  }
  if(intercept == TRUE)
  {
    X_N = as.matrix(cbind(rep(1,N), X_cov)) # N x (k+1) design matrix for the new units
    colnames(X_N) = c('intercept', sprintf('x%d', 1:k))
    X_N_past = as.matrix(cbind(rep(1,N_past), X_cov_past)) # N_past x (k+1) design matrix for the past units
    colnames(X_N_past) = c('intercept', sprintf('x%d', 1:k))
  }
  if(intercept == FALSE)
  {
    X_N = as.matrix(X_cov) # N x k design matrix for the new units
    colnames(X_N) = sprintf('x%d', 1:k)
    X_N_past = as.matrix(X_cov_past) # N_past x k design matrix for the past units
    # BUG FIX: colnames(X_N) was assigned twice here, leaving X_N_past unnamed
    colnames(X_N_past) = sprintf('x%d', 1:k)
  }
  ## set initial nonsingular matrix used to regularize near-singular X'X
  if(is.null(Q_initial) == TRUE)
  {
    X_N_combined = rbind(X_N_past, X_N)
    Q0 = t(X_N_combined)%*%X_N_combined
  } else{
    Q0 = Q_initial
  }
  # quantities that are constant across selection stages are computed once,
  # instead of once per candidate unit as before (same results, less work)
  if(s_function %in% c('max pc', 'min pc', 'Dopt pc'))
  {
    # 1st principal component of the full set of new-unit covariates
    pca = prcomp(X_cov)
    x.pc = pca$x[,1]
  }
  if(s_function == 'Aopt')
  {
    # policy matrix = Pol_mat, vector of policy weights = w_pol
    # (cost is assumed constant)
    Pol_mat = as.matrix(Pol_mat)
    W = diag(w_pol)
    T.mat = t(Pol_mat) %*% W %*% Pol_mat
  }
  # Treatments take turns in selecting the units
  Z = rep(-1,N) # treatment indicator, -1 = not yet allocated
  crit_print = matrix(rep(-1,N*N),nrow = N)
  for(i in 1:N)
  {
    t.index = som_order[i] # treatment group that picks a unit at this stage
    units.current = unit.index[Z == t.index] # new units already in that group
    # design matrix of the past units in the choosing group; drop = FALSE
    # keeps a single matching row as a 1-row matrix
    X_N_group_past = X_N_past[Z_past == t.index, , drop = FALSE]
    # augment the past design matrix with the already-selected new units
    if(length(units.current) == 0)
    {
      X_n = X_N_group_past
    }
    if(length(units.current)>0)
    {
      X_n = rbind(X_N_group_past, X_N[units.current, , drop = FALSE])
    }
    # if the reciprocal condition number indicates near-singularity,
    # regularize X'X with a small multiple of Q0 before inverting
    if((kappa((t(X_n)%*% X_n)/i))^{-1} < 1e-6)
    {
      Sn = solve((t(X_n)%*% X_n)/nrow(X_n) + eps * (Q0/(N_past + N)))
    } else{
      Sn = solve((t(X_n)%*% X_n))
    }
    units.search = unit.index[Z == -1] # units yet to be allocated
    if(units_print == TRUE)
    {
      print(units.search)
    }
    # evaluate the criterion function on each candidate unit
    crit.func = rep(0, length(units.search))
    for(u in 1:length(units.search))
    {
      if(s_function == 'constant')
      {
        crit.func[u] = 10 # a constant
      }
      if(s_function == 'Dopt')
      {
        crit.func[u] = as.vector(t(X_N[units.search[u],]) %*% Sn %*% X_N[units.search[u],] )
      }
      if(s_function == 'negative Dopt')
      {
        crit.func[u] = (-1) * as.vector(t(X_N[units.search[u],]) %*% Sn %*% X_N[units.search[u],] )
      }
      if(s_function == 'Aopt')
      {
        crit.func[u] = as.vector(t(X_N[units.search[u],]) %*% (Sn %*% T.mat %*% Sn) %*%
                                   X_N[units.search[u],])/(1 + as.vector(t(X_N[units.search[u],]) %*%
                                                                           Sn %*% X_N[units.search[u],]) )
      }
      if(s_function == 'max pc')
      {
        # include the unit which maximizes the 1st principal component
        crit.func[u] = x.pc[units.search[u]]
      }
      if(s_function == 'min pc')
      {
        # include the unit which minimizes the 1st principal component
        crit.func[u] = -x.pc[units.search[u]]
      }
      if(s_function == 'Dopt pc')
      {
        # include the unit which maximizes the dispersion of the 1st
        # principal component within the chooser group
        x.pc.append = x.pc[c(units.current,units.search[u])]
        crit.func[u] = sum((x.pc.append - mean(x.pc.append))^2)
      }
      if(s_function == 'max average') # simple average of all covariates
      {
        if(k==1)
        {
          crit.func[u] = as.vector(X_cov)[units.search[u]]
        }else
        {
          crit.func[u] = rowMeans(X_cov)[units.search[u]]
        }
      }
      if(s_function == 'min average')
      {
        if(k==1)
        {
          crit.func[u] = -as.vector(X_cov)[units.search[u]]
        }
        else
        {
          crit.func[u] = -rowMeans(X_cov)[units.search[u]]
        }
      }
      if(s_function == 'Dopt average')
      {
        if(k==1)
        {
          x.avg.append = as.vector(X_cov)[c(units.current,units.search[u])]
          crit.func[u] = sum((x.avg.append - mean(x.avg.append))^2)
        }
        else
        {
          x.avg = rowMeans(X_cov)
          x.avg.append = x.avg[c(units.current,units.search[u])]
          crit.func[u] = sum((x.avg.append - mean(x.avg.append))^2)
        }
      }
      if(s_function == 'marginal var sum')
      {
        if(k==1)
        {
          x.append = as.vector(X_cov)[c(units.current,units.search[u])]
          crit.func[u] = sum((x.append - mean(x.append))^2)
        }
        else
        {
          X_treat = X_cov[c(units.current,units.search[u]),]
          # a single selected unit gives a vector, not a matrix: variance is 0
          if(is.null(nrow(X_treat)) == TRUE)
          {
            crit.func[u] = 0
          }
          if(is.null(nrow(X_treat)) == FALSE)
          {
            crit.func[u] = matrix.trace(cov(X_treat))
          }
        }
      }
    }
    crit.func = round(crit.func,7) # round to avoid spurious floating-point ties
    crit_print[i,units.search] = crit.func
    # all candidate units attaining the maximum criterion value
    units.opt = units.search[which(crit.func == max(crit.func))]
    # resolve ties
    if(ties == 'random')
    {
      unit.opt = units.opt[sample(x = 1:length(units.opt),size = 1)]
      Z[unit.opt] = t.index
    }
    if(ties == 'smallest')
    {
      unit.opt = units.opt[1]
      Z[unit.opt] = t.index
    }
    # unit number that is selected at this stage
    units.selected[i] = unit.opt
  }
  crit_print[crit_print == -1] = NA
  data_frame_allocated = cbind(data_frame,Z)
  colnames(data_frame_allocated)[ncol(data_frame_allocated)] = 'Treat'
  som_appended = cbind(as.vector(som_order), data_frame[units.selected,])
  colnames(som_appended)[1] = 'Treat'
  rownames(som_appended) = 1:N
  som_appended = as.data.frame(som_appended)
  som_split = split(som_appended,som_appended$Treat)
  # augmented data frame (past + newly allocated units); an index column is
  # added whenever either input lacked one
  if(index_col_past == TRUE && index_col == TRUE)
  {
    data_frame_allocated_augmented = as.data.frame(rbind(data_frame_past, data_frame_allocated))
  }
  if(index_col_past == TRUE && index_col == FALSE)
  {
    temp1 = as.data.frame(rbind(data_frame_past[,-1], data_frame_allocated))
    data_frame_allocated_augmented = data.frame(Index = 1:(N_past + N), temp1)
  }
  if(index_col_past == FALSE && index_col == TRUE)
  {
    temp1 = as.data.frame(rbind(data_frame_past, data_frame_allocated[,-1]))
    data_frame_allocated_augmented = data.frame(Index = 1:(N_past + N), temp1)
  }
  if(index_col_past == FALSE && index_col == FALSE)
  {
    temp1 = as.data.frame(rbind(data_frame_past, data_frame_allocated))
    data_frame_allocated_augmented = data.frame(Index = 1:(N_past + N), temp1)
  }
  return(list(data_frame_allocated = data_frame_allocated, som_appended = som_appended,
              som_split = som_split,
              data_frame_allocated_augmented = data_frame_allocated_augmented,
              criteria = crit_print))
}
|
/R/fsm_batch_without_strata.R
|
no_license
|
cran/FSM
|
R
| false
| false
| 16,725
|
r
|
## Trace of a square matrix: the sum of its diagonal elements.
## Replaces the original element-by-element accumulation loop with the
## idiomatic (and vectorized) sum(diag(A)); results are identical for the
## square matrices this helper is called on (e.g. covariance matrices).
matrix.trace = function(A){
  sum(diag(A))
}
#' Batched FSM for sequential experiments
#'
#' @description
#' Extension of the FSM to cases where units arrive sequentially in batches.
#' @param data_frame Data frame containing a column of unit indices (optional) and covariates (or transformations thereof).
#' @param data_frame_past A data frame of units already allocated to treatment groups.
#' Data frame contains a column of unit indices (optional), columns of covariates (or transformations thereof),
#' and a column for treatment indicator.
#' @param t_ind column name containing the treatment indicator in \code{data_frame_past}.
#' @param SOM Selection Order Matrix.
#' @param s_function Specifies a selection function, a string among \code{'constant'}, \code{'Dopt'},
#' \code{'Aopt'}, \code{'max pc'}, \code{'min pc'}, \code{'Dopt pc'}, \code{'max average'}, \code{'min average'},
#' \code{'Dopt average'}. \code{'constant'} selection function puts a constant value on every unselected unit.
#' \code{'Dopt'} use the D-optimality criteria based on the full set of covariates to select units.
#' \code{'Aopt'} uses the A-optimality criteria. \code{'max pc'} (respectively, \code{'min pc'}) selects that
#' unit that has the maximum (respectively, minimum) value of the first principal component.
#' \code{'Dopt pc'} uses the D-optimality criteria on the first principal component, \code{'max average'}
#' (respectively, \code{'min average'}) selects that unit that has the maximum (respectively, minimum)
#' value of the simple average of the covariates. \code{'Dopt average'} uses the D-optimality criteria on the
#' simple average of the covariates.
#' @param Q_initial A (optional) non-singular matrix (called 'initial matrix') that is added the \eqn{(X^T X)}
#' matrix of the choosing treatment group at any stage, when the \eqn{(X^T X)} matrix of that treatment group
#' at that stage is non-invertible. If \code{FALSE}, the \eqn{(X^T X)} matrix for the full set of observations is used
#' as the non-singular matrix. Applicable if \code{s_function = 'Dopt'} or \code{'Aopt'}.
#' @param eps Proportionality constant for \code{Q_initial}, the default value is 0.001.
#' @param ties Specifies how to deal with ties in the values of the selection function. If \code{ties = 'random'},
#' a unit is selected randomly from the set of candidate units. If \code{ties = 'smallest'}, the unit
#' that appears earlier in the data frame, i.e. the unit with the smallest index gets selected.
#' @param intercept if \code{TRUE}, the design matrix of each treatment group includes a column of intercepts.
#' @param index_col_past \code{TRUE} if column of unit indices is present in \code{data_frame_past}.
#' @param standardize if \code{TRUE}, the columns of the \eqn{X} matrix other than the column for the intercept (if any),
#' are standardized.
#' @param units_print if \code{TRUE}, the function automatically prints the candidate units at each step of selection.
#' @param index_col if \code{TRUE}, data_frame contains a column of unit indices.
#' @param Pol_mat Policy matrix. Applicable only when \code{s_function = 'Aopt'}.
#' @param w_pol A vector of policy weights. Applicable only when \code{s_function = 'Aopt'}.
#' @export
#' @return A list containing the following items.
#'
#' \code{data_frame_allocated}: The original data frame augmented with the column of the treatment indicator.
#'
#' \code{som_appended}: The SOM with augmented columns for the indices and covariate values for units selected.
#'
#' \code{som_split}: som_appended, split by the levels of the treatment.
#'
#' \code{data_frame_allocated_augmented}: data frame combining \code{data_frame_allocated} and \code{data_frame_past}.
#' @author Ambarish Chattopadhyay, Carl N. Morris and Jose R. Zubizarreta
#' @references
#' Chattopadhyay, A., Morris, C. N., and Zubizarreta, J. R. (2020), ``Randomized and Balanced Allocation of Units into Treatment Groups Using the Finite Selection Model for \code{R}'.
#' @examples
#' # Consider N=18, number of treatments = 2, n1 = n2 = 9, batch sizes = 6,6,6.
#' # Get data frame for the first batch.
#' df_sample_1 = data.frame(index = 1:6, age = c(20,30,40,40,50,60))
#' # Obtain SOM for all the 12 units.
#' som_gen = som(data_frame = NULL, n_treat = 2, treat_sizes = c(9,9),
#' include_discard = FALSE, method = 'SCOMARS', marginal_treat = rep((9/18),18), control = FALSE)
#' # Assign the first batch.
#' f1 = fsm(data_frame = df_sample_1, SOM = som_gen[1:6,], s_function = 'Dopt',
#' eps = 0.0001, ties = 'random', intercept = TRUE, standardize = TRUE, units_print = TRUE)
#' f1_app = f1$data_frame_allocated
#' # Get data frame for the second batch.
#' df_sample_2 = data.frame(index = 7:12, age = c(20,30,40,40,50,60))
#' # Assign the second batch.
#' f2 = fsm_batch(data_frame = df_sample_2, SOM = som_gen[7:12,], s_function = 'Dopt',
#' eps = 0.0001, ties = 'random', intercept = TRUE, standardize = TRUE, units_print = TRUE,
#' data_frame_past = f1_app, t_ind = 'Treat', index_col_past = TRUE)
#' f2_app = f2$data_frame_allocated_augmented
#' # Get data frame for the third batch.
#' df_sample_3 = data.frame(index = 13:18, age = c(20,30,40,40,50,60))
#' # Assign the third batch.
#' f3 = fsm_batch(data_frame = df_sample_3, SOM = som_gen[13:18,], s_function = 'Dopt',
#' eps = 0.0001, ties = 'random', intercept = TRUE, standardize = TRUE, units_print = TRUE,
#' data_frame_past = f2_app, t_ind = 'Treat', index_col_past = TRUE)
#' f3_app = f3$data_frame_allocated_augmented
fsm_batch = function(data_frame, data_frame_past, t_ind, SOM, s_function = 'Dopt',
                     Q_initial = NULL, eps = 0.001, ties = 'random', intercept = TRUE,
                     index_col_past = TRUE, standardize = TRUE, units_print = TRUE,
                     index_col = TRUE, Pol_mat = NULL, w_pol = NULL)
{
  # names of all possible selection functions
  sf.names = c('constant', 'Dopt', 'Aopt', 'negative Dopt',
               'max pc', 'min pc', 'Dopt pc', 'max average', 'min average', 'Dopt average',
               'marginal var sum')
  # validate the selection function up front instead of failing mid-allocation
  if((s_function %in% sf.names) == FALSE)
  {
    stop('Invalid selection function')
  }
  if(ncol(SOM)>1)
  {
    som_order = SOM[['Treat']] # treatments should be labelled 1,2,...,g or 0,1,...,g-1
  }
  if(ncol(SOM)==1)
  {
    som_order = SOM[,1] # treatments should be labelled 1,2,...,g or 0,1,...,g-1
  }
  if(index_col == TRUE)
  {
    unit.identity = data_frame[['Index']]
  }
  unit.index = 1:nrow(data_frame)
  g = length(table(som_order)) # no. of treatments
  n = as.vector(table(som_order)) # vector of treatment group sizes
  N = sum(n) # total no. of units in the new batch
  ## build-up phase
  units.selected = rep(0,N)
  if(index_col == TRUE)
  {
    X_cov = as.matrix(data_frame[,-1]) # covariates of the new units (index column dropped)
  }
  if(index_col == FALSE)
  {
    X_cov = as.matrix(data_frame) # covariates of the new units
  }
  k = ncol(X_cov) # no. of covariates
  # total number of past units
  N_past = nrow(data_frame_past)
  # treatment indicator for past units
  Z_past = data_frame_past[,t_ind]
  if(index_col_past == TRUE)
  {
    # drop the index and treatment columns from the past data
    X_cov_past = as.matrix(data_frame_past[,-c(1, which(colnames(data_frame_past) == t_ind))])
  }
  # BUG FIX: this branch previously tested index_col instead of index_col_past,
  # leaving X_cov_past undefined (or wrongly overwritten) whenever the two
  # flags disagreed.
  if(index_col_past == FALSE)
  {
    X_cov_past = as.matrix(data_frame_past[,-which(colnames(data_frame_past) == t_ind)])
  }
  if(standardize == TRUE)
  {
    # standardize each covariate with the location/scale of the augmented
    # (past + new) sample so both design matrices share a common scale
    for(j in 1:ncol(X_cov))
    {
      loc = mean(c(X_cov_past[,j], X_cov[,j]))
      scale = sd(c(X_cov_past[,j], X_cov[,j]))
      X_cov[,j] = (X_cov[,j] - loc)/scale
      X_cov_past[,j] = (X_cov_past[,j] - loc)/scale
    }
  }
  if(intercept == TRUE)
  {
    X_N = as.matrix(cbind(rep(1,N), X_cov)) # N x (k+1) design matrix for the new units
    colnames(X_N) = c('intercept', sprintf('x%d', 1:k))
    X_N_past = as.matrix(cbind(rep(1,N_past), X_cov_past)) # N_past x (k+1) design matrix for the past units
    colnames(X_N_past) = c('intercept', sprintf('x%d', 1:k))
  }
  if(intercept == FALSE)
  {
    X_N = as.matrix(X_cov) # N x k design matrix for the new units
    colnames(X_N) = sprintf('x%d', 1:k)
    X_N_past = as.matrix(X_cov_past) # N_past x k design matrix for the past units
    # BUG FIX: colnames(X_N) was assigned twice here, leaving X_N_past unnamed
    colnames(X_N_past) = sprintf('x%d', 1:k)
  }
  ## set initial nonsingular matrix used to regularize near-singular X'X
  if(is.null(Q_initial) == TRUE)
  {
    X_N_combined = rbind(X_N_past, X_N)
    Q0 = t(X_N_combined)%*%X_N_combined
  } else{
    Q0 = Q_initial
  }
  # quantities that are constant across selection stages are computed once,
  # instead of once per candidate unit as before (same results, less work)
  if(s_function %in% c('max pc', 'min pc', 'Dopt pc'))
  {
    # 1st principal component of the full set of new-unit covariates
    pca = prcomp(X_cov)
    x.pc = pca$x[,1]
  }
  if(s_function == 'Aopt')
  {
    # policy matrix = Pol_mat, vector of policy weights = w_pol
    # (cost is assumed constant)
    Pol_mat = as.matrix(Pol_mat)
    W = diag(w_pol)
    T.mat = t(Pol_mat) %*% W %*% Pol_mat
  }
  # Treatments take turns in selecting the units
  Z = rep(-1,N) # treatment indicator, -1 = not yet allocated
  crit_print = matrix(rep(-1,N*N),nrow = N)
  for(i in 1:N)
  {
    t.index = som_order[i] # treatment group that picks a unit at this stage
    units.current = unit.index[Z == t.index] # new units already in that group
    # design matrix of the past units in the choosing group; drop = FALSE
    # keeps a single matching row as a 1-row matrix
    X_N_group_past = X_N_past[Z_past == t.index, , drop = FALSE]
    # augment the past design matrix with the already-selected new units
    if(length(units.current) == 0)
    {
      X_n = X_N_group_past
    }
    if(length(units.current)>0)
    {
      X_n = rbind(X_N_group_past, X_N[units.current, , drop = FALSE])
    }
    # if the reciprocal condition number indicates near-singularity,
    # regularize X'X with a small multiple of Q0 before inverting
    if((kappa((t(X_n)%*% X_n)/i))^{-1} < 1e-6)
    {
      Sn = solve((t(X_n)%*% X_n)/nrow(X_n) + eps * (Q0/(N_past + N)))
    } else{
      Sn = solve((t(X_n)%*% X_n))
    }
    units.search = unit.index[Z == -1] # units yet to be allocated
    if(units_print == TRUE)
    {
      print(units.search)
    }
    # evaluate the criterion function on each candidate unit
    crit.func = rep(0, length(units.search))
    for(u in 1:length(units.search))
    {
      if(s_function == 'constant')
      {
        crit.func[u] = 10 # a constant
      }
      if(s_function == 'Dopt')
      {
        crit.func[u] = as.vector(t(X_N[units.search[u],]) %*% Sn %*% X_N[units.search[u],] )
      }
      if(s_function == 'negative Dopt')
      {
        crit.func[u] = (-1) * as.vector(t(X_N[units.search[u],]) %*% Sn %*% X_N[units.search[u],] )
      }
      if(s_function == 'Aopt')
      {
        crit.func[u] = as.vector(t(X_N[units.search[u],]) %*% (Sn %*% T.mat %*% Sn) %*%
                                   X_N[units.search[u],])/(1 + as.vector(t(X_N[units.search[u],]) %*%
                                                                           Sn %*% X_N[units.search[u],]) )
      }
      if(s_function == 'max pc')
      {
        # include the unit which maximizes the 1st principal component
        crit.func[u] = x.pc[units.search[u]]
      }
      if(s_function == 'min pc')
      {
        # include the unit which minimizes the 1st principal component
        crit.func[u] = -x.pc[units.search[u]]
      }
      if(s_function == 'Dopt pc')
      {
        # include the unit which maximizes the dispersion of the 1st
        # principal component within the chooser group
        x.pc.append = x.pc[c(units.current,units.search[u])]
        crit.func[u] = sum((x.pc.append - mean(x.pc.append))^2)
      }
      if(s_function == 'max average') # simple average of all covariates
      {
        if(k==1)
        {
          crit.func[u] = as.vector(X_cov)[units.search[u]]
        }else
        {
          crit.func[u] = rowMeans(X_cov)[units.search[u]]
        }
      }
      if(s_function == 'min average')
      {
        if(k==1)
        {
          crit.func[u] = -as.vector(X_cov)[units.search[u]]
        }
        else
        {
          crit.func[u] = -rowMeans(X_cov)[units.search[u]]
        }
      }
      if(s_function == 'Dopt average')
      {
        if(k==1)
        {
          x.avg.append = as.vector(X_cov)[c(units.current,units.search[u])]
          crit.func[u] = sum((x.avg.append - mean(x.avg.append))^2)
        }
        else
        {
          x.avg = rowMeans(X_cov)
          x.avg.append = x.avg[c(units.current,units.search[u])]
          crit.func[u] = sum((x.avg.append - mean(x.avg.append))^2)
        }
      }
      if(s_function == 'marginal var sum')
      {
        if(k==1)
        {
          x.append = as.vector(X_cov)[c(units.current,units.search[u])]
          crit.func[u] = sum((x.append - mean(x.append))^2)
        }
        else
        {
          X_treat = X_cov[c(units.current,units.search[u]),]
          # a single selected unit gives a vector, not a matrix: variance is 0
          if(is.null(nrow(X_treat)) == TRUE)
          {
            crit.func[u] = 0
          }
          if(is.null(nrow(X_treat)) == FALSE)
          {
            crit.func[u] = matrix.trace(cov(X_treat))
          }
        }
      }
    }
    crit.func = round(crit.func,7) # round to avoid spurious floating-point ties
    crit_print[i,units.search] = crit.func
    # all candidate units attaining the maximum criterion value
    units.opt = units.search[which(crit.func == max(crit.func))]
    # resolve ties
    if(ties == 'random')
    {
      unit.opt = units.opt[sample(x = 1:length(units.opt),size = 1)]
      Z[unit.opt] = t.index
    }
    if(ties == 'smallest')
    {
      unit.opt = units.opt[1]
      Z[unit.opt] = t.index
    }
    # unit number that is selected at this stage
    units.selected[i] = unit.opt
  }
  crit_print[crit_print == -1] = NA
  data_frame_allocated = cbind(data_frame,Z)
  colnames(data_frame_allocated)[ncol(data_frame_allocated)] = 'Treat'
  som_appended = cbind(as.vector(som_order), data_frame[units.selected,])
  colnames(som_appended)[1] = 'Treat'
  rownames(som_appended) = 1:N
  som_appended = as.data.frame(som_appended)
  som_split = split(som_appended,som_appended$Treat)
  # augmented data frame (past + newly allocated units); an index column is
  # added whenever either input lacked one
  if(index_col_past == TRUE && index_col == TRUE)
  {
    data_frame_allocated_augmented = as.data.frame(rbind(data_frame_past, data_frame_allocated))
  }
  if(index_col_past == TRUE && index_col == FALSE)
  {
    temp1 = as.data.frame(rbind(data_frame_past[,-1], data_frame_allocated))
    data_frame_allocated_augmented = data.frame(Index = 1:(N_past + N), temp1)
  }
  if(index_col_past == FALSE && index_col == TRUE)
  {
    temp1 = as.data.frame(rbind(data_frame_past, data_frame_allocated[,-1]))
    data_frame_allocated_augmented = data.frame(Index = 1:(N_past + N), temp1)
  }
  if(index_col_past == FALSE && index_col == FALSE)
  {
    temp1 = as.data.frame(rbind(data_frame_past, data_frame_allocated))
    data_frame_allocated_augmented = data.frame(Index = 1:(N_past + N), temp1)
  }
  return(list(data_frame_allocated = data_frame_allocated, som_appended = som_appended,
              som_split = som_split,
              data_frame_allocated_augmented = data_frame_allocated_augmented,
              criteria = crit_print))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClassDefinition.R, R/MNF.R
\name{MNF}
\alias{MNF}
\title{Class MNF}
\usage{
MNF(dataObject)
}
\arguments{
\item{dataObject}{object of type massImage}
}
\value{
object of class MNF
}
\description{
Class \code{MNF} contains methods for Maximum Autocorrelation Factors
analysis
This method calculates MNF transform using the
diagonal shift method from Switzer and Green (1984)
to estimate the noise.
}
\details{
Class \code{MNF} contains methods for Maximum Autocorrelation Factors
analysis
Minimum Noise Fraction according Green et al. (1988) using
diagonal shift method from Switzer and Green (1984) to estimate
the noise. As the original package \code{mzImage} from
Stone et al. 2012 is no longer maintained, we use it as code
base for the present version. The C code was implemented
through Rcpp (Eddelbuettel and Francois, 2011). Practically,
this method uses \code{covDiffCalc} from the MAF method. The
present function is a user constructor that will create a new
analysis slot in the chosen MassSpectra/MassImage object.
}
\examples{
testImage<-MassImage('dummy')
testImage<-MNF(testImage)
image(analysis(testImage,1), comp = 1)
\dontrun{
library(tofsimsData)
data(tofsimsData)
MNF(testImage)
image(analysis(testImage,1), comp = 1)}
}
|
/man/MNF.Rd
|
no_license
|
lorenzgerber/tofsims
|
R
| false
| true
| 1,347
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClassDefinition.R, R/MNF.R
\name{MNF}
\alias{MNF}
\title{Class MNF}
\usage{
MNF(dataObject)
}
\arguments{
\item{dataObject}{object of type massImage}
}
\value{
object of class MNF
}
\description{
Class \code{MNF} contains methods for Maximum Autocorrelation Factors
analysis
This method calculates MNF transform using the
diagonal shift method from Switzer and Green (1984)
to estimate the noise.
}
\details{
Class \code{MNF} contains methods for Maximum Autocorrelation Factors
analysis
Minimum Noise Fraction according Green et al. (1988) using
diagonal shift method from Switzer and Green (1984) to estimate
the noise. As the original package \code{mzImage} from
Stone et al. 2012 is no longer maintained, we use it as code
base for the present version. The C code was implemented
through Rcpp (Eddelbuettel and Francois, 2011). Practically,
this method uses \code{covDiffCalc} from the MAF method. The
present function is a user constructor that will create a new
analysis slot in the chosen MassSpectra/MassImage object.
}
\examples{
testImage<-MassImage('dummy')
testImage<-MNF(testImage)
image(analysis(testImage,1), comp = 1)
\dontrun{
library(tofsimsData)
data(tofsimsData)
MNF(testImage)
image(analysis(testImage,1), comp = 1)}
}
|
# load libraries
library(stringr)
library(tidyverse)
library(rvest)
# scrape data
# Download and tidy the Oklahoma tornado table for one year from
# tornadohistoryproject.com. Returns a tibble restricted to the
# date:lift_lon columns, with cleaned column names.
get_tornadoes <- function(year) {
  page_url <- str_c(
    "http://www.tornadohistoryproject.com/tornado/Oklahoma/", year, "/table"
  )
  page <- read_html(page_url)
  raw_table <- html_table(html_nodes(page, "#results"))[[1]]
  # the first row of the scraped table holds the real column headers
  names(raw_table) <- raw_table[1, ]
  raw_table %>%
    filter(Date != "Date") %>%
    janitor::clean_names() %>%
    select(date:lift_lon) %>%
    as_tibble()
}
# Scrape the Oklahoma tornado tables for every year 1998-2017, stack them
# into a single data frame, and cache the result to disk for later analysis.
ok_tornadoes <- map_df(1998:2017, get_tornadoes)
saveRDS(ok_tornadoes, file = "ok_tornadoes.rds")
|
/R/get_tornadoes.R
|
no_license
|
aashareddy14/523-lab07
|
R
| false
| false
| 620
|
r
|
# load libraries
library(stringr)
library(tidyverse)
library(rvest)
# scrape data
# Download and tidy the Oklahoma tornado table for one year from
# tornadohistoryproject.com. Returns a tibble restricted to the
# date:lift_lon columns, with cleaned column names.
get_tornadoes <- function(year) {
  page_url <- str_c(
    "http://www.tornadohistoryproject.com/tornado/Oklahoma/", year, "/table"
  )
  page <- read_html(page_url)
  raw_table <- html_table(html_nodes(page, "#results"))[[1]]
  # the first row of the scraped table holds the real column headers
  names(raw_table) <- raw_table[1, ]
  raw_table %>%
    filter(Date != "Date") %>%
    janitor::clean_names() %>%
    select(date:lift_lon) %>%
    as_tibble()
}
# Scrape the Oklahoma tornado tables for every year 1998-2017, stack them
# into a single data frame, and cache the result to disk for later analysis.
ok_tornadoes <- map_df(1998:2017, get_tornadoes)
saveRDS(ok_tornadoes, file = "ok_tornadoes.rds")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AccessorsChIA.R
\name{average_component_size}
\alias{average_component_size}
\title{Return the mean component size of a CHIA object.}
\usage{
average_component_size(chia.obj)
}
\arguments{
\item{chia.obj}{A list containing the ChIA-PET data, as returned by \code{\link{load_chia}}.}
}
\value{
The mean component size of the chia object.
}
\description{
Return the mean component size of a CHIA object.
}
|
/man/average_component_size.Rd
|
no_license
|
ehenrion/ChIAnalysis
|
R
| false
| true
| 483
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AccessorsChIA.R
\name{average_component_size}
\alias{average_component_size}
\title{Return the mean component size of a CHIA object.}
\usage{
average_component_size(chia.obj)
}
\arguments{
\item{chia.obj}{A list containing the ChIA-PET data, as returned by \code{\link{load_chia}}.}
}
\value{
The mean component size of the chia object.
}
\description{
Return the mean component size of a CHIA object.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.