content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
library(tidyLPA)
### Name: get_data
### Title: Get data from objects generated by tidyLPA
### Aliases: get_data get_data.tidyLPA get_data.tidyProfile
### ** Examples
## Not run:
##D if(interactive()){
##D results <- iris %>%
##D select(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width) %>%
##D estimate_profiles(3)
##D get_data(results)
##D get_data(results[[1]])
##D }
## End(Not run)
| /data/genthat_extracted_code/tidyLPA/examples/get_data.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 408 | r | library(tidyLPA)
### Name: get_data
### Title: Get data from objects generated by tidyLPA
### Aliases: get_data get_data.tidyLPA get_data.tidyProfile
### ** Examples
## Not run:
##D if(interactive()){
##D results <- iris %>%
##D select(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width) %>%
##D estimate_profiles(3)
##D get_data(results)
##D get_data(results[[1]])
##D }
## End(Not run)
|
# Project: CLIR
# Log messages and error utils
#
# Author: Renaud Gaujoux
# Created: Nov 21, 2013
###############################################################################
#' CLI Logging Features
#'
#' The \code{cli_*} functions wrap the corresponding base condition
#' functions so that CLI messages can be formatted and redirected
#' uniformly.
#'
#' @param ... arguments forwarded to the corresponding base function.
#'
#' @rdname CLI-logging
#' @export
cli_stop <- function(...) {
  # Raise an error without echoing the call, for cleaner CLI output.
  stop(..., call. = FALSE)
}
#' @rdname CLI-logging
#' @export
cli_warning <- function(...) {
  # Emit a warning without echoing the call, for cleaner CLI output.
  warning(..., call. = FALSE)
}
#' @param indent number of indent spaces
#' @param item itemize character to use, e.g., \code{'*'} or \code{'-'}.
#' If \code{NULL}, a marker is chosen from \code{indent};
#' use \code{''} for no marker.
#' @inheritParams base::message
#' @rdname CLI-logging
#' @export
cli_smessage <- function(..., indent = 0L, item = NULL, appendLF = FALSE){
  if( is.null(item) ){ # choose item from indent
    .item <- c('*', '*', '-', '-', '>', '>')
    # Clamp the lookup: indents deeper than the marker table reuse the
    # last marker instead of indexing out of bounds, which previously
    # yielded NA and printed a literal "NA " prefix.
    item <- .item[min(indent + 1L, length(.item))]
  }
  indent <- if( indent ) paste0(rep(' ', indent), collapse='') else ''
  if( nzchar(item) ) item <- paste0(item, ' ')
  message(indent, item, ..., appendLF = appendLF)
}
#' @param extfile external log file where to save log messages.
#' Note that messages are still shown in \emph{stderr}.
#' @rdname CLI-logging
#' @export
cli_log <- function(..., appendLF = TRUE, extfile = NULL){
  # When an external log file is supplied, append the message text there
  # first; the message is then emitted on stderr through cli_message().
  if (!is.null(extfile)) {
    eol <- if (appendLF) "\n"  # NULL when appendLF is FALSE; cat() drops NULLs
    cat(..., eol, sep = '', file = extfile, append = TRUE)
  }
  cli_message(..., appendLF = appendLF)
}
#' @rdname CLI-logging
#' @export
cli_message <- function(..., item = '', appendLF = TRUE){
  # Convenience wrapper around cli_smessage(): no itemize marker by
  # default and a trailing newline, mirroring base message().
  cli_smessage(..., item = item, appendLF = appendLF)
}
# Evaluate an expression while collecting (and muffling) any warnings it
# raises. Called with `expr` missing, it returns the warnings recorded by
# the most recent evaluation:
#   format. = FALSE -> the raw list of warning condition objects
#   format. = NA    -> a character vector of trimmed warning messages
#   otherwise       -> a single markdown-formatted string, or NULL if none
tryCatchWarning <- local({
  W <- list()  # warnings accumulated by the last evaluation
  w.handler <- function(w){ # warning handler: record, then muffle
    W <<- c(W, list(w))
    invokeRestart("muffleWarning")
  }
  function(expr, ..., format. = FALSE)
  {
    if( missing(expr) ){
      if( isFALSE(format.) ) return(W)
      else{
        if( !length(W) ) return(NULL)
        # base trimws() replaces stringr::str_trim(), which was called
        # here without stringr ever being imported in this file
        w <- trimws(sapply(W, as.character))
        if( is.na(format.) ) return(w)
        res <- paste0('## Warnings:\n', paste0("* ", w, collapse = "\n"))
        return(res)
      }
    }
    W <<- list()  # reset the store before evaluating
    withCallingHandlers(tryCatch(expr, ...)
        , warning = w.handler)
  }
})
| /R/logging.R | no_license | renozao/CLIR | R | false | false | 2,440 | r | # Project: CLIR
# Log messages and error utils
#
# Author: Renaud Gaujoux
# Created: Nov 21, 2013
###############################################################################
#' CLI Logging Features
#'
#' The function \code{cli_*} wrap calls to the corresponding
#' base functions, to allow automatic formating and redirection.
#'
#' @param ... arguments passed to the corresponding base function.
#'
#' @rdname CLI-logging
#' @export
cli_stop <- function(...) stop(..., call. = FALSE)
#' @rdname CLI-logging
#' @export
cli_warning <- function(...) warning(..., call. = FALSE)
#' @param indent number of indent spaces
#' @param item itemize character to use, e.g., \code{'*'} or \code{'-'}.
#' @inheritParams base::message
#' @rdname CLI-logging
#' @export
cli_smessage <- function(..., indent = 0L, item = NULL, appendLF = FALSE){
if( is.null(item) ){ # choose item from indent
.item <- c('*', '*', '-', '-', '>', '>')
item <- .item[indent+1]
}
indent <- if( indent ) paste0(rep(' ', indent), collapse='') else ''
if( nzchar(item) ) item <- paste0(item, ' ')
message(indent, item, ..., appendLF = appendLF)
}
#' @param extfile external log file where to save log messages.
#' Note that messages are still shown in \emph{stderr}.
#' @rdname CLI-logging
#' @export
cli_log <- function(..., appendLF = TRUE, extfile = NULL){
# output to external file as well
if( !is.null(extfile) ){
cat(..., if( appendLF ) "\n", sep ='', file = extfile, append = TRUE)
}
cli_message(..., appendLF = appendLF)
}
#' @rdname CLI-logging
#' @export
cli_message <- function(..., item = '', appendLF = TRUE){
cli_smessage(..., item = item, appendLF = appendLF)
}
tryCatchWarning <- local({
W <- list()
w.handler <- function(w){ # warning handler
W <<- c(W, list(w))
invokeRestart("muffleWarning")
}
function(expr, ..., format. = FALSE)
{
if( missing(expr) ){
if( isFALSE(format.) ) return(W)
else{
if( !length(W) ) return(NULL)
w <- str_trim(sapply(W, as.character))
if( is.na(format.) ) return(w)
res <- paste0('## Warnings:\n', paste0("* ", w, collapse = "\n"))
return(res)
}
}
W <<- list()
withCallingHandlers(tryCatch(expr, ...)
, warning = w.handler)
}
})
|
/Calcul de métriques/Creation_table_metrics_PAs_v1.R | no_license | BGourdon/finalyear_project | R | false | false | 24,277 | r | ||
# Plot 2: Global Active Power over time for 1-2 Feb 2007
# (UCI "Individual household electric power consumption" data set;
#  missing values are coded as "?").
power_raw <- read.csv("household_power_consumption.txt", header = TRUE, sep = ';',
                      na.strings = "?", nrows = 2075259, check.names = FALSE,
                      stringsAsFactors = FALSE, comment.char = "", quote = '\"')
power_raw$Date <- as.Date(power_raw$Date, format = "%d/%m/%Y")

## Keep only the two target days
feb_days <- subset(power_raw, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))

## Combine date and time into a single POSIXct timestamp
stamp <- paste(as.Date(feb_days$Date), feb_days$Time)
feb_days$Datetime <- as.POSIXct(stamp)

## Line plot of global active power, then copy the screen device to a PNG
plot(feb_days$Global_active_power ~ feb_days$Datetime, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
| /plot2.R | no_license | mubin016/ExData_Plotting1 | R | false | false | 629 | r | f <- read.csv("household_power_consumption.txt",header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
f$Date <- as.Date(f$Date, format="%d/%m/%Y")
## Subsetting the data
data <- subset(f, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
## Converting dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 2
plot(data$Global_active_power~data$Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off()
|
library(echarts4r)
### Name: e_format_axis
### Title: Formatters
### Aliases: e_format_axis e_format_x_axis e_format_y_axis
### ** Examples
# Y = %
# Demo data: 10 x positions with random y values rounded to 2 decimals.
df <- data.frame(
x = 1:10,
y = round(
runif(10, 1, 100), 2
)
)
# Line chart with a "%" suffix on the y-axis labels and an "A" prefix
# on the x-axis labels.
df %>%
e_charts(x) %>%
e_line(y) %>%
e_format_y_axis(suffix = "%") %>%
e_format_x_axis(prefix = "A")
| /data/genthat_extracted_code/echarts4r/examples/formatters.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 345 | r | library(echarts4r)
### Name: e_format_axis
### Title: Formatters
### Aliases: e_format_axis e_format_x_axis e_format_y_axis
### ** Examples
# Y = %
df <- data.frame(
x = 1:10,
y = round(
runif(10, 1, 100), 2
)
)
df %>%
e_charts(x) %>%
e_line(y) %>%
e_format_y_axis(suffix = "%") %>%
e_format_x_axis(prefix = "A")
|
rsu.sspfree.rs <- function(N = NA, prior, p.intro, pstar, pfree, se.u) {
# Discounted prior:
adj.prior <- zdisc.prior(prior = prior, p.intro = p.intro)
# Population sensitivity required to achieve a given value for probability of disease freedom:
se.p <- zsep.pfree(prior = adj.prior, pfree = pfree)
n <- rsu.sssep.rs(N = N, pstar = pstar, se.p = se.p, se.u = se.u)
rval <- list(n = n, se.p = se.p, adj.prior = adj.prior)
rval
} | /epiR/R/rsu.sspfree.rs.R | no_license | albrizre/spatstat.revdep | R | false | false | 450 | r | rsu.sspfree.rs <- function(N = NA, prior, p.intro, pstar, pfree, se.u) {
# Discounted prior:
adj.prior <- zdisc.prior(prior = prior, p.intro = p.intro)
# Population sensitivity required to achieve a given value for probability of disease freedom:
se.p <- zsep.pfree(prior = adj.prior, pfree = pfree)
n <- rsu.sssep.rs(N = N, pstar = pstar, se.p = se.p, se.u = se.u)
rval <- list(n = n, se.p = se.p, adj.prior = adj.prior)
rval
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeTables.R
\name{makeComments}
\alias{makeComments}
\title{Make the COMMENTS table}
\usage{
makeComments(df, questions, updateID)
}
\arguments{
\item{df}{Raw survey data as a data frame.}
\item{questions}{The QUESTIONS portion of the database tables, created by makeQuestions.R}
\item{updateID}{Character string that will become the `updateID` column for this table. For example, "survey11Add" for Survey 11.}
}
\description{
This function creates the COMMENTS database table from raw survey data.
}
| /man/makeComments.Rd | permissive | kaijagahm/ygdpAddSurvey | R | false | true | 582 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeTables.R
\name{makeComments}
\alias{makeComments}
\title{Make the COMMENTS table}
\usage{
makeComments(df, questions, updateID)
}
\arguments{
\item{df}{Raw survey data as a data frame.}
\item{questions}{The QUESTIONS portion of the database tables, created by makeQuestions.R}
\item{updateID}{Character string that will become the `updateID` column for this table. For example, "survey11Add" for Survey 11.}
}
\description{
This function creates the COMMENTS database table from raw survey data.
}
|
source(file = "global.R")
# UI: a full-window leaflet map with a floating action panel on top.
ui <- bootstrapPage(
  # Full-screen page CSS plus styling for the floating #controls panel
  # (semi-transparent until hovered).
  tags$style(type = "text/css", "html, body {width:100%; height:100%}
#controls {background-color:white;border-style:double;opacity:0.5;padding:0px 20px;width:25%;font-family:serif;font-style:italic;}
#controls:hover {opacity:1.0;}"),
  # useFirebase(), # import dependencies
  # useFirebaseUI(),
  # The map fills the whole window; see server's renderLeaflet("map").
  leafletOutput('map', height = '100%', width = '100%'),
  # reqSignin will hide the content until the user signs in, but not safe.
  #reqSignin(
  # Floating panel offering the player's possible actions.
  absolutePanel(top = 10, right = 10, id = "controls",
                h3("Player 1"),
                h5("What is your next step ?"),
                br(),
                actionButton("attack", label = "Attack"),
                actionButton("move", label = "Move"),
                actionButton("pass", label = "Next"),
                br(),
                br(),
  )
  #)
)
# Server: renders the world map with region polygons and unit markers.
# NOTE(review): `regions`, `game`, `flagIcon`, `summarize` and
# `as.data.table` are not defined in this file — presumably provided by
# global.R (sourced at the top); verify before changing.
server <- function(input, output){
  # Firebase sign-in flow, currently disabled:
  # f <- FirebaseUI$
  #   new()$ # instantiate
  #   set_providers(email = TRUE)$
  #   launch() # launch
  output$map <- renderLeaflet({
    # f$req_sign_in() # require sign in
    leaflet() %>%
      addProviderTiles("Stamen.Watercolor", options = providerTileOptions(minZoom = 2, maxZoom = 10)) %>%
      # Start centered on the Atlantic and keep panning within the world.
      setView(lng = 0, lat = 40, zoom = 3) %>%
      setMaxBounds(lng1 = -180, lat1 = 90, lng2 = 180, lat2 = -90) %>%
      # One polygon per subregion, highlighted on hover.
      addPolygons(data = summarize(regions), weight = 0, color = "white", smoothFactor = 0.000001,
                  label = ~subregion, labelOptions = labelOptions(style = list("color" = "black",
                                                                               "font-family" = "serif",
                                                                               "font-style" = "italic",
                                                                               "font-size" = "15px")),
                  fillColor = "black", fillOpacity = 0.1,
                  highlightOptions = highlightOptions(fillColor = "white", bringToFront = TRUE)) %>%
      # One marker per game unit, labelled with its regiment size.
      addMarkers(data = as.data.table(game),
                 icon = flagIcon["p2"],
                 label = ~as.character(regiment),
                 labelOptions = labelOptions(noHide = TRUE, direction = "bottom",
                                             style = list("color" = "black",
                                                          "font-family" = "serif",
                                                          "font-style" = "italic",
                                                          "font-size" = "15px",
                                                          "height" = "25px",
                                                          "width" = "25px",
                                                          "border-color" = "black")
                 )
      ) %>%
      # Game logo pinned to the bottom-left corner.
      addControl(html = img(src = "https://raw.githubusercontent.com/letsang/risk/master/graphics/logorisk.png", height = "16px"),
                 position = "bottomleft")
  })
}
shinyApp(ui, server) | /old/risk-1505.R | no_license | letsang/risk | R | false | false | 3,078 | r | source(file = "global.R")
ui <- bootstrapPage(
tags$style(type = "text/css", "html, body {width:100%; height:100%}
#controls {background-color:white;border-style:double;opacity:0.5;padding:0px 20px;width:25%;font-family:serif;font-style:italic;}
#controls:hover {opacity:1.0;}"),
# useFirebase(), # import dependencies
# useFirebaseUI(),
leafletOutput('map', height = '100%', width = '100%'),
# reqSignin will hide the content until the user signs in, but not safe.
#reqSignin(
absolutePanel(top = 10, right = 10, id = "controls",
h3("Player 1"),
h5("What is your next step ?"),
br(),
actionButton("attack", label = "Attack"),
actionButton("move", label = "Move"),
actionButton("pass", label = "Next"),
br(),
br(),
)
#)
)
server <- function(input, output){
# f <- FirebaseUI$
# new()$ # instantiate
# set_providers(email = TRUE)$
# launch() # launch
output$map <- renderLeaflet({
# f$req_sign_in() # require sign in
leaflet() %>%
addProviderTiles("Stamen.Watercolor", options = providerTileOptions(minZoom = 2, maxZoom = 10)) %>%
setView(lng = 0, lat = 40, zoom = 3) %>%
setMaxBounds(lng1 = -180, lat1 = 90, lng2 = 180, lat2 = -90) %>%
addPolygons(data = summarize(regions), weight = 0, color = "white", smoothFactor = 0.000001,
label = ~subregion, labelOptions = labelOptions(style = list("color" = "black",
"font-family" = "serif",
"font-style" = "italic",
"font-size" = "15px")),
fillColor = "black", fillOpacity = 0.1,
highlightOptions = highlightOptions(fillColor = "white", bringToFront = TRUE)) %>%
addMarkers(data = as.data.table(game),
icon = flagIcon["p2"],
label = ~as.character(regiment),
labelOptions = labelOptions(noHide = TRUE, direction = "bottom",
style = list("color" = "black",
"font-family" = "serif",
"font-style" = "italic",
"font-size" = "15px",
"height" = "25px",
"width" = "25px",
"border-color" = "black")
)
) %>%
addControl(html = img(src = "https://raw.githubusercontent.com/letsang/risk/master/graphics/logorisk.png", height = "16px"),
position = "bottomleft")
})
}
shinyApp(ui, server) |
#install.packages("RCurl")
library(RCurl)
# Resolve shortened URLs by inspecting their HTTP redirect headers.
#
# Args:
#   url, ...: one or more (short) URLs as character strings.
# Returns:
#   A named list, one element per input URL, holding the redirect target
#   taken from the Location header, or the original URL when no Location
#   header is found (or the request fails).
decode_short_url <- function(url, ...) {
  # LOCAL FUNCTIONS #
  # Fetch headers only (no body, no automatic redirect following) and
  # extract the Location header, if present.
  decode <- function(u) {
    Sys.sleep(0.5)  # throttle requests to the shortening service
    x <- try( getURL(u, header = TRUE, nobody = TRUE, followlocation = FALSE, cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")))
    # `||` short-circuits so grep() is not run on a try-error object.
    if(inherits(x, 'try-error') || length(grep(".*Location: (\\S+).*", x))<1) {
      return(u)
    } else {
      return(gsub('.*Location: (\\S+).*', '\\1', x))
    }
  }
  # MAIN #
  # return decoded URLs (the original dead pre-allocation of `l` and the
  # explicit gc() call were removed: lapply() builds the list directly)
  urls <- c(url, ...)
  l <- lapply(urls, decode)
  names(l) <- urls
  return(l)
}
decode_short_url(obamacare_labeled[[1]]$entities$urls[[1]]$expanded_url)#not the final redirection
expd<-decode_short_url(decode_short_url(decode_short_url(obamacare_labeled[[1]]$entities$urls[[1]]$expanded_url)))
identical(expd[[1]],names(expd))
decode_short_url(expd)
| /labeled_tweet/unshort urls.R | no_license | monchewharry/R_twitter_politics | R | false | false | 918 | r | #install.packages("RCurl")
library(RCurl)
decode_short_url <- function(url, ...) {
# LOCAL FUNCTIONS #
decode <- function(u) {
Sys.sleep(0.5)
x <- try( getURL(u, header = TRUE, nobody = TRUE, followlocation = FALSE, cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")))
if(inherits(x, 'try-error') | length(grep(".*Location: (\\S+).*", x))<1) {
return(u)
} else {
return(gsub('.*Location: (\\S+).*', '\\1', x))
}
}
# MAIN #
gc()
# return decoded URLs
urls <- c(url, ...)
l <- vector(mode = "list", length = length(urls))
l <- lapply(urls, decode)
names(l) <- urls
return(l)
}
decode_short_url(obamacare_labeled[[1]]$entities$urls[[1]]$expanded_url)#not the final redirection
expd<-decode_short_url(decode_short_url(decode_short_url(obamacare_labeled[[1]]$entities$urls[[1]]$expanded_url)))
identical(expd[[1]],names(expd))
decode_short_url(expd)
|
# Auto-generated fuzz/valgrind test case for the internal
# bama:::rand_igamma wrapper. `shape` is a subnormal double, probing
# numeric edge behaviour; `scale` is zero.
testlist <- list(scale = 0, shape = 1.16372211846069e-315)
# Invoke the function with the generated argument list.
result <- do.call(bama:::rand_igamma,testlist)
str(result) | /bama/inst/testfiles/rand_igamma/AFL_rand_igamma/rand_igamma_valgrind_files/1615926438-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 117 | r | testlist <- list(scale = 0, shape = 1.16372211846069e-315)
result <- do.call(bama:::rand_igamma,testlist)
str(result) |
# functions for analysis
# Split an allele-depth column into normal/tumor counts and compute the
# variant allele fraction (as a percentage of total depth).
# Assumes `AD.col` holds two comma-separated depths with the normal sample
# first (AD_N) and the tumor sample second (AD_T) — TODO confirm with the
# upstream VCF parsing.
extract.vaf <- function(var.table, AD.col = 'gt_AD') {
  var.table <- tidyr::separate(var.table, AD.col, c("AD_N", "AD_T"), sep = ",")
  for (depth.col in c("AD_N", "AD_T")) {
    var.table[[depth.col]] <- as.numeric(var.table[[depth.col]])
  }
  var.table$VAF_AD <- 100 * var.table$AD_T / (var.table$AD_N + var.table$AD_T)
  var.table
}
# Most frequent value in `v`; ties resolve to the value encountered first.
getmode <- function(v) {
  vals <- unique(v)
  counts <- tabulate(match(v, vals))
  vals[which.max(counts)]
}
# Extract the value of annotation `annot` from the semicolon-delimited
# `Extra` column of `df`, e.g. parse_extra(df, "GENE") turns
# "IMPACT=HIGH;GENE=TP53;" into "TP53". Rows without the annotation are
# set to the literal string "NA".
parse_extra <- function(df, annot) {
  tag <- paste0(annot, '=')
  # Drop everything before "annot=", then keep up to the next semicolon.
  res <- substr(df$Extra, regexpr(tag, df$Extra), nchar(df$Extra))
  res <- substr(res, 1L, regexpr(';', res))
  # Strip the "annot=" prefix and the trailing semicolon.
  res <- gsub(';', '', gsub(tag, '', res))
  res[!grepl(annot, df$Extra)] <- 'NA'
  res
}
# GSEA lister
# inputs:
# rank_list - named list of genes (deg_ranks above). names should correspond to sample comparison in the DESeq2 test.
# sig_list - gene signature being tested
# output: a dataframe (tibble) containing the fgsea results across all sample comparisons for the tested gene signatures
gsea_lister <- function(sig_list, rank_list, sig_name){
require(fgsea)
require(tidyverse)
# perform the gsea, include the sample comparison
y <- lapply(names(rank_list), function(i){
x <- fgsea(pathways=sig_list, stats=rank_list[[i]], nperm=1000) %>%
as_tibble() %>%
dplyr::select(-leadingEdge, -ES, -nMoreExtreme) %>%
mutate(sample_comparison=i, signature_name=sig_name)
return(x)
})
y <- do.call(rbind, y) %>%
as_tibble()
return(y)
} | /code/functions.r | no_license | LosicLab/gbm-case-study | R | false | false | 1,692 | r | # functions for analysis
extract.vaf <- function(var.table, AD.col='gt_AD') {
var.table <- tidyr::separate(var.table, AD.col, c("AD_N", "AD_T"), sep=",")
var.table[, c("AD_N", "AD_T")] <- lapply(var.table[, c("AD_N", "AD_T")], as.numeric)
var.table$VAF_AD <- 100 * ( var.table$AD_T / (var.table$AD_N + var.table$AD_T) )
return(var.table)
}
getmode <- function(v) {
uniqv <- unique(v)
uniqv[which.max(tabulate(match(v, uniqv)))]
}
parse_extra <- function(df, annot){
res <- substr(x=df$Extra, start=regexpr(pattern=paste(annot, '=', sep=''),
text=df$Extra), stop=nchar(df$Extra) )
res <- substr(x=res, start=1, stop=regexpr(pattern=';', text=res))
res <- gsub(x=gsub(pattern=paste(annot, '=', sep=''), replacement='', x=res),
pattern=';', replacement='')
res[!grepl(annot, df$Extra)] <- 'NA'
return(res)
}
# GSEA lister
# inputs:
# rank_list - named list of genes (deg_ranks above). names should correspond to sample comparison in the DESeq2 test.
# sig_list - gene signature being tested
# output: a dataframe (tibble) containing the fgsea results across all sample comparisons for the tested gene signatures
gsea_lister <- function(sig_list, rank_list, sig_name){
require(fgsea)
require(tidyverse)
# perform the gsea, include the sample comparison
y <- lapply(names(rank_list), function(i){
x <- fgsea(pathways=sig_list, stats=rank_list[[i]], nperm=1000) %>%
as_tibble() %>%
dplyr::select(-leadingEdge, -ES, -nMoreExtreme) %>%
mutate(sample_comparison=i, signature_name=sig_name)
return(x)
})
y <- do.call(rbind, y) %>%
as_tibble()
return(y)
} |
#Econ 294
#Assignment 4
#0
print("First Name: Yuan")
print("Last Name: Wu")
print("Student ID: 1307193")
#1
library(foreign)
airports<-read.csv("https://github.com/EconomiCurtis/econ294_2015/raw/master/data/airports.csv", stringsAsFactors = F)
flights<-read.csv("https://github.com/EconomiCurtis/econ294_2015/raw/master/data/flights.csv", stringsAsFactors = F)
planes<-read.csv("https://github.com/EconomiCurtis/econ294_2015/raw/master/data/planes.csv", stringsAsFactors = F)
weather<-read.csv("https://github.com/EconomiCurtis/econ294_2015/raw/master/data/weather.csv", stringsAsFactors = F)
#2
weather$date<-as.Date(weather$date)
flights$date<-as.Date(flights$date)
#3
flights.2a<-subset(flights, dest=="OAK"|dest=="SFO")
print(paste("There are", nrow(flights.2a) ,"observations in flights.2a"))
flights.2b<-subset(flights, dep_delay>=1|arr_delay>=1)
print(paste("There are", nrow(flights.2b) ,"observations in flights.2a"))
flights.2c<-subset(flights, arr_delay>2*dep_delay&arr_delay>0&dep_delay>0)
print(paste("There are", nrow(flights.2c) ,"observations in flights.2a"))
#4
library(dplyr)
method1<-select(flights, arr_delay, dep_delay)
method2<-select(flights, contains("delay"))
method3<-select(flights, ends_with("delay"))
#5
dep_delay_desc<-arrange(flights, desc(dep_delay))
print(head(dep_delay_desc, n=5))
caught_up_desc<-arrange(flights, desc(arr_delay-dep_delay))
print(head(caught_up_desc, n=5))
#6
flights<- mutate(flights,
speed = dist / (time / 60),
delta = arr_delay - dep_delay
)
print(flights %>% arrange(desc(speed)) %>% select(plane, speed) %>% head(5))
print(flights %>% arrange(desc(delta)) %>% select(plane, delta) %>% head(5))
print(flights %>% arrange(delta) %>% select(plane, delta) %>% head(5))
#7
flights.7a<-flights %>%
group_by(carrier) %>%
summarise(
cancel = sum(cancelled),
total = n(),
percentage=cancel*100/total,
deltamin = min(delta, na.rm = T),
delta1stq = quantile(delta, 0.25, na.rm = T),
deltamed = median(delta, na.rm = T),
deltamean = mean(delta, na.rm = T),
delta3rdq = quantile(delta, 0.75, na.rm = T),
delta90q = quantile(delta, 0.9, na.rm = T),
deltamax = max(delta, na.rm = T)
) %>% arrange(desc(percentage))
print(flights.7a)
day_delay<- dplyr::filter(
summarise(
group_by(
dplyr::filter(
flights,
!is.na(dep_delay)
),
date),
delay=mean(dep_delay),
n=n()
),
n>10
)
print("filter out rows that are not empty and count out the number of the rows that is left, grouped by dates.
Only the rows that are left with more than 10 rows will be displayed.
also display the mean of departure delay in each date.")
day_delay.b<- flights %>%
dplyr::filter(!is.na(dep_delay)) %>%
group_by(date) %>%
summarise(
delay=mean(dep_delay),
n=n()
) %>%
dplyr::filter(n>10)
#8
day_delay$lag_delay<- dplyr::lag(day_delay$delay, n=1L)
day_delay$dif_delay<- day_delay$delay - day_delay$lag_delay
day_delay<-na.omit(day_delay)
day_delay<-dplyr::arrange(day_delay, desc(dif_delay))
print(head(day_delay,5))
#9
dest_delay<- flights %>%
group_by(dest) %>%
summarise(
arr_delay=mean(arr_delay, na.rm=T),
n=n()
)
airports.9<- airports %>%
select(iata, airport, city, state, lat, long)
colnames(airports.9)[1]<-"dest"
colnames(airports.9)[2]<-"name"
df.9a<-dest_delay %>%
left_join(airports.9) %>%
arrange(desc(arr_delay)) %>%
select(city,state,arr_delay)
print(head(df.9a,5))
df.9b<-dest_delay %>%
inner_join(airports.9)
#Not the same number of obs as in left_join. It is 2 observations shorter.
df.9c<-dest_delay %>%
right_join(airports.9)
#There are 3376 observations. All mean and n are NA because join sequence where there is no match for the join.
df.9d<-dest_delay %>%
full_join(airports.9)
#There are 3378 observations and contains NA from all variables.
#10
library(tidyr)
flightsdate<-flights %>%
separate(date,c("date","midnight"),sep=" ") %>%
flightsdate$time<-as.character(paste(flightsdate$date,flightsdate$hour, sep="-"))
hourly_delay.10<-flightsdate%>%
filter(!is.na(dep_delay))%>%
group_by(time)%>%
summarise(
hourly_delay=mean(dep_delay,na.rm=T),
n=n())
weather$time<-as.character(paste(weather$date,weather$hour, sep="-"))
df.10<-hourly_delay.10%>%
dplyr::left_join(weather, by = c("time") )
df.10.2<-df.10%>%
arrange(desc(hourly_delay))%>%
select(hourly_delay,conditions)%>%
head(n=5)
print(df.10.2)
#11
df<-data.frame(treament=c("a","b"),subject1=c(3,4),subject2=c(5,6))
print(df)
df.11a<-df %>%
gather(df, treatment, subject1, subject2, na.rm = T) %>%
separate(df,c("discard","subject"),sep="t")%>%
select(subject,treatment=treament,value=treatment) %>%
arrange(subject)
print(df.11a)
df<- data.frame(
subject=c(1,1,2,2),
treatment=c("a","b","a","b"),
value=c(3,4,5,6)
)
print(df)
df.11b<-df %>%
spread(key=subject,value=value) %>%
rename(subject1=`1`,subject2=`2`)
print(df.11b)
df<- data.frame(
subject=c(1,2,3,4),
demo=c("f_15_CA","f_50_NY","m_45_HI","m_18_DC"),
value=c(3,4,5,6)
)
print(df)
df.11c<-df %>%
separate(demo,c("sex","age","state"),sep="_")
print(df.11c)
df<-data.frame(
subject=c(1,2,3,4),
sex=c("f","f","m",NA),
age=c(11,55,65,NA),
city=c("DC","NY","WA",NA),
value=c(3,4,5,6)
)
print(df)
df.11d<- df%>%
unite(demo,...=sex, age, city,sep=".")
df.11d[df.11d=="NA.NA.NA"]<-NA
print(df.11d) | /YuanWuAssignment 4.R | no_license | ywu34/YuanWu294Assignment4 | R | false | false | 5,411 | r | #Econ 294
#Assignment 4
#0
print("First Name: Yuan")
print("Last Name: Wu")
print("Student ID: 1307193")
#1
library(foreign)
airports<-read.csv("https://github.com/EconomiCurtis/econ294_2015/raw/master/data/airports.csv", stringsAsFactors = F)
flights<-read.csv("https://github.com/EconomiCurtis/econ294_2015/raw/master/data/flights.csv", stringsAsFactors = F)
planes<-read.csv("https://github.com/EconomiCurtis/econ294_2015/raw/master/data/planes.csv", stringsAsFactors = F)
weather<-read.csv("https://github.com/EconomiCurtis/econ294_2015/raw/master/data/weather.csv", stringsAsFactors = F)
#2
weather$date<-as.Date(weather$date)
flights$date<-as.Date(flights$date)
#3
flights.2a<-subset(flights, dest=="OAK"|dest=="SFO")
print(paste("There are", nrow(flights.2a) ,"observations in flights.2a"))
flights.2b<-subset(flights, dep_delay>=1|arr_delay>=1)
print(paste("There are", nrow(flights.2b) ,"observations in flights.2a"))
flights.2c<-subset(flights, arr_delay>2*dep_delay&arr_delay>0&dep_delay>0)
print(paste("There are", nrow(flights.2c) ,"observations in flights.2a"))
#4
library(dplyr)
method1<-select(flights, arr_delay, dep_delay)
method2<-select(flights, contains("delay"))
method3<-select(flights, ends_with("delay"))
#5
dep_delay_desc<-arrange(flights, desc(dep_delay))
print(head(dep_delay_desc, n=5))
caught_up_desc<-arrange(flights, desc(arr_delay-dep_delay))
print(head(caught_up_desc, n=5))
#6
flights<- mutate(flights,
speed = dist / (time / 60),
delta = arr_delay - dep_delay
)
print(flights %>% arrange(desc(speed)) %>% select(plane, speed) %>% head(5))
print(flights %>% arrange(desc(delta)) %>% select(plane, delta) %>% head(5))
print(flights %>% arrange(delta) %>% select(plane, delta) %>% head(5))
#7
flights.7a<-flights %>%
group_by(carrier) %>%
summarise(
cancel = sum(cancelled),
total = n(),
percentage=cancel*100/total,
deltamin = min(delta, na.rm = T),
delta1stq = quantile(delta, 0.25, na.rm = T),
deltamed = median(delta, na.rm = T),
deltamean = mean(delta, na.rm = T),
delta3rdq = quantile(delta, 0.75, na.rm = T),
delta90q = quantile(delta, 0.9, na.rm = T),
deltamax = max(delta, na.rm = T)
) %>% arrange(desc(percentage))
print(flights.7a)
day_delay<- dplyr::filter(
summarise(
group_by(
dplyr::filter(
flights,
!is.na(dep_delay)
),
date),
delay=mean(dep_delay),
n=n()
),
n>10
)
print("filter out rows that are not empty and count out the number of the rows that is left, grouped by dates.
Only the rows that are left with more than 10 rows will be displayed.
also display the mean of departure delay in each date.")
day_delay.b<- flights %>%
dplyr::filter(!is.na(dep_delay)) %>%
group_by(date) %>%
summarise(
delay=mean(dep_delay),
n=n()
) %>%
dplyr::filter(n>10)
#8
day_delay$lag_delay<- dplyr::lag(day_delay$delay, n=1L)
day_delay$dif_delay<- day_delay$delay - day_delay$lag_delay
day_delay<-na.omit(day_delay)
day_delay<-dplyr::arrange(day_delay, desc(dif_delay))
print(head(day_delay,5))
#9
dest_delay<- flights %>%
group_by(dest) %>%
summarise(
arr_delay=mean(arr_delay, na.rm=T),
n=n()
)
airports.9<- airports %>%
select(iata, airport, city, state, lat, long)
colnames(airports.9)[1]<-"dest"
colnames(airports.9)[2]<-"name"
df.9a<-dest_delay %>%
left_join(airports.9) %>%
arrange(desc(arr_delay)) %>%
select(city,state,arr_delay)
print(head(df.9a,5))
df.9b<-dest_delay %>%
inner_join(airports.9)
#Not the same number of obs as in left_join. It is 2 observations shorter.
df.9c<-dest_delay %>%
right_join(airports.9)
#There are 3376 observations. All mean and n are NA because join sequence where there is no match for the join.
df.9d<-dest_delay %>%
full_join(airports.9)
#There are 3378 observations and contains NA from all variables.
#10
library(tidyr)
# Split the timestamp into date and a (discarded) midnight component, then
# build an hour-resolution key used to join against the weather table.
# NOTE: the original ended the separate() line with `%>%`, piping straight
# into the next assignment — a syntax/logic error; the chain now terminates
# before the assignment.
flightsdate <- flights %>%
  separate(date, c("date", "midnight"), sep = " ")
flightsdate$time <- as.character(paste(flightsdate$date, flightsdate$hour, sep = "-"))
hourly_delay.10<-flightsdate%>%
filter(!is.na(dep_delay))%>%
group_by(time)%>%
summarise(
hourly_delay=mean(dep_delay,na.rm=T),
n=n())
weather$time<-as.character(paste(weather$date,weather$hour, sep="-"))
df.10<-hourly_delay.10%>%
dplyr::left_join(weather, by = c("time") )
df.10.2<-df.10%>%
arrange(desc(hourly_delay))%>%
select(hourly_delay,conditions)%>%
head(n=5)
print(df.10.2)
#11
df<-data.frame(treament=c("a","b"),subject1=c(3,4),subject2=c(5,6))
print(df)
df.11a<-df %>%
gather(df, treatment, subject1, subject2, na.rm = T) %>%
separate(df,c("discard","subject"),sep="t")%>%
select(subject,treatment=treament,value=treatment) %>%
arrange(subject)
print(df.11a)
df<- data.frame(
subject=c(1,1,2,2),
treatment=c("a","b","a","b"),
value=c(3,4,5,6)
)
print(df)
df.11b<-df %>%
spread(key=subject,value=value) %>%
rename(subject1=`1`,subject2=`2`)
print(df.11b)
df<- data.frame(
subject=c(1,2,3,4),
demo=c("f_15_CA","f_50_NY","m_45_HI","m_18_DC"),
value=c(3,4,5,6)
)
print(df)
df.11c<-df %>%
separate(demo,c("sex","age","state"),sep="_")
print(df.11c)
df<-data.frame(
subject=c(1,2,3,4),
sex=c("f","f","m",NA),
age=c(11,55,65,NA),
city=c("DC","NY","WA",NA),
value=c(3,4,5,6)
)
print(df)
df.11d<- df%>%
unite(demo,...=sex, age, city,sep=".")
df.11d[df.11d=="NA.NA.NA"]<-NA
print(df.11d) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tagstack.R
\name{tagstack}
\alias{tagstack}
\title{constructor for tagstack}
\usage{
tagstack(data = list(), directory = character())
}
\arguments{
\item{data}{this should be a list of \code{\link[sattagutils]{sattag-class}}.}
}
\value{
a \code{\link[sattagutils]{tagstack-class}}
}
\description{
use this constructor to create a new tagstack object.
}
| /man/tagstack.Rd | no_license | williamcioffi/sattagutils | R | false | true | 431 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tagstack.R
\name{tagstack}
\alias{tagstack}
\title{constructor for tagstack}
\usage{
tagstack(data = list(), directory = character())
}
\arguments{
\item{data}{this should be a list of \code{\link[sattagutils]{sattag-class}}.}
}
\value{
a \code{\link[sattagutils]{tagstack-class}}
}
\description{
use this constructor to create a new tagstack object.
}
|
# Shiny app: plot up to two user-supplied functions of x side by side using
# functionplotR. The UI offers two free-text function inputs; the server
# re-renders the plot whenever either input changes.
library(shiny)
library(functionplotR)
library(shinythemes)
# UI: two text inputs inside a wellPanel plus the plot output area.
ui <- fluidPage(
theme = shinytheme("united"),
headerPanel("Explore functions!"),
wellPanel(textInput(
inputId = "text1",
value = "x^2-3", # if changed to blank no function shows up
label = "Write first function here"
),
textInput(
inputId = "text2",
value = "0", # if left blank tooltip and legend won't work
label = "..and second function here",
placeholder = "x or anything"
)
),
functionplotOutput("func", width = "50%", height = "50%")
)
# Server: pass both expressions (as strings) to functionplot(); Shiny's
# reactivity re-evaluates this whenever text1 or text2 changes.
server <- function(input, output) {
output$func <- renderFunctionplot(
functionplot(
c(input$text1, input$text2)
)
)
}
# Assemble and launch the app.
shinyApp(ui = ui, server = server)
| /Visualize-Functions/app.R | no_license | peyron/shiny-server-test | R | false | false | 996 | r | library(shiny)
library(functionplotR)
library(shinythemes)
ui <- fluidPage(
theme = shinytheme("united"),
headerPanel("Explore functions!"),
wellPanel(textInput(
inputId = "text1",
value = "x^2-3", # if changed to blank no function shows up
label = "Write first function here"
),
textInput(
inputId = "text2",
value = "0", # if left blank tooltip and legend won't work
label = "..and second function here",
placeholder = "x or anything"
)
),
functionplotOutput("func", width = "50%", height = "50%")
)
server <- function(input, output) {
output$func <- renderFunctionplot(
functionplot(
c(input$text1, input$text2)
)
)
}
shinyApp(ui = ui, server = server)
|
# Gibbs sampler engine for the BayesComm multivariate probit model.
#
# Args:
#   y:        n.site x n.species binary (0/1) presence/absence matrix
#             (transposed internally so species index rows).
#   X:        covariate design matrix (sites in rows).
#   covlist:  list with one element per species, giving the column indices of
#             X used in that species' linear predictor.
#   R:        initial species-species correlation matrix.
#   z:        initial latent Gaussian matrix (n.site x n.species).
#   mu:       initial linear predictor, same shape as z.
#   updateR:  logical; resample R each iteration?
#   iters:    total number of MCMC iterations.
#   thin:     keep every `thin`-th post-burn-in draw.
#   burn:     number of initial iterations to discard.
#   priW:     Wishart prior parameters passed to sample_R().
#   updateMu: logical; resample the regression coefficients each iteration?
#   verbose:  0 = silent, 1 = report retained iterations, 2 = report all.
#
# Returns a list with elements:
#   R    - matrix of flattened upper triangles of the correlation draws,
#   B    - per-species matrices of coefficient draws (NULL if !updateMu),
#   z    - array of latent draws (sample x site x species),
#   burn, thin - copied from the arguments.
#
# Relies on package-internal samplers find_trunc(), sample_e(), sample_mu()
# and sample_R(), defined elsewhere in the package.
BCfit <-
function(y, X, covlist, R, z, mu, updateR, iters, thin = 1, burn = 0, priW = c(nrow(z) + 2 * ncol(z), 2 * ncol(z)), updateMu = TRUE, verbose = 0) {
  # a species present (or absent) everywhere has zero variance, so its
  # correlations with the others are undefined
  broken <- which(diag(var(y)) == 0)
  if (length(broken) > 0) {
    stop("No variation in y for species", paste(broken, collapse = ", "),
         "; correlation not defined")
  }
  stopifnot(all(is.finite(X)), all(is.finite(y)))
  spnames <- colnames(y)
  y <- t(y)  # NOTE: y is transposed relative to the input!
  nsp <- dim(y)[1]  # number of species
  n <- dim(y)[2]    # number of sites
  iR <- solve(R)    # inverse correlation matrix
  e <- z - mu       # component of z that can't be explained by mu
  nsamp <- (iters - burn) %/% thin  # number of retained samples
  stopifnot(nsamp > 0)
  output <- list(
    R = array(NA, dim = c(nsamp, ((nsp * nsp - nsp) / 2))),
    B = NULL,
    z = array(NA, dim = c(nsamp, n, nsp)),
    burn = burn,
    thin = thin
  )
  if (updateMu) {
    # one coefficient matrix per species, sized by that species' covariates
    for (i in seq_len(nsp)) {
      temp <- matrix(NA, nsamp, length(covlist[[i]]))
      colnames(temp) <- colnames(X)[covlist[[i]]]
      output$B[[spnames[i]]] <- temp
    }
    rm(temp)
  }
  # build "spA_spB" labels for every cell of the nsp x nsp correlation
  # matrix. BUG FIX: this vector was previously rep(NA, n * n) — sized by
  # the number of SITES rather than species — which both wasted memory and
  # caused out-of-bounds writes whenever n * n < nsp * nsp.
  nam <- rep(NA_character_, nsp * nsp)
  for (i in seq_len(nsp)) {
    for (j in seq_len(nsp)) {
      nam[(i - 1) * nsp + j] <- paste(spnames[i], "_", spnames[j], sep = "")
    }
  }
  colnames(output$R) <- nam[which(upper.tri(diag(nsp)))]
  dimnames(output$z)[[3]] <- spnames
  # --- Gibbs sampler ---
  rec <- 0  # counter for record number after burn-in and thinning
  for (iter in seq_len(iters)) {
    # sample latent residuals within the truncation region implied by y
    trunc <- find_trunc(mu, y)
    e <- sample_e(e, trunc, iR)
    z <- mu + e
    # sample regression coefficients (and hence mu)
    if (updateMu) {
      mulis <- sample_mu(z, X, covlist)
      mu <- mulis[[1]]
    }
    e <- z - mu
    # sample the correlation matrix
    if (updateR) {
      R <- sample_R(z - mu, priW)
      iR <- chol2inv(chol(R))  # invert via Cholesky (R is pos. definite)
    }
    if (verbose == 2) {
      message(iter)
    }
    if (verbose > 0 && iter == burn) {
      message("burn-in complete")
    }
    # record parameters if past burn-in and on the thinning grid
    if (iter %% thin == 0 && iter > burn) {
      if (verbose == 1) {
        message(iter)
      }
      rec <- rec + 1
      output$R[rec, ] <- R[upper.tri(R)]
      output$z[rec, , ] <- z
      if (updateMu) {
        for (i in seq_len(nsp)) {
          output$B[[i]][rec, ] <- mulis[[2]][[i]]
        }
      }
    }
  }
  output
}
| /R/BCfit.R | no_license | goldingn/BayesComm | R | false | false | 2,653 | r | BCfit <-
function(y, X, covlist, R, z, mu, updateR, iters, thin = 1, burn = 0, priW = c(nrow(z) + 2 * ncol(z), 2 * ncol(z)), updateMu = TRUE, verbose = 0) {
broken = which(diag(var(y)) == 0)
if (any(diag(var(y)) == 0)) {
stop("No variation in y for species", paste(broken, collapse = ", "),
"; correlation not defined")
}
stopifnot(all(is.finite(X)), all(is.finite(y)))
spnames <- colnames(y)
y <- t(y) # NOTE: y is transposed relative to the input!
nsp <- dim(y)[1] # number of species
n <- dim(y)[2] # number of sites
iR <- solve(R) # Inverse correlation matrix
e <- z - mu # component of z that can't be explained by mu
nsamp <- (iters - burn) %/% thin
stopifnot(nsamp > 0)
# Dave asks: Should mu be in the output as well? It wasn't before, but that
# may have been an oversight.
# Nick: It's not needed since it's just X %*% B, we could
# store it for convenience but I doubt it's worth the memory used
output <- list(
R = array(NA, dim = c(nsamp, ((nsp * nsp - nsp) / 2))),
B = NULL,
z = array(NA, dim = c(nsamp, n, nsp)),
burn = burn,
thin = thin
)
if (updateMu) {
for (i in 1:nsp) {
temp <- matrix(NA, nsamp, length(covlist[[i]]))
colnames(temp) <- colnames(X)[covlist[[i]]]
output$B[[spnames[i]]] <- temp
}
rm(temp)
}
nam <- rep(NA, n * n)
for (i in 1:nsp) {
for (j in 1:nsp) {
nam[(i - 1) * nsp + j] <- paste(spnames[i], "_", spnames[j], sep="")
}
}
colnames(output$R) <- nam[which(upper.tri(diag(nsp)))]
dimnames(output$z)[[3]] <- spnames
# start sampler
rec <- 0 # counter for record number after burn-in and thinning
start <- Sys.time()
for (iter in 1:iters) {
# get truncation values and sample z
trunc <- find_trunc(mu, y)
e <- sample_e(e, trunc, iR)
z <- mu + e
# sample mu and calculate e
if (updateMu) {
mulis <- sample_mu(z, X, covlist)
mu <- mulis[[1]]
}
e <- z - mu
# sample R
if (updateR) {
R <- sample_R(z - mu, priW)
iR <- chol2inv(chol(R))
}
if(verbose == 2){
message(iter)
}
if(verbose > 0 & iter == burn){
message("burn-in complete")
}
# record parameters
if (iter %% thin == 0 & iter > burn) {
if(verbose == 1){
message(iter)
}
rec <- rec + 1
output$R[rec, ] <- R[upper.tri(R)]
output$z[rec, , ] <- z
if (updateMu) {
for (i in 1:nsp) {
output$B[[i]][rec, ] <- mulis[[2]][[i]]
}
}
}
} # sampler
output
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/random_forest.R
\name{random_forest_train}
\alias{random_forest_train}
\title{Train a DAPM Random Forest Model}
\usage{
random_forest_train(df, transform.funcs = list(), impute.vals = list(),
params = list(num_trees = 500))
}
\arguments{
\item{df}{A not-transformed data frame of predictors *plus* an additional
boolean column named \code{target}.}
\item{transform.funcs}{A list of APM-available functions that will be applied
to the input data both for training and for scoring. No transformation
will be applied to columns that do not have corresponding keys
in this list.}
\item{impute.vals}{A list of values that will be imputed to missing
observations in both the training and scoring data. No imputation will
be performed for columns that do not have corresponding keys.}
\item{params}{A list of parameters available for use by the model. These will
persist with the model object.}
}
\value{
Returns 0 on success and an integer > 0 if an error occured. These
integer codes should be unique.
}
\description{
This is a simpler, more opinionated training function than has been included
in DAPM previously, but this routine will extend more easily to multiple
states with different data.
}
\details{
Assumptions:
1. The input data \code{df} is already balanced with respect to the target.
2. The input data \code{df} contains *only* the (a) target and (b) features
used to predict the target. These columns will be the only ones used in
the final model.
}
\author{
Tom Shafer
}
| /Production/Model/APM/man/random_forest_train.Rd | permissive | ElderResearch/DAPM | R | false | true | 1,580 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/random_forest.R
\name{random_forest_train}
\alias{random_forest_train}
\title{Train a DAPM Random Forest Model}
\usage{
random_forest_train(df, transform.funcs = list(), impute.vals = list(),
params = list(num_trees = 500))
}
\arguments{
\item{df}{A not-transformed data frame of predictors *plus* an additional
boolean column named \code{target}.}
\item{transform.funcs}{A list of APM-available functions that will be applied
to the input data both for training and for scoring. No transformation
will be applied to columns that do not have corresponding keys
in this list.}
\item{impute.vals}{A list of values that will be imputed to missing
observations in both the training and scoring data. No imputation will
be performed for columns that do not have corresponding keys.}
\item{params}{A list of parameters available for use by the model. These will
persist with the model object.}
}
\value{
Returns 0 on success and an integer > 0 if an error occured. These
integer codes should be unique.
}
\description{
This is a simpler, more opinionated training function than has been included
in DAPM previously, but this routine will extend more easily to multiple
states with different data.
}
\details{
Assumptions:
1. The input data \code{df} is already balanced with respect to the target.
2. The input data \code{df} contains *only* the (a) target and (b) features
used to predict the target. These columns will be the only ones used in
the final model.
}
\author{
Tom Shafer
}
|
if (! 'd' %in% ls()){
source('load_data.R')
d<-load_data()
}
png(filename='plot1.png')
hist(d[,'Global_active_power'],
xlab='Global Active Power (kilowatts)',
ylab='Frequency',
main='Global Active Power',
col='red')
dev.off() | /plot1.R | no_license | plexluthor81/ExData_Plotting1 | R | false | false | 250 | r | if (! 'd' %in% ls()){
source('load_data.R')
d<-load_data()
}
png(filename='plot1.png')
hist(d[,'Global_active_power'],
xlab='Global Active Power (kilowatts)',
ylab='Frequency',
main='Global Active Power',
col='red')
dev.off() |
# Estimate the dominant frequency of the series in metr.csv.
# For each candidate frequency tau, score it with a sum-of-cosines statistic
# (a crude periodogram); q is the reciprocal of the best-scoring tau.
dane <- read.csv2("metr.csv")
tau <- seq(0.1, 1, 0.01)
x <- dane$x
phi <- vapply(tau, function(f) sum(cos(2 * pi * f * x)), numeric(1))
q <- 1 / tau[which.max(phi)]
q
| /ZalewskaAndzelika_pd2.R | no_license | zlotopolscylover/TimeSeries | R | false | false | 202 | r | dane<- read.csv2("metr.csv")
tau<-seq(0.1,1,0.01)
x <- dane$x
phi <- numeric(length(tau))
for (i in 1:length(phi)){
phi[i] <- sum(cos(2*pi*tau[i]*x))
}
q <- 1/tau[which.max(phi)]
q
|
#Set sleep data working directory
# NOTE(review): setwd() in a script is fragile (machine-specific absolute
# path); consider a relative path or here::here(). setwd() returns the
# PREVIOUS directory, so SleepDIR holds the old wd, not the new one.
SleepDIR <- setwd("C:/Users/James/Documents/SMU/MSDS_6306_Doing_Data_Science/HW_Data/Unit3_File_Mgmt/")
#Read CSV file into R
SleepData <- read.csv("sleep_data_01.csv", header=TRUE, sep=",")
#get column Names
colnames(SleepData)
#create vectors
AgeVector <- as.vector(SleepData$Age)
DurationVector <- as.vector(SleepData$Duration)
RSESVector <- as.vector(SleepData$RSES)
#Create metric objects
# NOTE(review): despite its name, MedianAge stores the MEAN age.
MedianAge <- mean(AgeVector, na.rm=TRUE)
DurationMin <- min(DurationVector, na.rm=TRUE)
DurationMax <- max(DurationVector, na.rm=TRUE)
SelfEsteem <- mean(RSESVector, na.rm=TRUE)
SE_SD <- sd(RSESVector, na.rm=TRUE)
#Create data frame and divide results by 5, limit to 2 sig figs
DF_AgeDurRSES <- round(data.frame(cbind(MedianAge, DurationMin, DurationMax, SelfEsteem, SE_SD))/5, digits = 2)
##############################
#######FUNCTION BUILDING######
##############################
# Summarize a sleep-survey data frame: mean age, duration range, and
# self-esteem mean/SD, each divided by 5 and rounded to 2 decimal places
# (per the assignment specification).
#
# Args:
#   x: data frame with numeric columns Age, Duration and RSES.
# Returns: a one-row data frame with columns MedianAge (note: actually the
#   MEAN age — name kept for backward compatibility), DurationMin,
#   DurationMax, SelfEsteem and SE_SD.
HW3FileMgmt <- function(x) {
  # BUG FIX: the original body ignored `x` and read the global `SleepData`,
  # so the function only worked by accident. It now uses its argument and
  # works on any suitably-shaped data frame; the existing call
  # HW3FileMgmt(SleepData) produces identical output.
  AgeVector <- as.vector(x$Age)
  DurationVector <- as.vector(x$Duration)
  RSESVector <- as.vector(x$RSES)
  # summary statistics, ignoring missing values
  MedianAge <- mean(AgeVector, na.rm = TRUE)
  DurationMin <- min(DurationVector, na.rm = TRUE)
  DurationMax <- max(DurationVector, na.rm = TRUE)
  SelfEsteem <- mean(RSESVector, na.rm = TRUE)
  SE_SD <- sd(RSESVector, na.rm = TRUE)
  # divide results by 5 and limit to 2 decimal places, as required
  Report <- round(data.frame(cbind(MedianAge, DurationMin, DurationMax, SelfEsteem, SE_SD)) / 5, digits = 2)
  return(Report)
}
#function(data frame name)
HW3FileMgmt(SleepData) | /Unit3_File_Mgmt/HW_3_Sleep_Code.R | no_license | jvasquezDC/SMU-MSDS6306-HW | R | false | false | 1,598 | r |
#Set sleep data working directory
SleepDIR <- setwd("C:/Users/James/Documents/SMU/MSDS_6306_Doing_Data_Science/HW_Data/Unit3_File_Mgmt/")
#Read CSV file into R
SleepData <- read.csv("sleep_data_01.csv", header=TRUE, sep=",")
#get column Names
colnames(SleepData)
#create vectors
AgeVector <- as.vector(SleepData$Age)
DurationVector <- as.vector(SleepData$Duration)
RSESVector <- as.vector(SleepData$RSES)
#Create metric objects
MedianAge <- mean(AgeVector, na.rm=TRUE)
DurationMin <- min(DurationVector, na.rm=TRUE)
DurationMax <- max(DurationVector, na.rm=TRUE)
SelfEsteem <- mean(RSESVector, na.rm=TRUE)
SE_SD <- sd(RSESVector, na.rm=TRUE)
#Create data frame and divide results by 5, limit to 2 sig figs
DF_AgeDurRSES <- round(data.frame(cbind(MedianAge, DurationMin, DurationMax, SelfEsteem, SE_SD))/5, digits = 2)
##############################
#######FUNCTION BUILDING######
##############################
HW3FileMgmt <- function(x) {
#create vectors
AgeVector <- as.vector(SleepData$Age)
DurationVector <- as.vector(SleepData$Duration)
RSESVector <- as.vector(SleepData$RSES)
#Create metric objects
MedianAge <- mean(AgeVector, na.rm=TRUE)
DurationMin <- min(DurationVector, na.rm=TRUE)
DurationMax <- max(DurationVector, na.rm=TRUE)
SelfEsteem <- mean(RSESVector, na.rm=TRUE)
SE_SD <- sd(RSESVector, na.rm=TRUE)
#Create data frame and divide results by 5, limit to 2 sig figs
Report <- round(data.frame(cbind(MedianAge, DurationMin, DurationMax, SelfEsteem, SE_SD))/5, digits = 2)
return(Report)
}
#function(data frame name)
HW3FileMgmt(SleepData) |
# Simulate a complete SIBER dataset: `n.communities` random communities, each
# containing `n.groups` groups of `n.obs` bivariate (iso1, iso2) observations.
#
# Args:
#   n.groups:      number of groups per community.
#   n.communities: number of communities to simulate.
#   n.obs:         observations per group.
#   mu.range:      c(min.iso1, max.iso1, min.iso2, max.iso2); range from
#                  which group means are drawn (passed through to
#                  generate.siber.community()).
# Returns: a data frame with columns iso1, iso2, group, community.
generate.siber.data <- function(n.groups = 3, n.communities = 2, n.obs = 30, mu.range = c(-1, 1, -1, 1) ){
  # total number of observations (rows) to be created; preallocate the
  # full data frame up front rather than growing it in the loop
  nn <- n.obs * n.groups * n.communities
  dummy <- rep(NA, nn)
  simulated.data <- data.frame(iso1 = dummy,
                               iso2 = dummy,
                               group = dummy,
                               community = dummy)
  # row offset at which the next community's block is written
  idx.counter <- 1
  # loop over communities
  for (i in seq_len(n.communities)) {
    # BUG FIX: n.groups was hard-coded as 3 in this call, so any other value
    # of the n.groups argument produced a community that did not match the
    # preallocated frame; the argument is now passed through.
    y <- generate.siber.community(n.groups = n.groups, community.id = i,
                                  n.obs = n.obs, mu.range = mu.range)
    # copy the community into its slice of the output and advance the offset
    simulated.data[idx.counter:(idx.counter + nrow(y) - 1), ] <- y
    idx.counter <- idx.counter + nrow(y)
  }
  return(simulated.data)
}
| /R/generate.siber.data.R | no_license | ChrisHarrod/SIBER | R | false | false | 1,167 | r | generate.siber.data <- function(n.groups = 3, n.communities = 2, n.obs = 30, mu.range = c(-1, 1, -1, 1) ){
# calculate the number of observations (rows) to be created
nn <- n.obs * n.groups * n.communities
# a vector of dummy NA entries to use to populate the dataframe
dummy <- rep(NA, nn)
# the dataframe that will hold the simulated data
simulated.data <- data.frame(iso1 = dummy,
iso2 = dummy,
group = dummy,
community = dummy)
# a counter to keep track of how many communities have been created, and to allow
# appropriate indexing of the dataframe "simulated.data"
idx.counter <- 1
# loop over communities
for (i in 1:n.communities){
# create a random community
y <- generate.siber.community(n.groups = 3, community.id = i, n.obs = n.obs, mu.range = mu.range)
# add the random community to the dataframe "simulated.data"
simulated.data[idx.counter:(idx.counter+nrow(y)-1), ] <- y
# update the counter
idx.counter <- idx.counter + nrow(y)
}
# output the dataframe "simulated.data"
return(simulated.data)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpm.R
\name{isBadNum}
\alias{isBadNum}
\title{checks if a log-density evaluation is not a valid number}
\usage{
isBadNum(num)
}
\arguments{
\item{num}{evaluation of a log-density}
}
\value{
TRUE or FALSE
}
\description{
checks if a log-density evaluation is not a valid number
}
\examples{
isBadNum(NaN)
}
| /man/isBadNum.Rd | permissive | tbrown122387/cpm | R | false | true | 384 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpm.R
\name{isBadNum}
\alias{isBadNum}
\title{checks if a log-density evaluation is not a valid number}
\usage{
isBadNum(num)
}
\arguments{
\item{num}{evaluation of a log-density}
}
\value{
TRUE or FALSE
}
\description{
checks if a log-density evaluation is not a valid number
}
\examples{
isBadNum(NaN)
}
|
## Binary reading of a matrix
## Each MPI rank reads its own contiguous block of columns from a shared
## binary file of doubles, assembles a distributed ddmatrix, redistributes
## it to a 2x2 blocking, and runs a distributed PCA.
library(pbdMPI, quiet=TRUE)
library(pbdDMAT, quiet=TRUE)
init.grid()
## set up start and length for reading a matrix of
## doubles split on columns
nrow <- 4
ncol <- 8
size <- 8 # bytes
# block decomposition: each rank owns a contiguous run of column indices
my_ids <- get.jid(ncol, method="block")
my_start_col <- my_ids[1]
my_ncol <- length(my_ids) # contiguous ids
# byte offset of this rank's first column (column-major storage assumed)
my_start <- (my_start_col - 1)*nrow*size
my_length <- my_ncol*nrow
comm.cat("my_start", my_start, "my_length", my_length, "\n",
all.rank=TRUE)
# NOTE(review): `con` is opened but never closed; add close(con) after the
# read in production code.
con <- file("binary.matrix.file", "rb")
seekval <- seek(con, where=my_start, rw="read")
x <- readBin(con, what="double", n=my_length, size=size)
comm.print(x, all.rank=TRUE)
# global, local, and blocking dimensions for the distributed matrix;
# ICTXT=1 is the row-of-processors context matching the column split
gdim <- c(nrow, ncol)
ldim <- c(nrow, my_ncol)
bldim <- c(nrow, allreduce(my_ncol, op="max"))
X <- new("ddmatrix", Data=matrix(x, nrow, my_ncol),
dim=gdim, ldim=ldim, bldim=bldim, ICTXT=1)
comm.print(X@Data, all.rank=TRUE)
# redistribute to a 2x2 block-cyclic layout for ScaLAPACK routines
X <- redistribute(X, bldim=c(2, 2), ICTXT=0)
comm.print(X@Data, all.rank=TRUE)
Xprc <- prcomp(X) # ScaLAPACK-powered!
comm.print(Xprc)
finalize()
| /common/scripts/input/3_readbinary_mat.r | permissive | wrathematics/Rpackagetalk | R | false | false | 1,043 | r | ## Binary reading of a matrix
library(pbdMPI, quiet=TRUE)
library(pbdDMAT, quiet=TRUE)
init.grid()
## set up start and length for reading a matrix of
## doubles split on columns
nrow <- 4
ncol <- 8
size <- 8 # bytes
my_ids <- get.jid(ncol, method="block")
my_start_col <- my_ids[1]
my_ncol <- length(my_ids) # contiguous ids
my_start <- (my_start_col - 1)*nrow*size
my_length <- my_ncol*nrow
comm.cat("my_start", my_start, "my_length", my_length, "\n",
all.rank=TRUE)
con <- file("binary.matrix.file", "rb")
seekval <- seek(con, where=my_start, rw="read")
x <- readBin(con, what="double", n=my_length, size=size)
comm.print(x, all.rank=TRUE)
gdim <- c(nrow, ncol)
ldim <- c(nrow, my_ncol)
bldim <- c(nrow, allreduce(my_ncol, op="max"))
X <- new("ddmatrix", Data=matrix(x, nrow, my_ncol),
dim=gdim, ldim=ldim, bldim=bldim, ICTXT=1)
comm.print(X@Data, all.rank=TRUE)
X <- redistribute(X, bldim=c(2, 2), ICTXT=0)
comm.print(X@Data, all.rank=TRUE)
Xprc <- prcomp(X) # ScaLAPACK-powered!
comm.print(Xprc)
finalize()
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# To run this example use
# ./bin/spark-submit examples/src/main/r/ml/kstest.R
# Load SparkR library into your R session
library(SparkR)
# Initialize SparkSession
sparkR.session(appName = "SparkR-ML-kstest-example")
# $example on$
# Load training data
data <- data.frame(test = c(0.1, 0.15, 0.2, 0.3, 0.25, -1, -0.5))
df <- createDataFrame(data)
# NOTE(review): `training` and `test` are assigned but never used below;
# the KS test runs directly on `df`.
training <- df
test <- df
# Conduct the two-sided Kolmogorov-Smirnov (KS) test with spark.kstest
# (tests whether column "test" is drawn from a standard normal distribution)
model <- spark.kstest(df, "test", "norm")
# Model summary
summary(model)
# $example off$
| /examples/src/main/r/ml/kstest.R | permissive | bloomberg/apache-spark-on-k8s | R | false | false | 1,322 | r | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# To run this example use
# ./bin/spark-submit examples/src/main/r/ml/kstest.R
# Load SparkR library into your R session
library(SparkR)
# Initialize SparkSession
sparkR.session(appName = "SparkR-ML-kstest-example")
# $example on$
# Load training data
data <- data.frame(test = c(0.1, 0.15, 0.2, 0.3, 0.25, -1, -0.5))
df <- createDataFrame(data)
training <- df
test <- df
# Conduct the two-sided Kolmogorov-Smirnov (KS) test with spark.kstest
model <- spark.kstest(df, "test", "norm")
# Model summary
summary(model)
# $example off$
|
# install / load required packages ----------------------------------------
# install aWhere R packages
#library(devtools)
#devtools::install_github("aWhereAPI/aWhere-R-Library")
#devtools::install_github("aWhereAPI/aWhere-R-Charts")
# load required packages
library(tidyverse)
library(data.table)
library(ggmap)
library(ggplot2)
library(tidyr)
library(dplyr)
library(wicket)
library(aWhereAPI)
library(aWhereCharts)
# define input paths and variables ----------------------------------------
# working directory - where input files are located and outputs will be saved
working.dir <- "~/Documents/aWhere/"
setwd(working.dir)
# load external functions
source("0-supporting_functions.R")
source("0-function_generateaWhereHistogramPET.R") # to plot P/PET
# specify the weather data directory and file name
weather.dir <- "climark_work_csvs/"
weather.name <- "180609_past30.csv"
# define template data filename
template.file <- "CLIMARKonlyWardTemplate.csv"
# write histograms to image files
write.hist = TRUE
# to select subarea(s) of interest, list their names in this vector.
# for now, these subareas are limited to ward names. To generate a forecast
# for the entire region instead, set subarea.select to ENTIRE_REGION.
# NOTE(review): the three assignments below are alternative examples; only
# the LAST one ("ENTIRE_REGION") takes effect. Comment out the others to
# make the intent explicit.
subarea.select <- "KARARE"
subarea.select <- c("KARARE", "GOLBO")
subarea.select <- "ENTIRE_REGION"
# bins for tabular summaries of histogram data
# precipitation
bins.precip <- c(seq(from = 0, to = 300, by = 5), Inf)
# P/PET
bins.ppet <- c(0, 0.4, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.4, 1.6, 2.0, Inf)
# base map location and zoom values for mapping the forecast data.
# LAT AND LON
map.lat <- 2.5
map.lon <- 38
map.zoom <- 7
# create the base map
# NOTE(review): this first base map is overwritten by the country-name
# version below; only the "Kenya" map is ultimately kept in base.map.
base.map = ggmap::get_map(location = c(lon = map.lon, lat = map.lat),
zoom = map.zoom,
color = "bw")
# display map of region
gg.map <- ggmap(base.map)
gg.map
# OR use location = country name
base.map <- ggmap::get_map(location = "Kenya", zoom = 6, color = "bw")
gg.map <- ggmap(base.map)
gg.map
# processing steps ----------------------------------------------------------
# combine the directory and file name
weather.file <- paste(weather.dir, weather.name, sep="")
# read the weather data
weather.df <- read.csv(weather.file)
# read the template data. remove columns that are duplicated.
# (shapewkt/longitude/latitude also exist in the weather data, so dropping
# them here avoids duplicate columns after the merge below)
template.df <- read.csv(template.file) %>%
dplyr::select( -c(shapewkt, longitude, latitude ))
# filter weather data for only the grid locations within the template data
get.wards.area <- weather.df %>%
dplyr::filter(locationid %in% template.df$locationid)
# merge the weather data with and template data (wards/constituen)
weather.template.df <- merge(get.wards.area,
template.df, by = "locationid")
# take a look at the combined data set
head(weather.template.df %>%
dplyr::select(locationid, latitude, longitude, CSUMPRE,
CPOVRPR, WARD_NAME))
# construct output filename for weather + template data
weather.template.df.file <- paste("weather+template",
weather.name,
sep = "_")
# write the combined weather and template data to .csv file
write.csv(weather.template.df,
file = weather.template.df.file)
# filter the data set for subarea(s) of interest
# and write this clipped data set to file. It can become a template
# for a forecast.
# (skipped entirely when subarea.select is the sentinel "ENTIRE_REGION")
if (!identical(subarea.select, "ENTIRE_REGION")){
weather.template.df <- weather.template.df %>%
dplyr::filter(WARD_NAME %in% subarea.select)
write.csv(weather.template.df, file = paste("weather+template_clip_",
weather.name,
sep = "_"))
}
# take a look at the combined data set after the clip
head(weather.template.df %>%
dplyr::select(locationid, latitude, longitude, CSUMPRE,
CPOVRPR, WARD_NAME))
# calculate stats across subareas --------------------------------------------
# The identical block of summary statistics was previously written out twice
# (once per ward, once for the whole region); it is now defined once in a
# helper and reused, so the two tables can never drift apart.
#
# Column-name quirks (sd_LTNPPET vs sd_LTNsumPre, etc.) are preserved
# exactly, since downstream consumers may reference them.
summarise.climark.stats <- function(df) {
  df %>%
    dplyr::summarise(avg_CSUMPRE = mean(CSUMPRE),
                     max_CSUMPRE = max(CSUMPRE),
                     sd_CSUMPRE = sd(CSUMPRE),
                     avg_LTNsumPre = mean(LTNSUMP),
                     max_LTNsumPre = max(LTNSUMP),
                     sd_LTNsumPre = sd(LTNSUMP),
                     avg_D_CLTNSUMPRE = mean(DFLTSUM),
                     max_D_CLTNSUMPRE = max(DFLTSUM),
                     sd_D_CLTNSUMPRE = sd(DFLTSUM),
                     avg_CP_PET = mean(CPOVRPR),
                     max_CP_PET = max(CPOVRPR),
                     sd_CP_PET = sd(CPOVRPR),
                     avg_LTNP_PET = mean(LTNASPO),
                     max_LTNP_PET = max(LTNASPO),
                     sd_LTNPPET = sd(LTNASPO),
                     avg_D_CLTNP_PET = mean(DFLTPVP),
                     max_D_CLTNP_PET = max(DFLTPVP),
                     sd_D_CLTNP_PET = sd(DFLTPVP),
                     avg_CAvgMinT = mean(CAvgMinT),
                     max_CAvgMinT = max(CAvgMinT),
                     sd_CAvgMinT = sd(CAvgMinT),
                     avg_CAvgMaxT = mean(CAvgMaxT),
                     max_CAvgMaxT = max(CAvgMaxT),
                     sd_CAvgMaxT = sd(CAvgMaxT),
                     n_grids = n())
}
# per-ward statistics (one row per WARD_NAME)
subarea.stats <- weather.template.df %>%
  dplyr::group_by(WARD_NAME) %>%
  summarise.climark.stats()
# the same statistics across the entire region as a single summary row
region.stats <- weather.template.df %>%
  summarise.climark.stats() %>%
  dplyr::mutate(WARD_NAME = "ENTIRE REGION") %>%
  dplyr::select(WARD_NAME, n_grids, everything())
# combine the ward-specific stats with the overall region calculation;
# rbind on data frames matches columns by name, so the differing column
# orders of the two tables are harmless
stats.out <- rbind(region.stats,
                   subarea.stats)
# take a look at the statistics data
head(stats.out[,1:5], n = 10)
# write ward statistics to file
write.csv(stats.out,
          paste("stats_by_subarea",
                weather.name,
                sep="_"))
# visualize data using the aWhereCharts::generateaWhereHistogram function
# Histogram of precip compared to LTN
# create a descriptive title that includes the current weather file name
hist.title <- paste("Histogram Precipitation",
tools::file_path_sans_ext(weather.name), sep = " ")
aWhereCharts::generateaWhereHistogram(data = weather.template.df,
variable = "CSUMPRE",
title = hist.title,
xlabel = "mm",
compare = TRUE,
compare_var = "LTNSUMP")
# write histogram to file
# NOTE(review): `filename =` is misplaced inside paste0() here (and in the
# three copies below); paste0 still concatenates it, so the output is
# correct, but the intended form is ggsave(filename = paste0(hist.title,
# ".png"), device = "png").
if (write.hist == TRUE) {
ggplot2::ggsave(paste0(filename = hist.title, ".png"),
device = "png")
}
# Histogram of minT compared to LTN
hist.title <- paste("Histogram Min Temp",
tools::file_path_sans_ext(weather.name), sep = " ")
generateaWhereHistogram(data = weather.template.df,
variable = "CAvgMinT",
title = hist.title,
xlabel = "Deg C",
compare = TRUE,
compare_var = "LTAvgMnT")
# write histogram to file
if (write.hist == TRUE) {
ggplot2::ggsave(paste0(filename = hist.title, ".png"),
device = "png")
}
# Histogram of maxT compared to LTN
hist.title <- paste("Histogram Max Temp",
tools::file_path_sans_ext(weather.name), sep = " ")
generateaWhereHistogram(data = weather.template.df,
variable = "CAvgMaxT",
title = hist.title,
xlabel = "Deg C",
compare = TRUE,
compare_var = "LTAvgMxT")
# write histogram to file
if (write.hist == TRUE) {
ggplot2::ggsave(paste0(filename = hist.title, ".png"),
device = "png")
}
# Histogram of P/PET compared to LTN P/PET
# clip the extreme values of CPOVRPR (current P/PET) and LTNASPO
# long-term average P/PET and place these clipped values in new columns,
# "ctemp" and "LTNtemp"
# (ClipValues comes from 0-supporting_functions.R sourced above)
weather.template.df$ctemp <- ClipValues(weather.template.df$CPOVRPR,
max.thresh = 2)
weather.template.df$LTNtemp <- ClipValues(weather.template.df$LTNASPO,
max.thresh = 2)
# construct a descriptive title
hist.title <- paste("Histogram PPET",
tools::file_path_sans_ext(weather.name), sep = " ")
generateaWhereHistogramPET(data = weather.template.df,
"ctemp",
title = hist.title,
xlabel = "P/PET",
compare = TRUE,
compare_var = "LTNtemp")
# write histogram to file
if (write.hist == TRUE) {
ggplot2::ggsave(paste0(filename = hist.title, ".png"),
device = "png")
}
# tabular summaries of histogram data ---------------------------------------
# precipitation
# take a look at the bins
bins.precip
# if the initial precipitation bins extend beyond the actual range of precip
# data, remove the extra bins
bins.precip <- bins.precip[bins.precip < (max(weather.template.df$CSUMPRE) + 5)]
# take a look at the bins after removing the ones that will be empty
bins.precip
# add a column for which bin / bin range each grid falls into
weather.template.df$bin.precip <- NA
weather.template.df$bin.range.precip <- NA
# loop through each bin and label every row falling inside it.
# FIX: seq_len() replaces 1:(length(bins.precip) - 1), which would have run
# the loop with invalid indices (c(1, 0)) if only one bin edge remained
# after the trimming above.
for (b in seq_len(length(bins.precip) - 1)) {
  # rows whose precip falls in [bins.precip[b], bins.precip[b + 1])
  idx <- weather.template.df$CSUMPRE >= bins.precip[b] &
    weather.template.df$CSUMPRE < bins.precip[b + 1]
  # record the bin number and a human-readable "low - high" label
  weather.template.df$bin.precip[idx] <- b
  weather.template.df$bin.range.precip[idx] <- paste0(bins.precip[b],
                                                      " - ",
                                                      bins.precip[b + 1])
}
# add columns for number of grids per precip level
# (note: this leaves weather.template.df grouped by bin.precip; the later
# group_by calls override that grouping, matching the original behavior)
weather.template.df <- weather.template.df %>%
  dplyr::group_by(bin.precip) %>%
  dplyr::mutate(grid.count.precip = n(),
                grid.percent.precip = 100 * n() / nrow(weather.template.df))
# nest the data within each bin and show a summary
weather.template.nested.precip <- weather.template.df %>%
  dplyr::group_by(bin.precip,
                  bin.range.precip,
                  grid.count.precip,
                  grid.percent.precip) %>%
  tidyr::nest() %>%
  dplyr::arrange(bin.precip)
# the nested table is the precipitation histogram in tabular form; drop the
# nested data column to leave one summary row per bin
grids.per.precip.level <- weather.template.nested.precip %>%
  dplyr::select(-data)
# use the "head" function to take a look at the first 6 rows
head(grids.per.precip.level)
# write number of grids per precip level to .csv
write.csv(grids.per.precip.level,
          "grids_per_precip_level.csv")
# P/PET table summary ---------------------------------------------------
# take a look at the P/PET bins
bins.ppet
# add a column for which bin the grid falls into
weather.template.df$bin.ppet <- NA
weather.template.df$bin.range.ppet <- NA
# loop through each bin and label every row falling inside it.
# FIX: seq_len() replaces 1:(length(bins.ppet) - 1), which misbehaves
# when fewer than two bin edges are supplied.
for (b in seq_len(length(bins.ppet) - 1)) {
  # rows whose current P/PET falls in [bins.ppet[b], bins.ppet[b + 1])
  idx <- weather.template.df$CPOVRPR >= bins.ppet[b] &
    weather.template.df$CPOVRPR < bins.ppet[b + 1]
  # record the bin number and a human-readable "low - high" label
  weather.template.df$bin.ppet[idx] <- b
  weather.template.df$bin.range.ppet[idx] <- paste0(bins.ppet[b],
                                                    " - ",
                                                    bins.ppet[b + 1])
}
# add columns for number of grids per ppet level
weather.template.df <- weather.template.df %>%
  dplyr::group_by(bin.ppet) %>%
  dplyr::mutate(grid.count.ppet = n(),
                grid.percent.ppet = 100 * n() / nrow(weather.template.df))
# nest the data within each bin: this is the P/PET histogram in tabular form
weather.template.nested.ppet <- weather.template.df %>%
  dplyr::group_by(bin.ppet,
                  bin.range.ppet,
                  grid.count.ppet,
                  grid.percent.ppet) %>%
  tidyr::nest() %>%
  dplyr::arrange(bin.ppet)
# drop the nested data column to leave one summary row per bin
grids.per.ppet.level <- weather.template.nested.ppet %>%
  dplyr::select(-data)
# use the "head" function to take a look at the first 6 rows
head(grids.per.ppet.level)
# write number of grids per ppet level to .csv
write.csv(grids.per.ppet.level,
          "grids_per_ppet_level.csv")
# ward narrative --------------------------------------------------------------
# To create a narrative about the percentage of a ward receiving
# a given precip or p/pet level, we can subset the larger data frame
# and see the distribution of grid cells at different precipitation / ppet
# levels. Pick ONE ward here; in the original script "TURBI" was assigned
# and then immediately overwritten, so it is kept only as an example.
# ward.select <- "TURBI"
ward.select <- "KARARE"
# filter the data for the ward of interest
ward.df <- weather.template.df %>%
  dplyr::filter(WARD_NAME %in% ward.select)
# count total number of grids in ward
ward.grid.count <- nrow(ward.df)
# per-precip-bin grid counts and ward-relative percentages, nested by bin
ward.df <- ward.df %>%
  dplyr::group_by(bin.precip) %>%
  dplyr::mutate(bin.grid.count.ward = n(),
                bin.grid.percent.ward = 100 * (bin.grid.count.ward /
                                                 ward.grid.count)) %>%
  dplyr::group_by(bin.precip,
                  bin.range.precip,
                  bin.grid.count.ward,
                  bin.grid.percent.ward) %>%
  tidyr::nest() %>%
  dplyr::arrange(bin.precip)
# take a look at the grid count and percentages per precip level
head(ward.df %>% dplyr::select(-data))
# mapping -----------------------------------------------------------------
# clip the extreme values of selected variables to map, so a handful of
# very large/small grids do not wash out the colour scales
weather.template.df$cPovPET <- ClipValues(weather.template.df$CPOVRPR,
                                          max.thresh = 2)
weather.template.df$cLTNPPET <- ClipValues(weather.template.df$LTNASPO,
                                           max.thresh = 2)
weather.template.df$aPre <- ClipValues(weather.template.df$CSUMPRE,
                                       max.thresh = 300)
weather.template.df$aLTNPRE <- ClipValues(weather.template.df$LTNSUMP,
                                          max.thresh = 400)
weather.template.df$aDinPre <- ClipValues(weather.template.df$DFLTSUM,
                                          max.thresh = 250,
                                          min.thresh = -250)
ggmap.df <- weather.template.df
# Expand well-known-text polygons into one row per vertex for ggplot.
# as.tibble() is deprecated (tibble >= 2.0.0); as_tibble() is the
# supported equivalent.
polygon.df <- tibble::as_tibble(wicket::wkt_coords(ggmap.df$shapewkt))
# carry the clipped values over to every vertex of the matching polygon
# ("object" indexes the source row in ggmap.df)
polygon.df$aPre <- ggmap.df$aPre[polygon.df$object]
polygon.df$cPovPET <- ggmap.df$cPovPET[polygon.df$object]
polygon.df$aDinPre <- ggmap.df$aDinPre[polygon.df$object]
# create a map for each of the specified variables
# precipitation: choropleth of clipped current-season precip (aPre) over
# the grey base map, diverging red-green-blue scale centred at 150 mm
precip.map.title <- paste("Precipitation",
                          tools::file_path_sans_ext(weather.name))
precip.map <- ggmap(base.map) +
  geom_polygon( aes( x = lng, y = lat,
                     group = object, fill = aPre),
                data = polygon.df, alpha = 0.7) +
  scale_fill_gradient2(breaks = seq(0,300, by = 50),
                       low = "red", mid = "green", high = "blue",
                       midpoint = 150, limits = c(0,300),
                       name="Precipitation (mm)") +
  ggtitle(precip.map.title)
precip.map
# save the map to file
ggsave(filename = paste0(precip.map.title, ".png"),
       precip.map, width = 6.02, height = 3.38, units = "in")
# P / PET: same layout, clipped current P/PET (cPovPET) centred at 1.0
ppet.map.title <- paste("P PET",
                        tools::file_path_sans_ext(weather.name),
                        sep="_")
ppet.map <- ggmap(base.map) +
  geom_polygon( aes( x = lng, y = lat,
                     group = object, fill = cPovPET),
                data = polygon.df, alpha = 0.7) +
  scale_fill_gradient2(breaks = seq(0,2, by = 0.2),
                       low = "red", mid = "green", high = "blue",
                       midpoint = 1.0, limits = c(0,2.0),
                       name="P/PET") +
  ggtitle(ppet.map.title)
ppet.map
# save the map to file
ggsave(filename = paste0(ppet.map.title, ".png"),
       ppet.map, width = 6.02, height = 3.38, units = "in")
# LTN precipitation
# NOTE(review): despite the "LTN Precipitation" title, aDinPre is the
# clipped DFLTSUM (difference from long-term normal), hence the diverging
# white-midpoint scale spanning -250..250 - confirm the title is intended
pltn.map.title <- paste("LTN Precipitation",
                        tools::file_path_sans_ext(weather.name),
                        sep="_")
pltn.map <- ggmap(base.map) +
  geom_polygon( aes( x = lng, y = lat,
                     group = object, fill = aDinPre),
                data = polygon.df, alpha = 0.7) +
  scale_fill_gradient2(breaks = seq(-250,250, by = 50),
                       low = "red", mid = "white", high = "blue",
                       midpoint = 0, limits = c(-250,250),
                       name="LTN Precipitation (mm)") +
  ggtitle(pltn.map.title)
pltn.map
# save the map to file
ggsave(filename = paste0(pltn.map.title, ".png"),
       pltn.map, width = 6.02, height = 3.38, units = "in")
| /2-histo_stats_maps.R | no_license | vscholl/hybridRice_package | R | false | false | 18,467 | r | # install / load required packages ----------------------------------------
# install aWhere R packages
#library(devtools)
#devtools::install_github("aWhereAPI/aWhere-R-Library")
#devtools::install_github("aWhereAPI/aWhere-R-Charts")
# load required packages
library(tidyverse)
library(data.table)
library(ggmap)
library(ggplot2)
library(tidyr)
library(dplyr)
library(wicket)
library(aWhereAPI)
library(aWhereCharts)
# define input paths and variables ----------------------------------------
# working directory - where input files are located and outputs will be saved
working.dir <- "~/Documents/aWhere/"
setwd(working.dir)
# load external functions
source("0-supporting_functions.R")
source("0-function_generateaWhereHistogramPET.R") # to plot P/PET
# specify the weather data directory and file name
weather.dir <- "climark_work_csvs/"
weather.name <- "180609_past30.csv"
# define template data filename
template.file <- "CLIMARKonlyWardTemplate.csv"
# write histograms to image files (assignment uses <-, not =)
write.hist <- TRUE
# to select subarea(s) of interest, list their names in this vector.
# for now, these subareas are limited to ward names. To generate a forecast
# for the entire region instead, set subarea.select to ENTIRE_REGION.
# Only the last assignment takes effect, so the alternatives are kept as
# commented-out examples instead of dead assignments:
# subarea.select <- "KARARE"
# subarea.select <- c("KARARE", "GOLBO")
subarea.select <- "ENTIRE_REGION"
# bins for tabular summaries of histogram data
# precipitation: 5 mm bins from 0 to 300 mm, plus a catch-all to Inf
bins.precip <- c(seq(from = 0, to = 300, by = 5), Inf)
# P/PET
bins.ppet <- c(0, 0.4, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.4, 1.6, 2.0, Inf)
# base map location and zoom values for mapping the forecast data
# (latitude / longitude of the map centre)
map.lat <- 2.5
map.lon <- 38
map.zoom <- 7
# create the base map from the lat/lon centre and zoom level ...
base.map <- ggmap::get_map(location = c(lon = map.lon, lat = map.lat),
                           zoom = map.zoom,
                           color = "bw")
# display map of region
gg.map <- ggmap(base.map)
gg.map
# ... OR request the base map by country name. NOTE: this second call
# overwrites the lat/lon map above (and costs a second map download);
# keep whichever variant is wanted.
base.map <- ggmap::get_map(location = "Kenya", zoom = 6, color = "bw")
gg.map <- ggmap(base.map)
gg.map
# processing steps ----------------------------------------------------------
# combine the directory and file name
weather.file <- paste(weather.dir, weather.name, sep="")
# read the weather data
weather.df <- read.csv(weather.file)
# read the template data. remove columns that are duplicated
# (shapewkt / longitude / latitude also arrive with the weather data,
# so dropping them here avoids duplicate columns after the merge).
template.df <- read.csv(template.file) %>%
  dplyr::select( -c(shapewkt, longitude, latitude ))
# filter weather data for only the grid locations within the template data
get.wards.area <- weather.df %>%
  dplyr::filter(locationid %in% template.df$locationid)
# merge the weather data with the template data (ward names), joined on
# the shared grid identifier "locationid"
weather.template.df <- merge(get.wards.area,
                             template.df, by = "locationid")
# take a look at the combined data set
head(weather.template.df %>%
       dplyr::select(locationid, latitude, longitude, CSUMPRE,
                     CPOVRPR, WARD_NAME))
# construct output filename for weather + template data
weather.template.df.file <- paste("weather+template",
                                  weather.name,
                                  sep = "_")
# write the combined weather and template data to .csv file
write.csv(weather.template.df,
          file = weather.template.df.file)
# filter the data set for subarea(s) of interest
# and write this clipped data set to file. It can become a template
# for a forecast.
if (!identical(subarea.select, "ENTIRE_REGION")){
  weather.template.df <- weather.template.df %>%
    dplyr::filter(WARD_NAME %in% subarea.select)
  # NOTE(review): sep = "_" adds an underscore after the prefix's own
  # trailing "_", producing "weather+template_clip__<name>" - confirm the
  # double underscore is intended
  write.csv(weather.template.df, file = paste("weather+template_clip_",
                                              weather.name,
                                              sep = "_"))
}
# take a look at the combined data set after the clip
head(weather.template.df %>%
       dplyr::select(locationid, latitude, longitude, CSUMPRE,
                     CPOVRPR, WARD_NAME))
# helper: compute the full set of weather summary statistics for one data
# frame (or for each group of a grouped data frame). Defining the
# statistics once keeps the per-ward and whole-region tables from drifting
# apart - the original code duplicated this summarise() call verbatim.
SummariseWeatherStats <- function(df) {
  df %>%
    dplyr::summarise(avg_CSUMPRE = mean(CSUMPRE),
                     max_CSUMPRE = max(CSUMPRE),
                     sd_CSUMPRE = sd(CSUMPRE),
                     avg_LTNsumPre = mean(LTNSUMP),
                     max_LTNsumPre = max(LTNSUMP),
                     sd_LTNsumPre = sd(LTNSUMP),
                     avg_D_CLTNSUMPRE = mean(DFLTSUM),
                     max_D_CLTNSUMPRE = max(DFLTSUM),
                     sd_D_CLTNSUMPRE = sd(DFLTSUM),
                     avg_CP_PET = mean(CPOVRPR),
                     max_CP_PET = max(CPOVRPR),
                     sd_CP_PET = sd(CPOVRPR),
                     avg_LTNP_PET = mean(LTNASPO),
                     max_LTNP_PET = max(LTNASPO),
                     sd_LTNPPET = sd(LTNASPO),
                     avg_D_CLTNP_PET = mean(DFLTPVP),
                     max_D_CLTNP_PET = max(DFLTPVP),
                     sd_D_CLTNP_PET = sd(DFLTPVP),
                     avg_CAvgMinT = mean(CAvgMinT),
                     max_CAvgMinT = max(CAvgMinT),
                     sd_CAvgMinT = sd(CAvgMinT),
                     avg_CAvgMaxT = mean(CAvgMaxT),
                     max_CAvgMaxT = max(CAvgMaxT),
                     sd_CAvgMaxT = sd(CAvgMaxT),
                     n_grids = n())
}
# calculate stats across subareas (one row per ward)
subarea.stats <- weather.template.df %>%
  dplyr::group_by(WARD_NAME) %>%
  SummariseWeatherStats()
# calculate the stats across the entire region as a single entry in the
# table; this serves as a summary across the entire region
region.stats <- weather.template.df %>%
  SummariseWeatherStats() %>%
  dplyr::mutate(WARD_NAME = "ENTIRE REGION") %>%
  dplyr::select(WARD_NAME, n_grids, everything())
# combine the ward-specific stats with the overall region calculation
# (rbind matches subarea.stats columns to region.stats column names)
stats.out <- rbind(region.stats,
                   subarea.stats)
# take a look at the statistics data
head(stats.out[,1:5], n = 10)
# write ward statistics to file
write.csv(stats.out,
          paste("stats_by_subarea",
                weather.name,
                sep="_"))
# visualize data using the aWhereCharts::generateaWhereHistogram function
# Histogram of precip compared to long-term normal (LTN)
# create a descriptive title that includes the current weather file name
hist.title <- paste("Histogram Precipitation",
                    tools::file_path_sans_ext(weather.name), sep = " ")
aWhereCharts::generateaWhereHistogram(data = weather.template.df,
                                      variable = "CSUMPRE",
                                      title = hist.title,
                                      xlabel = "mm",
                                      compare = TRUE,
                                      compare_var = "LTNSUMP")
# write histogram to file.
# BUG FIX: the original called ggsave(paste0(filename = hist.title, ".png"),
# ...) - the filename= tag sat inside paste0(), where argument names are
# silently ignored; it only worked because the result landed in ggsave's
# first (filename) position. The argument is now named on ggsave itself.
if (write.hist) {
  ggplot2::ggsave(filename = paste0(hist.title, ".png"),
                  device = "png")
}
# Histogram of minT compared to LTN
hist.title <- paste("Histogram Min Temp",
                    tools::file_path_sans_ext(weather.name), sep = " ")
generateaWhereHistogram(data = weather.template.df,
                        variable = "CAvgMinT",
                        title = hist.title,
                        xlabel = "Deg C",
                        compare = TRUE,
                        compare_var = "LTAvgMnT")
# write histogram to file
if (write.hist) {
  ggplot2::ggsave(filename = paste0(hist.title, ".png"),
                  device = "png")
}
# Histogram of maxT compared to LTN
hist.title <- paste("Histogram Max Temp",
                    tools::file_path_sans_ext(weather.name), sep = " ")
generateaWhereHistogram(data = weather.template.df,
                        variable = "CAvgMaxT",
                        title = hist.title,
                        xlabel = "Deg C",
                        compare = TRUE,
                        compare_var = "LTAvgMxT")
# write histogram to file
if (write.hist) {
  ggplot2::ggsave(filename = paste0(hist.title, ".png"),
                  device = "png")
}
# Histogram of P/PET compared to LTN P/PET.
# clip the extreme values of CPOVRPR (current P/PET) and LTNASPO
# (long-term average P/PET) and place these clipped values in new columns,
# "ctemp" and "LTNtemp"
weather.template.df$ctemp <- ClipValues(weather.template.df$CPOVRPR,
                                        max.thresh = 2)
weather.template.df$LTNtemp <- ClipValues(weather.template.df$LTNASPO,
                                          max.thresh = 2)
# construct a descriptive title
hist.title <- paste("Histogram PPET",
                    tools::file_path_sans_ext(weather.name), sep = " ")
generateaWhereHistogramPET(data = weather.template.df,
                           "ctemp",
                           title = hist.title,
                           xlabel = "P/PET",
                           compare = TRUE,
                           compare_var = "LTNtemp")
# write histogram to file
if (write.hist) {
  ggplot2::ggsave(filename = paste0(hist.title, ".png"),
                  device = "png")
}
# tabular summaries of histogram data ---------------------------------------
# precipitation
# take a look at the bins
bins.precip
# if the initial precipitation bins extend beyond the actual range of precip
# data, remove the extra bins
bins.precip <- bins.precip[bins.precip < (max(weather.template.df$CSUMPRE) + 5)]
# take a look at the bins after removing the ones that will be empty
bins.precip
# add a column for which bin / bin range each grid falls into
weather.template.df$bin.precip <- NA
weather.template.df$bin.range.precip <- NA
# loop through each bin and populate the appropriate values.
# seq_len() (rather than 1:(length(...) - 1)) makes the loop run zero
# times, instead of with invalid indices 1 and 0, should the trimming
# above ever leave fewer than two breakpoints.
for (b in seq_len(length(bins.precip) - 1)) {
  # grids whose cumulative precip falls in [bins.precip[b], bins.precip[b+1])
  idx <- weather.template.df$CSUMPRE >= bins.precip[b] &
    weather.template.df$CSUMPRE < bins.precip[b + 1]
  # add the bin number to each row
  weather.template.df$bin.precip[idx] <- b
  # add the bin range label to each row
  weather.template.df$bin.range.precip[idx] <- paste0(as.character(bins.precip[b]),
                                                      " - ",
                                                      as.character(bins.precip[b + 1]))
}
# add columns for number of grids per precip level
weather.template.df <- weather.template.df %>%
  dplyr::group_by(bin.precip) %>%
  dplyr::mutate(grid.count.precip = n(),
                grid.percent.precip = 100 * n() / nrow(weather.template.df))
# nest the data within each bin and show a summary
weather.template.nested.precip <- weather.template.df %>%
  dplyr::group_by(bin.precip,
                  bin.range.precip,
                  grid.count.precip,
                  grid.percent.precip) %>%
  tidyr::nest() %>%
  dplyr::arrange(bin.precip)
# this is essentially the histogram in tabular form.
# get the nested data out of the way,
grids.per.precip.level <- weather.template.nested.precip %>%
  dplyr::select(-data)
# and use the "head" function to take a look at the first 6 rows
head(grids.per.precip.level)
# write number of grids per precip level to .csv
write.csv(grids.per.precip.level,
          "grids_per_precip_level.csv")
# P/PET table summary ---------------------------------------------------
# take a look at the P/PET bins
bins.ppet
# add columns recording which bin (and bin range label) each grid falls into
weather.template.df$bin.ppet <- NA
weather.template.df$bin.range.ppet <- NA
# loop through each bin and populate the appropriate values.
# seq_len() is used instead of 1:(length(...) - 1) so the loop runs zero
# times (rather than with invalid indices 1 and 0) if the bin vector ever
# has fewer than two breakpoints.
for (b in seq_len(length(bins.ppet) - 1)) {
  # logical mask of grids whose P/PET falls in [bins.ppet[b], bins.ppet[b+1])
  idx <- weather.template.df$CPOVRPR >= bins.ppet[b] &
    weather.template.df$CPOVRPR < bins.ppet[b + 1]
  # add the bin number to each matching row
  weather.template.df$bin.ppet[idx] <- b
  # add a human-readable bin range label to each matching row
  weather.template.df$bin.range.ppet[idx] <- paste0(as.character(bins.ppet[b]),
                                                    " - ",
                                                    as.character(bins.ppet[b + 1]))
}
# add columns for number (and percentage) of grids per ppet level
weather.template.df <- weather.template.df %>%
  dplyr::group_by(bin.ppet) %>%
  dplyr::mutate(grid.count.ppet = n(),
                grid.percent.ppet = 100 * n() / nrow(weather.template.df))
# nest the per-grid rows within each bin: essentially the P/PET histogram
# in tabular form
weather.template.nested.ppet <- weather.template.df %>%
  dplyr::group_by(bin.ppet,
                  bin.range.ppet,
                  grid.count.ppet,
                  grid.percent.ppet) %>%
  tidyr::nest() %>%
  dplyr::arrange(bin.ppet)
# get the nested data out of the way,
grids.per.ppet.level <- weather.template.nested.ppet %>%
  dplyr::select(-data)
# and use the "head" function to take a look at the first 6 rows
head(grids.per.ppet.level)
# write number of grids per ppet level to .csv
write.csv(grids.per.ppet.level,
          "grids_per_ppet_level.csv")
# ward narrative --------------------------------------------------------------
# To create a narrative about the percentage of a ward receiving
# a given precip or p/pet level, we can subset the larger data frame
# and see the distribution of grid cells at different precipitation / ppet
# levels. Pick ONE ward here; in the original script "TURBI" was assigned
# and then immediately overwritten, so it is kept only as an example.
# ward.select <- "TURBI"
ward.select <- "KARARE"
# filter the data for the ward of interest
ward.df <- weather.template.df %>%
  dplyr::filter(WARD_NAME %in% ward.select)
# count total number of grids in ward
ward.grid.count <- nrow(ward.df)
# per-precip-bin grid counts and ward-relative percentages, nested by bin
ward.df <- ward.df %>%
  dplyr::group_by(bin.precip) %>%
  dplyr::mutate(bin.grid.count.ward = n(),
                bin.grid.percent.ward = 100 * (bin.grid.count.ward /
                                                 ward.grid.count)) %>%
  dplyr::group_by(bin.precip,
                  bin.range.precip,
                  bin.grid.count.ward,
                  bin.grid.percent.ward) %>%
  tidyr::nest() %>%
  dplyr::arrange(bin.precip)
# take a look at the grid count and percentages per precip level
head(ward.df %>% dplyr::select(-data))
# mapping -----------------------------------------------------------------
# clip the extreme values of selected variables to map, so a handful of
# very large/small grids do not wash out the colour scales
weather.template.df$cPovPET <- ClipValues(weather.template.df$CPOVRPR,
                                          max.thresh = 2)
weather.template.df$cLTNPPET <- ClipValues(weather.template.df$LTNASPO,
                                           max.thresh = 2)
weather.template.df$aPre <- ClipValues(weather.template.df$CSUMPRE,
                                       max.thresh = 300)
weather.template.df$aLTNPRE <- ClipValues(weather.template.df$LTNSUMP,
                                          max.thresh = 400)
weather.template.df$aDinPre <- ClipValues(weather.template.df$DFLTSUM,
                                          max.thresh = 250,
                                          min.thresh = -250)
ggmap.df <- weather.template.df
# Expand well-known-text polygons into one row per vertex for ggplot.
# as.tibble() is deprecated (tibble >= 2.0.0); as_tibble() is the
# supported equivalent.
polygon.df <- tibble::as_tibble(wicket::wkt_coords(ggmap.df$shapewkt))
# carry the clipped values over to every vertex of the matching polygon
# ("object" indexes the source row in ggmap.df)
polygon.df$aPre <- ggmap.df$aPre[polygon.df$object]
polygon.df$cPovPET <- ggmap.df$cPovPET[polygon.df$object]
polygon.df$aDinPre <- ggmap.df$aDinPre[polygon.df$object]
# create a map for each of the specified variables
# precipitation: choropleth of clipped current-season precip (aPre) over
# the grey base map, diverging red-green-blue scale centred at 150 mm
precip.map.title <- paste("Precipitation",
                          tools::file_path_sans_ext(weather.name))
precip.map <- ggmap(base.map) +
  geom_polygon( aes( x = lng, y = lat,
                     group = object, fill = aPre),
                data = polygon.df, alpha = 0.7) +
  scale_fill_gradient2(breaks = seq(0,300, by = 50),
                       low = "red", mid = "green", high = "blue",
                       midpoint = 150, limits = c(0,300),
                       name="Precipitation (mm)") +
  ggtitle(precip.map.title)
precip.map
# save the map to file
ggsave(filename = paste0(precip.map.title, ".png"),
       precip.map, width = 6.02, height = 3.38, units = "in")
# P / PET: same layout, clipped current P/PET (cPovPET) centred at 1.0
ppet.map.title <- paste("P PET",
                        tools::file_path_sans_ext(weather.name),
                        sep="_")
ppet.map <- ggmap(base.map) +
  geom_polygon( aes( x = lng, y = lat,
                     group = object, fill = cPovPET),
                data = polygon.df, alpha = 0.7) +
  scale_fill_gradient2(breaks = seq(0,2, by = 0.2),
                       low = "red", mid = "green", high = "blue",
                       midpoint = 1.0, limits = c(0,2.0),
                       name="P/PET") +
  ggtitle(ppet.map.title)
ppet.map
# save the map to file
ggsave(filename = paste0(ppet.map.title, ".png"),
       ppet.map, width = 6.02, height = 3.38, units = "in")
# LTN precipitation
# NOTE(review): despite the "LTN Precipitation" title, aDinPre is the
# clipped DFLTSUM (difference from long-term normal), hence the diverging
# white-midpoint scale spanning -250..250 - confirm the title is intended
pltn.map.title <- paste("LTN Precipitation",
                        tools::file_path_sans_ext(weather.name),
                        sep="_")
pltn.map <- ggmap(base.map) +
  geom_polygon( aes( x = lng, y = lat,
                     group = object, fill = aDinPre),
                data = polygon.df, alpha = 0.7) +
  scale_fill_gradient2(breaks = seq(-250,250, by = 50),
                       low = "red", mid = "white", high = "blue",
                       midpoint = 0, limits = c(-250,250),
                       name="LTN Precipitation (mm)") +
  ggtitle(pltn.map.title)
pltn.map
# save the map to file
ggsave(filename = paste0(pltn.map.title, ".png"),
       pltn.map, width = 6.02, height = 3.38, units = "in")
|
library(quanteda)
library(magrittr)
library(caret)
library(quanteda.textmodels)
library(shinyWidgets)
library(DT)
library(e1071)
library(wordcloud)
library(RColorBrewer)
#library(lexicon)
# pre-computed lemma lookup table (a saved copy, presumably of
# lexicon::hash_lemmas - confirm), loaded once at app start for the
# lemmatization preprocessing option
hash_lemmas<-readRDS('hash_lemmas.rds')
# UI for the Text Classification app:
# - sidebar: data loading, variable selection, preprocessing toggles,
#   train/test split and model choice
# - main panel: overview/help, data exploration, and training report tabs
shinyUI(fluidPage(
  title = "Text Classification",
  titlePanel(title=div(img(src="logo.png",align='right'),"Text Classification")),
  # Input in sidepanel:
  sidebarPanel(
    # sample-data picker: one choice per .csv file found in data/
    pickerInput(
      inputId = "sample",
      label = "Load Sample Data",
      choices = sub('\\.csv$', '',list.files('data')),
      choicesOpt = list(icon = c("plane-departure", "amazon", 'twitter','refresh')),
      options =list(`live-search` = TRUE,style = "btn-primary")
    ),
    #selectInput("sample","Load Sample Data",choices = sub('\\.csv$', '',list.files('data'))),
    p("OR"),
    fileInput("file", "Upload Data"),
    #actionButton("load","Load Data"),
    actionButton(
      inputId = "load",
      label = "Load Data",
      style = "color: #fff; background-color: #337ab7; border-color: #2e6da4",
      color = "default",size = 's',icon=icon('cloud-upload-alt')
    ),
    # h5("select X"),
    hr(),
    strong(p("Variable Selection")),
    # server-rendered selectors for the input (X) and target (Y) columns
    uiOutput('inp_var'),
    # h5("select Y"),
    uiOutput('tar_var'),
    hr(),
    strong(p("Data Preprocessing")),
    helpText("Note: Remove symbols does'nt remove @ from twitter handles"),
    # three columns of preprocessing checkboxes
    fluidRow(
      column(4,align="left",
             prettyCheckbox('remove_punct', 'Remove Punctuation', value = TRUE,status = 'primary',icon = icon("check")),
             prettyCheckbox('split_hyphens', 'Split Hyphen', value = TRUE,status = 'primary',icon = icon("check"))),
      column(4,align="left",
             prettyCheckbox('remove_symbols', 'Remove Symbols', value = TRUE,status = 'primary',icon = icon("check")),
             prettyCheckbox('stem', 'Stemming', value = FALSE,status = 'primary',icon = icon("check"))),
      column(4,align="left",
             prettyCheckbox('remove_numbers', 'Remove Numbers', value = TRUE,status = 'primary',icon = icon("check")),
             prettyCheckbox('lemma', 'Lemmatization', value = FALSE,status = 'primary',icon = icon("check"))
      ),
    ),
    textInput("stopw", ("Enter stop words separated by comma(,)"), value = "will,can"),
    #actionButton('plotwc',"Plot WordCloud"),
    actionButton(
      inputId = "plotwc",
      label = "Plot WordCloud",
      style = "color: #fff; background-color: #337ab7; border-color: #2e6da4",
      color = "default",size = 's'),
    # ),
    # h5("select training data (in percentage)"),
    hr(),
    strong(p("Train-Test Split")),
    sliderInput("tr_per",label = "select training data (in percentage)",min = 0,max = 100,value = 70,step = 1),
    hr(),
    #h5("select classification algorithm"),
    strong(p("Model Selection")),
    selectInput("algo",label = "select algorithm",choices = c("Naive Bayes"="nb","Logistic Regression"="lr")),
    #actionButton("apply","Train Model"),
    actionButton(
      inputId = "apply",
      label = "Train Model",
      style = "color: #fff; background-color: #337ab7; border-color: #2e6da4",
      color = "default",size = 's',icon=icon('cogs')
    ),
    # progress bar updated from the server while the model trains
    progressBar(id = "pb6", value = 0, status = "success", size = "xs")
  ),
  # Main Panel:
  mainPanel(
    tabsetPanel(type = "tabs",
                #
                tabPanel("Overview & Example Dataset",h4(p("How to use this App")),
                         p("To use this app you need a document corpus in csv file format. Make sure each document is separated from another document with a new line character.
                           Dataset should contain atleast two columns text (used as input) and targert (used for predictions).
                           To do basic Text classfication on your text corpus, click on Browse in left-sidebar panel and upload the file. Once the file is uploaded it will do the computations in
                           back-end with default inputs and accordingly results will be displayed in various tabs.", align = "justify"),
                         p("If you wish to change the input, modify the input in left side-bar panel and click on Apply changes. Accordingly results in other tab will be refreshed
                           ", align = "Justify"),
                         h5("Note"),
                         p("You might observe no change in the outputs after clicking 'Apply Changes'. Wait for few seconds. As soon as all the computations
                           are over in back-end results will be refreshed",
                           align = "justify"),
                         #, height = 280, width = 400
                         br(),
                         h4(p("Download Sample file")),
                         selectInput("downloads","Select Sample file to download",choices = sub('\\.csv$', '',list.files('data'))),
                         downloadButton('downloadData1', 'Download'),br(),br(),
                         p("Please note that download will not work with RStudio interface. Download will work only in web-browsers. So open this app in a web-browser and then download the example file. For opening this app in web-browser click on \"Open in Browser\" as shown below -"),
                         img(src = "example1.png")
                )
                ,
                tabPanel("Data",
                         h5("Data Dimensions"),
                         verbatimTextOutput('dim'),
                         h5("Distribution of Target(Y)"),
                         verbatimTextOutput('y_dis'),
                         hr(),
                         h5("Sample dataset"),
                         dataTableOutput("sampleData"),hr(),
                         h4("Word Cloud"),
                         # gear dropdown holding the word-cloud tuning input
                         dropdownButton(
                           tags$h3("List of Inputs"),
                           sliderInput(inputId = 'minword',
                                       label = 'Min Term Frequency',
                                       value = 10,
                                       min = 1,
                                       max = 50),
                           circle = TRUE, status = "danger",
                           icon = icon("gear"), width = "300px",
                           tooltip = tooltipOptions(title = "Click to see inputs !")
                         ),
                         plotOutput("wordcloud",height = 700, width = 700),br(),
                         #textInput("in",label = "text"),
                ),
                tabPanel("Training Report",
                         h4("Confusion Matrix"),
                         textOutput("cf_text"),
                         plotOutput("cf_matrix"),
                         hr(),
                         verbatimTextOutput('cf'),
                         hr(),
                         # per-class token browser rendered server-side
                         uiOutput("tokens"),
                         dropdownButton(
                           tags$h3("List of Inputs"),
                           sliderInput(inputId = 'maxword',
                                       label = 'Max words to display',
                                       value = 20,
                                       min = 1,
                                       max = 100),
                           circle = TRUE, status = "danger",
                           icon = icon("gear"), width = "300px",
                           tooltip = tooltipOptions(title = "Click to see inputs !")
                         ),
                         plotOutput("token_wc",height = 700, width = 700),br(),
                         dataTableOutput("token_table")
                )
    ),
  )
))
| /ui.R | no_license | sudhir-voleti/text-classification-shinyapp | R | false | false | 8,496 | r | library(quanteda)
library(magrittr)
library(caret)
library(quanteda.textmodels)
library(shinyWidgets)
library(DT)
library(e1071)
library(wordcloud)
library(RColorBrewer)
#library(lexicon)
# pre-computed lemma lookup table (a saved copy, presumably of
# lexicon::hash_lemmas - confirm), loaded once at app start for the
# lemmatization preprocessing option
hash_lemmas <- readRDS('hash_lemmas.rds')
# UI for the Text Classification app:
# - sidebar: data loading, variable selection, preprocessing toggles,
#   train/test split and model choice
# - main panel: overview/help, data exploration, and training report tabs
# Fixes vs original: user-facing typos corrected ("does'nt", "targert",
# "atleast", "classfication", align = "Justify"); trailing-comma (empty)
# arguments removed; dead commented-out widgets dropped.
shinyUI(fluidPage(
  title = "Text Classification",
  titlePanel(title = div(img(src = "logo.png", align = 'right'), "Text Classification")),
  # ---- sidebar: all user inputs ----
  sidebarPanel(
    # sample-data picker: one choice per .csv file found in data/
    pickerInput(
      inputId = "sample",
      label = "Load Sample Data",
      choices = sub('\\.csv$', '', list.files('data')),
      choicesOpt = list(icon = c("plane-departure", "amazon", 'twitter', 'refresh')),
      options = list(`live-search` = TRUE, style = "btn-primary")
    ),
    p("OR"),
    fileInput("file", "Upload Data"),
    actionButton(
      inputId = "load",
      label = "Load Data",
      style = "color: #fff; background-color: #337ab7; border-color: #2e6da4",
      color = "default", size = 's', icon = icon('cloud-upload-alt')
    ),
    hr(),
    strong(p("Variable Selection")),
    # server-rendered selectors for the input (X) and target (Y) columns
    uiOutput('inp_var'),
    uiOutput('tar_var'),
    hr(),
    strong(p("Data Preprocessing")),
    helpText("Note: Remove symbols doesn't remove @ from twitter handles"),
    # three columns of preprocessing checkboxes
    fluidRow(
      column(4, align = "left",
             prettyCheckbox('remove_punct', 'Remove Punctuation', value = TRUE, status = 'primary', icon = icon("check")),
             prettyCheckbox('split_hyphens', 'Split Hyphen', value = TRUE, status = 'primary', icon = icon("check"))),
      column(4, align = "left",
             prettyCheckbox('remove_symbols', 'Remove Symbols', value = TRUE, status = 'primary', icon = icon("check")),
             prettyCheckbox('stem', 'Stemming', value = FALSE, status = 'primary', icon = icon("check"))),
      column(4, align = "left",
             prettyCheckbox('remove_numbers', 'Remove Numbers', value = TRUE, status = 'primary', icon = icon("check")),
             prettyCheckbox('lemma', 'Lemmatization', value = FALSE, status = 'primary', icon = icon("check")))
    ),
    textInput("stopw", ("Enter stop words separated by comma(,)"), value = "will,can"),
    actionButton(
      inputId = "plotwc",
      label = "Plot WordCloud",
      style = "color: #fff; background-color: #337ab7; border-color: #2e6da4",
      color = "default", size = 's'),
    hr(),
    strong(p("Train-Test Split")),
    sliderInput("tr_per", label = "select training data (in percentage)", min = 0, max = 100, value = 70, step = 1),
    hr(),
    strong(p("Model Selection")),
    selectInput("algo", label = "select algorithm", choices = c("Naive Bayes" = "nb", "Logistic Regression" = "lr")),
    actionButton(
      inputId = "apply",
      label = "Train Model",
      style = "color: #fff; background-color: #337ab7; border-color: #2e6da4",
      color = "default", size = 's', icon = icon('cogs')
    ),
    # progress bar updated from the server while the model trains
    progressBar(id = "pb6", value = 0, status = "success", size = "xs")
  ),
  # ---- main panel: results tabs ----
  mainPanel(
    tabsetPanel(type = "tabs",
                tabPanel("Overview & Example Dataset", h4(p("How to use this App")),
                         p("To use this app you need a document corpus in csv file format. Make sure each document is separated from another document with a new line character.
                           Dataset should contain at least two columns: text (used as input) and target (used for predictions).
                           To do basic Text classification on your text corpus, click on Browse in left-sidebar panel and upload the file. Once the file is uploaded it will do the computations in
                           back-end with default inputs and accordingly results will be displayed in various tabs.", align = "justify"),
                         p("If you wish to change the input, modify the input in left side-bar panel and click on Apply changes. Accordingly results in other tab will be refreshed
                           ", align = "justify"),
                         h5("Note"),
                         p("You might observe no change in the outputs after clicking 'Apply Changes'. Wait for few seconds. As soon as all the computations
                           are over in back-end results will be refreshed",
                           align = "justify"),
                         br(),
                         h4(p("Download Sample file")),
                         selectInput("downloads", "Select Sample file to download", choices = sub('\\.csv$', '', list.files('data'))),
                         downloadButton('downloadData1', 'Download'), br(), br(),
                         p("Please note that download will not work with RStudio interface. Download will work only in web-browsers. So open this app in a web-browser and then download the example file. For opening this app in web-browser click on \"Open in Browser\" as shown below -"),
                         img(src = "example1.png")
                ),
                tabPanel("Data",
                         h5("Data Dimensions"),
                         verbatimTextOutput('dim'),
                         h5("Distribution of Target(Y)"),
                         verbatimTextOutput('y_dis'),
                         hr(),
                         h5("Sample dataset"),
                         dataTableOutput("sampleData"), hr(),
                         h4("Word Cloud"),
                         # gear dropdown holding the word-cloud tuning input
                         dropdownButton(
                           tags$h3("List of Inputs"),
                           sliderInput(inputId = 'minword',
                                       label = 'Min Term Frequency',
                                       value = 10,
                                       min = 1,
                                       max = 50),
                           circle = TRUE, status = "danger",
                           icon = icon("gear"), width = "300px",
                           tooltip = tooltipOptions(title = "Click to see inputs !")
                         ),
                         plotOutput("wordcloud", height = 700, width = 700), br()
                ),
                tabPanel("Training Report",
                         h4("Confusion Matrix"),
                         textOutput("cf_text"),
                         plotOutput("cf_matrix"),
                         hr(),
                         verbatimTextOutput('cf'),
                         hr(),
                         # per-class token browser rendered server-side
                         uiOutput("tokens"),
                         dropdownButton(
                           tags$h3("List of Inputs"),
                           sliderInput(inputId = 'maxword',
                                       label = 'Max words to display',
                                       value = 20,
                                       min = 1,
                                       max = 100),
                           circle = TRUE, status = "danger",
                           icon = icon("gear"), width = "300px",
                           tooltip = tooltipOptions(title = "Click to see inputs !")
                         ),
                         plotOutput("token_wc", height = 700, width = 700), br(),
                         dataTableOutput("token_table")
                )
    )
  )
))
|
#############################################################################
# Copyright (c) 2012 Christophe Dutang
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
#
#############################################################################
### SE computation in GNE
###
### R functions
###
#Stackelberg equilibrium computation
#Stackelberg equilibrium computation
#
#The players listed in 'leaders' move first and minimize (the Euclidean norm
#of) their objectives; the remaining players (followers) react by playing a
#generalized Nash equilibrium, computed through the semismooth reformulation
#(funSSR / jacSSR) inside bestresponse().
#Arguments mirror funSSR/jacSSR; in addition:
# leaders          indices of the leading players (a strict subset of 1:nbplay)
# method.follower  method passed to nseq() for the followers' subproblem
# method.leader    method passed to minpb() for the leaders' problem
# maxit.follower   maximal number of (randomly restarted) follower solves
# simpleconstr     if TRUE, the leaders' constraints are evaluated with a
#                  dummy follower response (all ones) instead of the costly
#                  best response
#Returns list(par, value, counts, iter, code, message); code 100 signals that
#Phi or Jac Phi cannot be evaluated at the initial point.
SE.nseq <- function(leaders, init, dimx, dimlam,
    obj, argobj, grobj, arggrobj, heobj, argheobj,
    constr, argconstr, grconstr, arggrconstr, heconstr, argheconstr,
    compl, gcompla, gcomplb, argcompl,
    dimmu, joint, argjoint, grjoint, arggrjoint, hejoint, arghejoint,
    method.follower="default", method.leader="default",
    control.follower=list(), control.leader=list(),
    maxit.follower=10, silent=TRUE,
    simpleconstr=FALSE, ...)
{
    if(method.follower == "default") method.follower <- "Newton"
    if(method.leader == "default") method.leader <- "BFGS"

    argtest1 <- testargfunSSR(init, dimx, dimlam, grobj, arggrobj, constr, argconstr, grconstr, arggrconstr,
        compl, argcompl, dimmu, joint, argjoint, grjoint, arggrjoint)

    #basic tests for funSSR: Phi must be computable and finite at init
    test.try <- try( funSSR(init, dimx, dimlam, grobj, arggrobj, constr, argconstr,
        grconstr, arggrconstr, compl, argcompl, dimmu, joint, argjoint,
        grjoint, arggrjoint), silent=silent )
    #inherits() is the robust test for try() failures (vs class(x) == "try-error")
    if(inherits(test.try, "try-error"))
        return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
            message="Can't evalate Phi(init).", fvec=NA) )
    if(any(is.nan(test.try)) || any(is.infinite(test.try)) )
        return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
            message="Phi(init) has infinite or NaN values.", fvec=NA) )

    argtest2 <- testargjacSSR(init, dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr,
        heconstr, argheconstr, gcompla, gcomplb, argcompl, dimmu, joint, argjoint, grjoint, arggrjoint,
        hejoint, arghejoint)

    #basic tests for jacSSR: Jac Phi must be computable and finite at init
    test.try <- try( jacSSR(init, dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr,
        heconstr, argheconstr, gcompla, gcomplb, argcompl, dimmu, joint, argjoint,
        grjoint, arggrjoint, hejoint, arghejoint), silent=silent )
    if(inherits(test.try, "try-error"))
        return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
            message="Can't evaluate Jac Phi(init).", fvec=NA) )
    if(any(is.nan(test.try)) || any(is.infinite(test.try)) )
        return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
            message="Jac Phi(init) has infinite or NaN values.", fvec=NA) )

    nbplay <- argtest1$nplayer
    argtest3 <- testarggapNIR(init[1:nbplay], dimx, obj, argobj)

    #leaders must be a non-exhaustive subset of the player indices
    if(!is.numeric(leaders) || length(leaders) > nbplay-1)
        stop("wrong leaders argument.")
    if(any(!leaders %in% 1:nbplay))
        stop("wrong leaders argument.")
    followers <- (1:nbplay)[!(1:nbplay %in% leaders)]

    dimx <- argtest1$dimx
    n <- sum(dimx)
    nfoll <- sum(dimx[followers])
    nlead <- sum(dimx[leaders])
    dimlam <- argtest1$dimlam
    m <- sum(dimlam)

    #1st row is the begin index, 2nd row the end index
    index4lam <- rbind( cumsum(dimlam) - dimlam + 1, cumsum(dimlam) ) + n
    index4x <- rbind( cumsum(dimx) - dimx + 1, cumsum(dimx) ) + 0
    index4xfoll <- as.vector(sapply(followers, function(i) index4x[1,i]:index4x[2,i]))
    index4xlead <- as.vector(sapply(leaders, function(i) index4x[1,i]:index4x[2,i]))
    index4lamfoll <- as.vector(sapply(followers, function(i) index4lam[1,i]:index4lam[2,i]))
    index4mufoll <- (1:length(init))[-(1:(n+m))]

    #evaluation counters, updated by side effect (<<-) from the closures below
    bestrespcount <- list(phicnt=0, jaccnt=0)
    objleadcount <- list(fn=0)

    #wrap objective gradient (x, i, j, arg)
    #1st arg=x, 2nd arg=id player, 3rd arg= deriv index, 4th arg=add. arg
    grobjfoll <- function(xfoll, play, d1, arg)
        transfoll(argtest1$grobj, xfoll, index4xfoll, index4xlead, followers, leaders,
            arg, arg$foll[play], arg$foll[d1], arg$add)
    #wrap objective hessian (x, i, j, k, arg)
    #1st arg=x, 2nd arg=id player, 3rd arg= deriv index, 4th arg= deriv index, 5th arg=add. arg
    heobjfoll <- function(xfoll, play, d1, d2, arg)
        transfoll(argtest2$heobj, xfoll, index4xfoll, index4xlead, followers, leaders,
            arg, arg$foll[play], arg$foll[d1], arg$foll[d2], arg$add)
    #wrap constraint function (x, i, arg)
    #1st arg=x, 2nd arg=id player, 3rd arg= add. arg
    if(!is.null(argtest1$constr))
        constrfoll <- function(xfoll, play, arg)
            transfoll(argtest1$constr, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[play], arg$add)
    else
        constrfoll <- NULL
    #wrap constraint gradient (x, i, j, arg)
    #1st arg=x, 2nd arg=id player, 3rd arg= deriv index, 4th arg=add. arg
    if(!is.null(argtest1$grconstr))
        grconstrfoll <- function(xfoll, play, d1, arg)
            transfoll(argtest1$grconstr, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[play], arg$foll[d1], arg$add)
    else
        grconstrfoll <- NULL
    #wrap constraint hessian (x, i, j, k, arg)
    #1st arg=x, 2nd arg=id player, 3rd arg= deriv index, 4th arg= deriv index, 5th arg=add. arg
    if(!is.null(argtest2$heconstr))
        heconstrfoll <- function(xfoll, play, d1, d2, arg)
            transfoll(argtest2$heconstr, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[play], arg$foll[d1], arg$foll[d2], arg$add)
    else
        heconstrfoll <- NULL
    #wrap joint function (x, arg)
    #1st arg=x, 2nd arg=add. arg
    if(!is.null(argtest1$joint))
        jointfoll <- function(xfoll, arg)
            transfoll(argtest1$joint, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$add)
    else
        jointfoll <- NULL
    #wrap joint gradient (x, j, arg)
    #1st arg=x, 2nd arg= deriv index, 3rd arg=add. arg
    if(!is.null(argtest1$grjoint))
        grjointfoll <- function(xfoll, d1, arg)
            transfoll(argtest1$grjoint, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[d1], arg$add)
    else
        grjointfoll <- NULL
    #wrap joint hessian (x, j, k, arg)
    #1st arg=x, 2nd arg= deriv index, 3rd arg= deriv index, 4th arg=add. arg
    if(!is.null(argtest2$hejoint))
        hejointfoll <- function(xfoll, d1, d2, arg)
            transfoll(argtest2$hejoint, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[d1], arg$foll[d2], arg$add)
    else
        hejointfoll <- NULL

    listfollfunc <- list(grobjfoll=grobjfoll, heobjfoll=heobjfoll,
        constrfoll=constrfoll, grconstrfoll=grconstrfoll, heconstrfoll=heconstrfoll,
        jointfoll=jointfoll, grjointfoll=grjointfoll, hejointfoll=hejointfoll)

    #compute the objective of leaders for a strategy xlead of the leaders
    #and the corresponding best-response actions of the followers.
    #NOTE: id4xlead gets a default because argfnlist below does not supply it;
    #without the default, x[id4xlead] would raise a missing-argument error
    #(the sibling closure constrleaders uses the captured index4xlead directly).
    objleaders <- function(xlead, arg1, arg2, arg3, leaders, followers,
        id4xfoll, id4lamfoll, id4mufoll, init, id4xlead = index4xlead,
        follfun, method.follower, control.follower)
    {
        n <- sum(arg1$dimx)
        nfoll <- sum(arg1$dimx[followers])
        x <- rep(NA, n)
        #NOTE(review): rep(xlead, nbplay) presumes scalar per-player strategies
        #for the starting point - confirm for dimx components > 1
        init2 <- c(rep(xlead, nbplay), rep(1e-3, length(init)-nbplay))
        foll <- bestresponse(xlead, arg1, arg2, leaders, followers,
            id4xfoll, id4lamfoll, id4mufoll, init2,
            follfun, method.follower=method.follower,
            control.follower=control.follower,
            maxit.follower=maxit.follower, ...)
        #accumulate follower-solver evaluation counts in the enclosing frame
        bestrespcount$phicnt <<- bestrespcount$phicnt + foll$counts["phicnt"]
        bestrespcount$jaccnt <<- bestrespcount$jaccnt + foll$counts["jaccnt"]
        x[id4xlead] <- xlead
        x[id4xfoll] <- foll$par[1:nfoll]
        objleaders <- sapply(leaders, function(i) arg3$obj(x, i, arg3$argobj))
        objleadcount$fn <<- objleadcount$fn + 1
        #several leaders: aggregate their objectives by the Euclidean norm
        if(length(objleaders) > 1)
            return( sqrt(sum(objleaders^2)) )
        else
            return( objleaders )
    }
    argfnlist <- list(arg1=argtest1, arg2=argtest2, arg3=argtest3,
        leaders=leaders, followers=followers,
        id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
        id4mufoll=index4mufoll, init=init, follfun=listfollfunc,
        method.follower=method.follower, control.follower=control.follower)

    #create the constraint function of leaders given the best response of the
    #followers, with the convention constr(x) <= 0 (as for the input functions)
    if(is.null(argtest1$joint) && is.null(argtest1$constr))
    {
        constrleaders <- NULL
    }else if(is.null(argtest1$joint) && !is.null(argtest1$constr))
    {
        if(!simpleconstr)
        {
            #individual constraints only, evaluated at the true best response
            constrleaders <- function(xlead, arg1, arg2, leaders, followers,
                id4xfoll, id4lamfoll, id4mufoll, init,
                follfun, method.follower, control.follower,
                maxit.follower)
            {
                nfoll <- sum(arg1$dimx[followers])
                foll <- bestresponse(xlead, arg1, arg2, leaders, followers,
                    id4xfoll, id4lamfoll, id4mufoll, init,
                    follfun, method.follower=method.follower,
                    control.follower=control.follower,
                    maxit.follower=maxit.follower)
                bestrespcount$phicnt <<- bestrespcount$phicnt + foll$counts["phicnt"]
                bestrespcount$jaccnt <<- bestrespcount$jaccnt + foll$counts["jaccnt"]
                x <- rep(NA, n)
                x[index4xlead] <- xlead
                x[id4xfoll] <- foll$par[1:nfoll]
                sapply(leaders, function(i) arg1$constr(x, i, arg1$argconstr))
            }
            argconstrlist <- list(arg1=argtest1, arg2=argtest2,
                leaders=leaders, followers=followers,
                id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
                id4mufoll=index4mufoll, init=init, follfun=listfollfunc,
                method.follower=method.follower,
                control.follower=control.follower,
                maxit.follower=maxit.follower)
        }else
        {
            #cheap variant: dummy follower response (all ones)
            constrleaders <- function(xlead, arg1, arg2, leaders, followers,
                id4xfoll, id4lamfoll, id4mufoll, init,
                follfun, method.follower, control.follower,
                maxit.follower)
            {
                nfoll <- sum(arg1$dimx[followers])
                x <- rep(NA, n)
                x[index4xlead] <- xlead
                x[id4xfoll] <- rep(1, nfoll)
                sapply(leaders, function(i) arg1$constr(x, i, arg1$argconstr))
            }
            argconstrlist <- list(arg1=argtest1, arg2=argtest2,
                leaders=leaders, followers=followers,
                id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
                id4mufoll=index4mufoll, init=NULL, follfun=NULL,
                method.follower=NULL,
                control.follower=NULL,
                maxit.follower=NULL)
        }
    }else if(!is.null(argtest1$joint) && is.null(argtest1$constr))
    {
        #joint constraints only
        constrleaders <- function(xlead, arg1, arg2, leaders, followers,
            id4xfoll, id4lamfoll, id4mufoll, init,
            follfun, method.follower, control.follower,
            maxit.follower)
        {
            nfoll <- sum(arg1$dimx[followers])
            foll <- bestresponse(xlead, arg1, arg2, leaders, followers,
                id4xfoll, id4lamfoll, id4mufoll, init,
                follfun, method.follower=method.follower,
                control.follower=control.follower,
                maxit.follower=maxit.follower)
            bestrespcount$phicnt <<- bestrespcount$phicnt + foll$counts["phicnt"]
            bestrespcount$jaccnt <<- bestrespcount$jaccnt + foll$counts["jaccnt"]
            x <- rep(NA, n)
            x[index4xlead] <- xlead
            x[id4xfoll] <- foll$par[1:nfoll]
            arg1$joint(x, arg1$argjoint)
        }
        argconstrlist <- list(arg1=argtest1, arg2=argtest2,
            leaders=leaders, followers=followers,
            id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
            id4mufoll=index4mufoll, init=init, follfun=listfollfunc,
            method.follower=method.follower,
            control.follower=control.follower,
            maxit.follower=maxit.follower)
    }else
    {
        #both joint and individual constraints: stack them
        constrleaders <- function(xlead, arg1, arg2, leaders, followers,
            id4xfoll, id4lamfoll, id4mufoll, init,
            follfun, method.follower, control.follower,
            maxit.follower)
        {
            nfoll <- sum(arg1$dimx[followers])
            foll <- bestresponse(xlead, arg1, arg2, leaders, followers,
                id4xfoll, id4lamfoll, id4mufoll, init,
                follfun, method.follower=method.follower,
                control.follower=control.follower,
                maxit.follower=maxit.follower)
            bestrespcount$phicnt <<- bestrespcount$phicnt + foll$counts["phicnt"]
            bestrespcount$jaccnt <<- bestrespcount$jaccnt + foll$counts["jaccnt"]
            x <- rep(NA, n)
            x[index4xlead] <- xlead
            x[id4xfoll] <- foll$par[1:nfoll]
            y <- arg1$joint(x, arg1$argjoint)
            z <- sapply(leaders, function(i) arg1$constr(x, i, arg1$argconstr))
            c(y, z)
        }
        argconstrlist <- list(arg1=argtest1, arg2=argtest2,
            leaders=leaders, followers=followers,
            id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
            id4mufoll=index4mufoll, init=init, follfun=listfollfunc,
            method.follower=method.follower,
            control.follower=control.follower,
            maxit.follower=maxit.follower)
    }

    if(!silent)
        cat("start computation of SE\t")
    #computation of Stackelberg equilibria
    if(is.null(argtest1$joint) && is.null(argtest1$constr))
    {
        if(!silent)
            cat("no constraint function\n")
        reslead <- minpb(init[index4xlead], fn=objleaders, method=method.leader,
            control=control.leader, argfn=argfnlist, ...)
        if(reslead$code == 100)
            return(reslead)
        else if(reslead$code != 0)
            warning("Non-optimal Stackelberg equilibrium.")
        parlead <- reslead$par[1:nlead]
        resval <- evalwitharglist(objleaders, parlead, argfnlist)
        #recompute the followers' best response at the optimal leader strategy
        resfoll <- bestresponse(parlead, argtest1, argtest2, leaders, followers,
            id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
            id4mufoll=index4mufoll, init, listfollfunc,
            method.follower=method.follower,
            control.follower=control.follower,
            maxit.follower=maxit.follower)
        bestrespcount$phicnt <- bestrespcount$phicnt + resfoll$counts["phicnt"]
        bestrespcount$jaccnt <- bestrespcount$jaccnt + resfoll$counts["jaccnt"]
        parfol <- resfoll$par[1:nfoll]
        #BUGFIX: 'value' previously stored parlead (the leaders' strategy);
        #resval, the leaders' objective value, was computed but discarded
        res <- list(par = c(parlead, parfol), value = resval,
            counts = list(leadfn= objleadcount$fn, follfn=bestrespcount$phicnt,
                folljac=bestrespcount$jaccnt),
            iter = reslead$iter, code = reslead$code, message = reslead$message)
    }else
    {
        if(!silent)
            cat("with constraint functions\n")
        if(is.null(constrleaders))
            stop("internal error in SE.nseq.")
        reslead <- minpb(init[index4xlead], fn=objleaders, method=method.leader,
            hin=constrleaders, arghin=argconstrlist,
            control=control.leader, argfn=argfnlist, silent=silent, ...)
        if(reslead$code == 100)
            return(reslead)
        else if(reslead$code != 0)
            warning("Non-optimal Stackelberg equilibrium.")
        parlead <- reslead$par[1:nlead]
        resval <- evalwitharglist(objleaders, parlead, argfnlist)
        resfoll <- bestresponse(parlead, argtest1, argtest2, leaders, followers,
            id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
            id4mufoll=index4mufoll, init, listfollfunc,
            method.follower=method.follower,
            control.follower=control.follower,
            maxit.follower=maxit.follower)
        bestrespcount$phicnt <- bestrespcount$phicnt + resfoll$counts["phicnt"]
        bestrespcount$jaccnt <- bestrespcount$jaccnt + resfoll$counts["jaccnt"]
        parfol <- resfoll$par[1:nfoll]
        #BUGFIX: see unconstrained branch - 'value' is the objective, not parlead
        res <- list(par = c(parlead, parfol), value = resval,
            counts = list(leadfn= objleadcount$fn, follfn=bestrespcount$phicnt,
                folljac=bestrespcount$jaccnt),
            iter = reslead$iter, code = reslead$code, message = reslead$message)
    }
    if(!silent)
        cat("end computation of SE\n")
    res
}
#compute the best response of followers for a given strategy x of leaders
#
#Solves the followers' generalized Nash subproblem (semismooth reformulation
#funSSR/jacSSR solved by nseq) with the leaders' strategy xlead held fixed.
# xlead            leaders' strategy (kept constant during the solve)
# arg1, arg2       checked argument lists for funSSR and jacSSR respectively
# id4xfoll/id4lamfoll/id4mufoll  positions of the followers' x, lambda, mu in init
# follfun          named list of wrapped follower functions (grobjfoll, ...)
# maxit.follower   max number of attempts; restarts randomly perturb the start
#Returns the result list of the best (lowest value) nseq() run.
bestresponse <- function(xlead, arg1, arg2, leaders, followers,
id4xfoll, id4lamfoll, id4mufoll, init, follfun,
method.follower, control.follower, maxit.follower=10, ...)
{
#sanity check: follfun must be a list of functions (or NULL placeholders)
if(!is.list(follfun))
stop("wrong type for follfun.")
if(!all(sapply(follfun, is.function) | sapply(follfun, is.null)))
stop("wrong argument follfun.")
dimx <- arg1$dimx
dimlam <- arg1$dimlam
nfoll <- sum(dimx[followers])
nbplay <- arg1$nplayer
#extract the followers' components of the full initial point
xfoll <- init[id4xfoll]
lamfoll <- init[id4lamfoll]
mufoll <- init[id4mufoll]
#the followers' unknown vector is (x, lambda?, mu?) depending on which
#constraint functions are present
if(is.null(follfun$constrfoll) && is.null(follfun$jointfoll))
initfoll <- xfoll
else if(!is.null(follfun$constrfoll) && is.null(follfun$jointfoll))
initfoll <- c(xfoll, lamfoll)
else if(is.null(follfun$constrfoll) && !is.null(follfun$jointfoll))
initfoll <- c(xfoll, mufoll)
else
initfoll <- c(xfoll, lamfoll, mufoll)
#additional-argument lists for the wrapped functions: each carries the fixed
#leader strategy, the original extra arguments, and the player index maps
arggrobjSE <- list(xlead=xlead, add=arg1$arggrobj, lead=leaders, foll=followers, nbplayer=nbplay)
argheobjSE <- list(xlead=xlead, add=arg2$argheobj, lead=leaders, foll=followers, nbplayer=nbplay)
argconstrSE <- list(xlead=xlead, add=arg1$argconstr, lead=leaders, foll=followers, nbplayer=nbplay)
arggrconstrSE <- list(xlead=xlead, add=arg1$arggrconstr, lead=leaders, foll=followers, nbplayer=nbplay)
argheconstrSE <- list(xlead=xlead, add=arg2$argheconstr, lead=leaders, foll=followers, nbplayer=nbplay)
argjointSE <- list(xlead=xlead, add=arg1$argjoint, lead=leaders, foll=followers, nbplayer=nbplay)
arggrjointSE <- list(xlead=xlead, add=arg1$arggrjoint, lead=leaders, foll=followers, nbplayer=nbplay)
arghejointSE <- list(xlead=xlead, add=arg2$arghejoint, lead=leaders, foll=followers, nbplayer=nbplay)
#argument lists of funSSR (arg1SE) and jacSSR (arg2SE) restricted to followers
arg1SE <- list(dimx = dimx[followers], dimlam = dimlam[followers],
grobj = follfun$grobjfoll, arggrobj = arggrobjSE,
constr = follfun$constrfoll, argconstr = argconstrSE,
grconstr = follfun$grconstrfoll, arggrconstr = arggrconstrSE,
compl = arg1$compl, argcompl = arg1$argcompl,
dimmu = arg1$dimmu, joint = follfun$jointfoll,
argjoint = argjointSE, grjoint = follfun$grjointfoll,
arggrjoint = arggrjointSE)
arg2SE <- list(dimx = dimx[followers], dimlam = dimlam[followers],
heobj = follfun$heobjfoll, argheobj = argheobjSE,
constr = follfun$constrfoll, argconstr = argconstrSE,
grconstr = follfun$grconstrfoll, arggrconstr = arggrconstrSE,
heconstr = follfun$heconstrfoll, argheconstr = argheconstrSE,
gcompla = arg2$gcompla, gcomplb = arg2$gcomplb, argcompl = arg2$argcompl,
dimmu = arg2$dimmu, joint = follfun$jointfoll, argjoint = argjointSE,
grjoint = follfun$grjointfoll, arggrjoint = arggrjointSE,
hejoint = follfun$hejointfoll, arghejoint = arghejointSE)
#the followers' unknown vector must match the declared dimensions
checkerror <- length(initfoll) != sum(arg1SE$dimx)+sum(arg1SE$dimlam)+sum(arg1SE$dimmu)
if(checkerror)
stop("internal error in bestresponse.")
#wrapped functions: adapt funSSR/jacSSR to the (x, argfun, argjac) signature
#expected by nseq
myfunSSR <- function(x, argfun, argjac)
evalwitharglist(funSSR, x, argfun)
myjacSSR <- function(x, argfun, argjac)
evalwitharglist(jacSSR, x, argjac)
#multistart loop: retry with a randomly perturbed start point (multiplicative
#10% Gaussian noise) until nseq converges (code 1) or maxit.follower is hit;
#keep the attempt with the smallest merit value
res <- list(code=99, value=Inf)
iter <- 0
while(res$code != 1 && iter < maxit.follower)
{
if(iter > 0)
initfoll <- initfoll*(1+rnorm(length(initfoll), 0, .1))
res2 <- nseq(initfoll, myfunSSR, myjacSSR, argfun=arg1SE, argjac=arg2SE,
method=method.follower, control=control.follower, ...)
iter <- iter + 1
if(res2$code == 100)
stop(res2$message)
if(res2$value < res$value)
res <- res2
}
if(res$code != 1)
warning("Non-optimal point when computing best response")
res
}
#transform a function to a leader/follower setting
#
#Rebuilds the full strategy vector from the followers' part (xfoll) and the
#fixed leaders' part stored in arg$xlead, then evaluates f on it, forwarding
#any extra arguments in ... to f.
# f          function of the full strategy vector
# xfoll      followers' (partial) strategy
# id4xfoll   positions of the followers' components in the full vector
# id4xlead   positions of the leaders' components in the full vector
# arg        list carrying at least dimx (per-player dims) and xlead
transfoll <- function(f, xfoll, id4xfoll, id4xlead, followers, leaders, arg, ...)
{
    dims <- arg$dimx
    ntotal <- sum(dims)
    nlead <- sum(dims[leaders])
    nfoll <- sum(dims[followers])
    #assemble the full point: leaders' part is frozen, followers' part varies
    fullx <- rep(NA, ntotal)
    fullx[id4xlead] <- arg$xlead[1:nlead]
    fullx[id4xfoll] <- xfoll[1:nfoll]
    f(fullx, ...)
}
#SE.objleaders: evaluate the leaders' (aggregated) objective along a vector x
#of candidate leader strategies, each evaluated at the followers' best
#response. Mainly useful to plot/inspect the leaders' objective.
#Arguments mirror SE.nseq (without the leader-side optimizer settings);
#x is a vector of leader strategies (one scalar strategy per evaluation).
#Returns sapply(x, <leader objective>), i.e. one value per element of x.
SE.objleaders <- function(x, leaders, init, dimx, dimlam,
    obj, argobj, grobj, arggrobj, heobj, argheobj,
    constr, argconstr, grconstr, arggrconstr, heconstr, argheconstr,
    compl, gcompla, gcomplb, argcompl,
    dimmu, joint, argjoint, grjoint, arggrjoint, hejoint, arghejoint,
    method.follower="default", control.follower=list(),
    maxit.follower=10, silent=TRUE, ...)
{
    if(method.follower == "default") method.follower <- "Newton"

    argtest1 <- testargfunSSR(init, dimx, dimlam, grobj, arggrobj, constr, argconstr, grconstr, arggrconstr,
        compl, argcompl, dimmu, joint, argjoint, grjoint, arggrjoint)

    #basic tests for funSSR: Phi must be computable and finite at init
    test.try <- try( funSSR(init, dimx, dimlam, grobj, arggrobj, constr, argconstr,
        grconstr, arggrconstr, compl, argcompl, dimmu, joint, argjoint,
        grjoint, arggrjoint), silent=silent )
    #inherits() is the robust test for try() failures (vs class(x) == "try-error")
    if(inherits(test.try, "try-error"))
        return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
            message="Can't evalate Phi(init).", fvec=NA) )
    if(any(is.nan(test.try)) || any(is.infinite(test.try)) )
        return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
            message="Phi(init) has infinite or NaN values.", fvec=NA) )

    argtest2 <- testargjacSSR(init, dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr,
        heconstr, argheconstr, gcompla, gcomplb, argcompl, dimmu, joint, argjoint, grjoint, arggrjoint,
        hejoint, arghejoint)

    #basic tests for jacSSR: Jac Phi must be computable and finite at init
    test.try <- try( jacSSR(init, dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr,
        heconstr, argheconstr, gcompla, gcomplb, argcompl, dimmu, joint, argjoint,
        grjoint, arggrjoint, hejoint, arghejoint), silent=silent )
    if(inherits(test.try, "try-error"))
        return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
            message="Can't evaluate Jac Phi(init).", fvec=NA) )
    if(any(is.nan(test.try)) || any(is.infinite(test.try)) )
        return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
            message="Jac Phi(init) has infinite or NaN values.", fvec=NA) )

    nbplay <- argtest1$nplayer
    argtest3 <- testarggapNIR(init[1:nbplay], dimx, obj, argobj)

    if(!is.numeric(leaders) || length(leaders) > nbplay-1)
        stop("wrong leaders argument.")
    if(any(!leaders %in% 1:nbplay))
        stop("wrong leaders argument.")
    followers <- (1:nbplay)[!(1:nbplay %in% leaders)]

    dimx <- argtest1$dimx
    n <- sum(dimx)
    nfoll <- sum(dimx[followers])
    nlead <- sum(dimx[leaders])
    dimlam <- argtest1$dimlam
    m <- sum(dimlam)

    #1st row is the begin index, 2nd row the end index
    index4lam <- rbind( cumsum(dimlam) - dimlam + 1, cumsum(dimlam) ) + n
    index4x <- rbind( cumsum(dimx) - dimx + 1, cumsum(dimx) ) + 0
    index4xfoll <- as.vector(sapply(followers, function(i) index4x[1,i]:index4x[2,i]))
    index4xlead <- as.vector(sapply(leaders, function(i) index4x[1,i]:index4x[2,i]))
    index4lamfoll <- as.vector(sapply(followers, function(i) index4lam[1,i]:index4lam[2,i]))
    index4mufoll <- (1:length(init))[-(1:(n+m))]

    #wrapped follower functions: see SE.nseq for the argument conventions
    grobjfoll <- function(xfoll, play, d1, arg)
        transfoll(argtest1$grobj, xfoll, index4xfoll, index4xlead, followers, leaders,
            arg, arg$foll[play], arg$foll[d1], arg$add)
    heobjfoll <- function(xfoll, play, d1, d2, arg)
        transfoll(argtest2$heobj, xfoll, index4xfoll, index4xlead, followers, leaders,
            arg, arg$foll[play], arg$foll[d1], arg$foll[d2], arg$add)
    if(!is.null(argtest1$constr))
        constrfoll <- function(xfoll, play, arg)
            transfoll(argtest1$constr, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[play], arg$add)
    else
        constrfoll <- NULL
    if(!is.null(argtest1$grconstr))
        grconstrfoll <- function(xfoll, play, d1, arg)
            transfoll(argtest1$grconstr, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[play], arg$foll[d1], arg$add)
    else
        grconstrfoll <- NULL
    if(!is.null(argtest2$heconstr))
        heconstrfoll <- function(xfoll, play, d1, d2, arg)
            transfoll(argtest2$heconstr, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[play], arg$foll[d1], arg$foll[d2], arg$add)
    else
        heconstrfoll <- NULL
    if(!is.null(argtest1$joint))
        jointfoll <- function(xfoll, arg)
            transfoll(argtest1$joint, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$add)
    else
        jointfoll <- NULL
    if(!is.null(argtest1$grjoint))
        grjointfoll <- function(xfoll, d1, arg)
            transfoll(argtest1$grjoint, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[d1], arg$add)
    else
        grjointfoll <- NULL
    if(!is.null(argtest2$hejoint))
        hejointfoll <- function(xfoll, d1, d2, arg)
            transfoll(argtest2$hejoint, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[d1], arg$foll[d2], arg$add)
    else
        hejointfoll <- NULL

    listfollfunc <- list(grobjfoll=grobjfoll, heobjfoll=heobjfoll,
        constrfoll=constrfoll, grconstrfoll=grconstrfoll, heconstrfoll=heconstrfoll,
        jointfoll=jointfoll, grjointfoll=grjointfoll, hejointfoll=hejointfoll)

    #compute the objective of leaders for a strategy xlead of the leaders
    #and the corresponding followers' best-response actions
    objleaders <- function(xlead)
    {
        z <- rep(NA, n)
        foll <- bestresponse(xlead, argtest1, argtest2, leaders, followers,
            index4xfoll, index4lamfoll, index4mufoll, init,
            listfollfunc, method.follower=method.follower,
            control.follower=control.follower,
            maxit.follower=maxit.follower, ...)
        z[index4xlead] <- xlead
        z[index4xfoll] <- foll$par[1:nfoll]
        objleaders <- sapply(leaders, function(i) argtest3$obj(z, i, argtest3$argobj))
        #several leaders: aggregate their objectives by the Euclidean norm
        if(length(objleaders) > 1)
            return( sqrt(sum(objleaders^2)) )
        else
            return( objleaders )
    }
    sapply(x, objleaders)
}
#SE.bestresponse: compute the followers' best response for each candidate
#leader strategy in the vector x. init is either a single starting point
#(recycled for all evaluations) or a matrix with one row per element of x.
#Returns an sapply() result: for each x[i], c(par=..., code=..., value=...,
#lagrmult=...) from bestresponse().
SE.bestresponse <- function(x, leaders, init, dimx, dimlam,
    obj, argobj, grobj, arggrobj, heobj, argheobj,
    constr, argconstr, grconstr, arggrconstr, heconstr, argheconstr,
    compl, gcompla, gcomplb, argcompl,
    dimmu, joint, argjoint, grjoint, arggrjoint, hejoint, arghejoint,
    method.follower="default", control.follower=list(), maxit.follower=10,
    silent=TRUE, ...)
{
    if(method.follower == "default") method.follower <- "Newton"
    #one starting row per element of x; recycle a single vector if needed
    if(!is.matrix(init))
        init <- matrix(init, length(x), length(init), byrow=TRUE)

    argtest1 <- testargfunSSR(init[1,], dimx, dimlam, grobj, arggrobj, constr, argconstr, grconstr, arggrconstr,
        compl, argcompl, dimmu, joint, argjoint, grjoint, arggrjoint)

    #basic tests for funSSR: Phi must be computable and finite at init
    test.try <- try( funSSR(init[1,], dimx, dimlam, grobj, arggrobj, constr, argconstr,
        grconstr, arggrconstr, compl, argcompl, dimmu, joint, argjoint,
        grjoint, arggrjoint), silent=silent )
    #inherits() is the robust test for try() failures (vs class(x) == "try-error")
    if(inherits(test.try, "try-error"))
        return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
            message="Can't evalate Phi(init).", fvec=NA) )
    if(any(is.nan(test.try)) || any(is.infinite(test.try)) )
        return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
            message="Phi(init) has infinite or NaN values.", fvec=NA) )

    argtest2 <- testargjacSSR(init[1,], dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr,
        heconstr, argheconstr, gcompla, gcomplb, argcompl, dimmu, joint, argjoint, grjoint, arggrjoint,
        hejoint, arghejoint)

    #basic tests for jacSSR: Jac Phi must be computable and finite at init
    test.try <- try( jacSSR(init[1,], dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr,
        heconstr, argheconstr, gcompla, gcomplb, argcompl, dimmu, joint, argjoint,
        grjoint, arggrjoint, hejoint, arghejoint), silent=silent )
    if(inherits(test.try, "try-error"))
        return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
            message="Can't evaluate Jac Phi(init).", fvec=NA) )
    if(any(is.nan(test.try)) || any(is.infinite(test.try)) )
        return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
            message="Jac Phi(init) has infinite or NaN values.", fvec=NA) )

    nbplay <- argtest1$nplayer
    argtest3 <- testarggapNIR(init[1,1:nbplay], dimx, obj, argobj)

    if(!is.numeric(leaders) || length(leaders) > nbplay-1)
        stop("wrong leaders argument.")
    if(any(!leaders %in% 1:nbplay))
        stop("wrong leaders argument.")
    followers <- (1:nbplay)[!(1:nbplay %in% leaders)]

    dimx <- argtest1$dimx
    n <- sum(dimx)
    nfoll <- sum(dimx[followers])
    nlead <- sum(dimx[leaders])
    dimlam <- argtest1$dimlam
    m <- sum(dimlam)

    #1st row is the begin index, 2nd row the end index
    index4lam <- rbind( cumsum(dimlam) - dimlam + 1, cumsum(dimlam) ) + n
    index4x <- rbind( cumsum(dimx) - dimx + 1, cumsum(dimx) ) + 0
    index4xfoll <- as.vector(sapply(followers, function(i) index4x[1,i]:index4x[2,i]))
    index4xlead <- as.vector(sapply(leaders, function(i) index4x[1,i]:index4x[2,i]))
    index4lamfoll <- as.vector(sapply(followers, function(i) index4lam[1,i]:index4lam[2,i]))
    #BUGFIX: init is a matrix here, so length(init) is nrow*ncol and the mu
    #indices would overrun each row; the indices must refer to the columns
    #(one starting point per row), hence ncol(init)
    index4mufoll <- (1:ncol(init))[-(1:(n+m))]

    #wrapped follower functions: see SE.nseq for the argument conventions
    grobjfoll <- function(xfoll, play, d1, arg)
        transfoll(argtest1$grobj, xfoll, index4xfoll, index4xlead, followers, leaders,
            arg, arg$foll[play], arg$foll[d1], arg$add)
    heobjfoll <- function(xfoll, play, d1, d2, arg)
        transfoll(argtest2$heobj, xfoll, index4xfoll, index4xlead, followers, leaders,
            arg, arg$foll[play], arg$foll[d1], arg$foll[d2], arg$add)
    if(!is.null(argtest1$constr))
        constrfoll <- function(xfoll, play, arg)
            transfoll(argtest1$constr, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[play], arg$add)
    else
        constrfoll <- NULL
    if(!is.null(argtest1$grconstr))
        grconstrfoll <- function(xfoll, play, d1, arg)
            transfoll(argtest1$grconstr, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[play], arg$foll[d1], arg$add)
    else
        grconstrfoll <- NULL
    if(!is.null(argtest2$heconstr))
        heconstrfoll <- function(xfoll, play, d1, d2, arg)
            transfoll(argtest2$heconstr, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[play], arg$foll[d1], arg$foll[d2], arg$add)
    else
        heconstrfoll <- NULL
    if(!is.null(argtest1$joint))
        jointfoll <- function(xfoll, arg)
            transfoll(argtest1$joint, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$add)
    else
        jointfoll <- NULL
    if(!is.null(argtest1$grjoint))
        grjointfoll <- function(xfoll, d1, arg)
            transfoll(argtest1$grjoint, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[d1], arg$add)
    else
        grjointfoll <- NULL
    if(!is.null(argtest2$hejoint))
        hejointfoll <- function(xfoll, d1, d2, arg)
            transfoll(argtest2$hejoint, xfoll, index4xfoll, index4xlead, followers, leaders,
                arg, arg$foll[d1], arg$foll[d2], arg$add)
    else
        hejointfoll <- NULL

    listfollfunc <- list(grobjfoll=grobjfoll, heobjfoll=heobjfoll,
        constrfoll=constrfoll, grconstrfoll=grconstrfoll, heconstrfoll=heconstrfoll,
        jointfoll=jointfoll, grjointfoll=grjointfoll, hejointfoll=hejointfoll)

    #compute the followers' best response to the i-th leader strategy,
    #starting the follower solve from the i-th row of init
    bestresp <- function(i)
    {
        foll <- bestresponse(x[i], argtest1, argtest2, leaders, followers,
            index4xfoll, index4lamfoll, index4mufoll, init[i,],
            listfollfunc, method.follower=method.follower,
            control.follower=control.follower,
            maxit.follower=maxit.follower, ...)
        c(par=foll$par[1:nfoll], code=foll$code, value=foll$value, lagrmult=foll$par[-(1:nfoll)])
    }
    #seq_along() rather than 1:length(x): safe when x is empty
    sapply(seq_along(x), bestresp)
}
| /GNE/R/solv-SE.R | no_license | ingted/R-Examples | R | false | false | 33,908 | r | #############################################################################
# Copyright (c) 2012 Christophe Dutang
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
#
#############################################################################
### SE computation in GNE
###
### R functions
###
#Stackelberg equilibrium computation
SE.nseq <- function(leaders, init, dimx, dimlam,
obj, argobj, grobj, arggrobj, heobj, argheobj,
constr, argconstr, grconstr, arggrconstr, heconstr, argheconstr,
compl, gcompla, gcomplb, argcompl,
dimmu, joint, argjoint, grjoint, arggrjoint, hejoint, arghejoint,
method.follower="default", method.leader="default",
control.follower=list(), control.leader=list(),
maxit.follower=10, silent=TRUE,
simpleconstr=FALSE, ...)
{
if(method.follower == "default") method.follower <- "Newton"
if(method.leader == "default") method.leader <- "BFGS"
argtest1 <- testargfunSSR(init, dimx, dimlam, grobj, arggrobj, constr, argconstr, grconstr, arggrconstr,
compl, argcompl, dimmu, joint, argjoint, grjoint, arggrjoint)
#basic tests for funSSR
test.try <- try( funSSR(init, dimx, dimlam, grobj, arggrobj, constr, argconstr,
grconstr, arggrconstr, compl, argcompl, dimmu, joint, argjoint,
grjoint, arggrjoint), silent=silent )
if(class(test.try) == "try-error")
return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
message="Can't evalate Phi(init).", fvec=NA) )
if(any(is.nan(test.try)) || any(is.infinite(test.try)) )
return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
message="Phi(init) has infinite or NaN values.", fvec=NA) )
argtest2 <- testargjacSSR(init, dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr,
heconstr, argheconstr, gcompla, gcomplb, argcompl, dimmu, joint, argjoint, grjoint, arggrjoint,
hejoint, arghejoint)
#basic tests for jacSSR
test.try <- try( jacSSR(init, dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr,
heconstr, argheconstr, gcompla, gcomplb, argcompl, dimmu, joint, argjoint,
grjoint, arggrjoint, hejoint, arghejoint), silent=silent )
if(class(test.try) == "try-error")
return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
message="Can't evaluate Jac Phi(init).", fvec=NA) )
if(any(is.nan(test.try)) || any(is.infinite(test.try)) )
return( list(par= NA, value=NA, counts=NA, iter=NA, code=100,
message="Jac Phi(init) has infinite or NaN values.", fvec=NA) )
nbplay <- argtest1$nplayer
argtest3 <- testarggapNIR(init[1:nbplay], dimx, obj, argobj)
if(!is.numeric(leaders) || length(leaders) > nbplay-1)
stop("wrong leaders argument.")
if(any(!leaders %in% 1:nbplay))
stop("wrong leaders argument.")
followers <- (1:nbplay)[!(1:nbplay %in% leaders)]
dimx <- argtest1$dimx
n <- sum(dimx)
nfoll <- sum(dimx[followers])
nlead <- sum(dimx[leaders])
dimlam <- argtest1$dimlam
m <- sum(dimlam)
#1st row is the begin index, 2nd row the end index
index4lam <- rbind( cumsum(dimlam) - dimlam + 1, cumsum(dimlam) ) + n
index4x <- rbind( cumsum(dimx) - dimx + 1, cumsum(dimx) ) + 0
index4xfoll <- as.vector(sapply(followers, function(i) index4x[1,i]:index4x[2,i]))
index4xlead <- as.vector(sapply(leaders, function(i) index4x[1,i]:index4x[2,i]))
index4lamfoll <- as.vector(sapply(followers, function(i) index4lam[1,i]:index4lam[2,i]))
index4mufoll <- (1:length(init))[-(1:(n+m))]
bestrespcount <- list(phicnt=0, jaccnt=0)
objleadcount <- list(fn=0)
#wrap objective gradient (x, i, j, arg)
#1st arg=x, 2nd arg=id player, 3rd arg= deriv index, 4th arg=add. arg
grobjfoll <- function(xfoll, play, d1, arg)
transfoll(argtest1$grobj, xfoll, index4xfoll, index4xlead, followers, leaders,
arg, arg$foll[play], arg$foll[d1], arg$add)
#wrap objective hessian (x, i, j, k, arg)
#1st arg=x, 2nd arg=id player, 3rd arg= deriv index, 4th arg= deriv index, 5th arg=add. arg
heobjfoll <- function(xfoll, play, d1, d2, arg)
transfoll(argtest2$heobj, xfoll, index4xfoll, index4xlead, followers, leaders,
arg, arg$foll[play], arg$foll[d1], arg$foll[d2], arg$add)
#wrap constraint function (x, i, arg)
#1st arg=x, 2nd arg=id player, 3rd arg= add. arg
if(!is.null(argtest1$constr))
constrfoll <- function(xfoll, play, arg)
transfoll(argtest1$constr, xfoll, index4xfoll, index4xlead, followers, leaders,
arg, arg$foll[play], arg$add)
else
constrfoll <- NULL
#wrap constraint gradient (x, i, j, arg)
#1st arg=x, 2nd arg=id player, 3rd arg= deriv index, 4th arg=add. arg
if(!is.null(argtest1$grconstr))
grconstrfoll <- function(xfoll, play, d1, arg)
transfoll(argtest1$grconstr, xfoll, index4xfoll, index4xlead, followers, leaders,
arg, arg$foll[play], arg$foll[d1], arg$add)
else
grconstrfoll <- NULL
#wrap constraint hessian (x, i, j, k, arg)
#1st arg=x, 2nd arg=id player, 3rd arg= deriv index, 4th arg= deriv index, 5th arg=add. arg
if(!is.null(argtest2$heconstr))
heconstrfoll <- function(xfoll, play, d1, d2, arg)
transfoll(argtest2$heconstr, xfoll, index4xfoll, index4xlead, followers, leaders,
arg, arg$foll[play], arg$foll[d1], arg$foll[d2], arg$add)
else
heconstrfoll <- NULL
#wrap joint function (x, arg)
#1st arg=x, 2nd arg=add. arg
if(!is.null(argtest1$joint))
jointfoll <- function(xfoll, arg)
transfoll(argtest1$joint, xfoll, index4xfoll, index4xlead, followers, leaders,
arg, arg$add)
else
jointfoll <- NULL
#wrap joint gradient (x, j, arg)
#1st arg=x, 2rd arg= deriv index, 3th arg=add. arg
if(!is.null(argtest1$grjoint))
grjointfoll <- function(xfoll, d1, arg)
transfoll(argtest1$grjoint, xfoll, index4xfoll, index4xlead, followers, leaders,
arg, arg$foll[d1], arg$add)
else
grjointfoll <- NULL
#wrap joint hessian (x, j, k, arg)
#1st arg=x, 3rd arg= deriv index, 4th arg= deriv index, 5th arg=add. arg
if(!is.null(argtest2$hejoint))
hejointfoll <- function(xfoll, d1, d2, arg)
transfoll(argtest2$hejoint, xfoll, index4xfoll, index4xlead, followers, leaders,
arg, arg$foll[d1], arg$foll[d2], arg$add)
else
hejointfoll <- NULL
listfollfunc <- list(grobjfoll=grobjfoll, heobjfoll=heobjfoll,
constrfoll=constrfoll, grconstrfoll=grconstrfoll, heconstrfoll=heconstrfoll,
jointfoll=jointfoll, grjointfoll=grjointfoll, hejointfoll=hejointfoll)
#compute the objective of leaders for a strategy x of leaders
#and corresponding followers actions
#NOTE: this closure is defined inside the enclosing SE solver and relies on
#its environment: nbplay, maxit.follower and ... come from the enclosing
#function, and the counters bestrespcount/objleadcount are updated there
#via <<-. bestresponse() solves the followers' problem for the fixed
#leader strategy xlead; the leaders' objectives are then evaluated at the
#combined point and aggregated by their Euclidean norm when there is more
#than one leader.
objleaders <- function(xlead, arg1, arg2, arg3, leaders, followers, 
	id4xfoll, id4lamfoll, id4mufoll, init, id4xlead,
	follfun, method.follower, control.follower)
{
#	cat("1-", id4xfoll, "index4xfoll\t", id4lamfoll, "index4lamfoll\t", id4mufoll, "index4mufoll", "\n")
#	
#	print(xlead)
#	print(attributes(arg1))
#	print(attributes(arg2))
#	print(attributes(arg3))
#	print(leaders)
	n <- sum(arg1$dimx)
	nfoll <- sum(arg1$dimx[followers])
	#full joint strategy vector, filled below with leader and follower parts
	x <- rep(NA, n)
	#starting point for the followers' problem
	#NOTE(review): init2 replicates xlead nbplay times and pads the
	#lambda/mu part with 1e-3 — assumes this matches the expected layout
	#of the follower KKT system; TODO confirm against bestresponse()
	init2 <- c(rep(xlead, nbplay), rep(1e-3, length(init)-nbplay))
	foll <- bestresponse(xlead, arg1, arg2, leaders, followers, 
		id4xfoll, id4lamfoll, id4mufoll, init2, 
		follfun, method.follower=method.follower, 
		control.follower=control.follower, 
		maxit.follower=maxit.follower, ...)
	#accumulate follower function/Jacobian evaluation counts in the
	#enclosing environment
	bestrespcount$phicnt <<- bestrespcount$phicnt + foll$counts["phicnt"]
	bestrespcount$jaccnt <<- bestrespcount$jaccnt + foll$counts["jaccnt"]
	x[id4xlead] <- xlead
	x[id4xfoll] <- foll$par[1:nfoll]
	#one objective value per leader, evaluated at the combined point
	objleaders <- sapply(leaders, function(i) arg3$obj(x, i, arg3$argobj))
	objleadcount$fn <<- objleadcount$fn + 1
	#aggregate multiple leaders via the Euclidean norm of their objectives
	if(length(objleaders) > 1)
		return( sqrt(sum(objleaders^2)) )
	else
		return( objleaders )
}
argfnlist <- list(arg1=argtest1, arg2=argtest2, arg3=argtest3,
leaders=leaders, followers=followers,
id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
id4mufoll=index4mufoll, init=init, follfun=listfollfunc,
method.follower=method.follower, control.follower=control.follower)
# cat("\nblii\n")
# print(evalwitharglist(objleaders, init[index4xlead], argfnlist))
# cat("blii\n\n")
# stop("here")
# bestresponse(xlead, arg1, arg2, leaders, followers,
# id4xfoll, id4lamfoll, id4mufoll, init, follfun,
# method.follower, control.follower, ...)
#create the constraint function of leaders given best response of followers
#such that constr(x) <= 0 as in input we have constr(x) <= 0
if(is.null(argtest1$joint) && is.null(argtest1$constr))
{
constrleaders <- NULL
}else if(is.null(argtest1$joint) && !is.null(argtest1$constr))
{
if(!simpleconstr)
{
constrleaders <- function(xlead, arg1, arg2, leaders, followers,
id4xfoll, id4lamfoll, id4mufoll, init,
follfun, method.follower, control.follower,
maxit.follower)
{
nfoll <- sum(arg1$dimx[followers])
foll <- bestresponse(xlead, arg1, arg2, leaders, followers,
id4xfoll, id4lamfoll, id4mufoll, init,
follfun, method.follower=method.follower,
control.follower=control.follower,
maxit.follower=maxit.follower)
bestrespcount$phicnt <<- bestrespcount$phicnt + foll$counts["phicnt"]
bestrespcount$jaccnt <<- bestrespcount$jaccnt + foll$counts["jaccnt"]
x <- rep(NA, n)
x[index4xlead] <- xlead
x[id4xfoll] <- foll$par[1:nfoll]
sapply(leaders, function(i) arg1$constr(x, i, arg1$argconstr))
}
argconstrlist <- list(arg1=argtest1, arg2=argtest2,
leaders=leaders, followers=followers,
id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
id4mufoll=index4mufoll, init=init, follfun=listfollfunc,
method.follower=method.follower,
control.follower=control.follower,
maxit.follower=maxit.follower)
}else
{
constrleaders <- function(xlead, arg1, arg2, leaders, followers,
id4xfoll, id4lamfoll, id4mufoll, init,
follfun, method.follower, control.follower,
maxit.follower)
{
nfoll <- sum(arg1$dimx[followers])
foll <- rep(1, nfoll)
x <- rep(NA, n)
x[index4xlead] <- xlead
x[id4xfoll] <- rep(1, nfoll)
sapply(leaders, function(i) arg1$constr(x, i, arg1$argconstr))
}
argconstrlist <- list(arg1=argtest1, arg2=argtest2,
leaders=leaders, followers=followers,
id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
id4mufoll=index4mufoll, init=NULL, follfun=NULL,
method.follower=NULL,
control.follower=NULL,
maxit.follower=NULL)
}
}else if(!is.null(argtest1$joint) && is.null(argtest1$constr))
{
constrleaders <- function(xlead, arg1, arg2, leaders, followers,
id4xfoll, id4lamfoll, id4mufoll, init,
follfun, method.follower, control.follower,
maxit.follower)
{
nfoll <- sum(arg1$dimx[followers])
foll <- bestresponse(xlead, arg1, arg2, leaders, followers,
id4xfoll, id4lamfoll, id4mufoll, init,
follfun, method.follower=method.follower,
control.follower=control.follower,
maxit.follower=maxit.follower)
bestrespcount$phicnt <<- bestrespcount$phicnt + foll$counts["phicnt"]
bestrespcount$jaccnt <<- bestrespcount$jaccnt + foll$counts["jaccnt"]
x <- rep(NA, n)
x[index4xlead] <- xlead
x[id4xfoll] <- foll$par[1:nfoll]
arg1$joint(x, arg1$argjoint)
}
argconstrlist <- list(arg1=argtest1, arg2=argtest2,
leaders=leaders, followers=followers,
id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
id4mufoll=index4mufoll, init=init, follfun=listfollfunc,
method.follower=method.follower,
control.follower=control.follower,
maxit.follower=maxit.follower)
}else
{
constrleaders <- function(xlead, arg1, arg2, leaders, followers,
id4xfoll, id4lamfoll, id4mufoll, init,
follfun, method.follower, control.follower,
maxit.follower)
{
nfoll <- sum(arg1$dimx[followers])
foll <- bestresponse(xlead, arg1, arg2, leaders, followers,
id4xfoll, id4lamfoll, id4mufoll, init,
follfun, method.follower=method.follower,
control.follower=control.follower,
maxit.follower=maxit.follower)
bestrespcount$phicnt <<- bestrespcount$phicnt + foll$counts["phicnt"]
bestrespcount$jaccnt <<- bestrespcount$jaccnt + foll$counts["jaccnt"]
x <- rep(NA, n)
x[index4xlead] <- xlead
x[id4xfoll] <- foll$par[1:nfoll]
y <- arg1$joint(x, arg1$argjoint)
z <- sapply(leaders, function(i) arg1$constr(x, i, arg1$argconstr))
c(y, z)
}
argconstrlist <- list(arg1=argtest1, arg2=argtest2,
leaders=leaders, followers=followers,
id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
id4mufoll=index4mufoll, init=init, follfun=listfollfunc,
method.follower=method.follower,
control.follower=control.follower,
maxit.follower=maxit.follower)
}
if(!silent)
cat("start computation of SE\t")
#computation of Stackelberg equilibria
if(is.null(argtest1$joint) && is.null(argtest1$constr))
{
if(!silent)
cat("no constraint function\n")
reslead <- minpb(init[index4xlead], fn=objleaders, method=method.leader,
control=control.leader, argfn=argfnlist, ...)
if(reslead$code == 100)
return(reslead)
else if(reslead$code != 0)
warning("Non-optimal Stackelberg equilibrium.")
parlead <- reslead$par[1:nlead]
resval <- evalwitharglist(objleaders, parlead, argfnlist)
resfoll <- bestresponse(parlead, argtest1, argtest2, leaders, followers,
id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
id4mufoll=index4mufoll, init, listfollfunc,
method.follower=method.follower,
control.follower=control.follower,
maxit.follower=maxit.follower)
bestrespcount$phicnt <- bestrespcount$phicnt + resfoll$counts["phicnt"]
bestrespcount$jaccnt <- bestrespcount$jaccnt + resfoll$counts["jaccnt"]
parfol <- resfoll$par[1:nfoll]
res <- list(par = c(parlead, parfol), value = parlead,
counts = list(leadfn= objleadcount$fn, follfn=bestrespcount$phicnt,
folljac=bestrespcount$jaccnt),
iter = reslead$iter, code = reslead$code, message = reslead$message)
}else
{
if(!silent)
cat("with constraint functions\n")
if(is.null(constrleaders))
stop("internal error in SE.nseq.")
reslead <- minpb(init[index4xlead], fn=objleaders, method=method.leader,
hin=constrleaders, arghin=argconstrlist,
control=control.leader, argfn=argfnlist, silent=silent, ...)
if(reslead$code == 100)
return(reslead)
else if(reslead$code != 0)
warning("Non-optimal Stackelberg equilibrium.")
parlead <- reslead$par[1:nlead]
resval <- evalwitharglist(objleaders, parlead, argfnlist)
resfoll <- bestresponse(parlead, argtest1, argtest2, leaders, followers,
id4xfoll=index4xfoll, id4lamfoll=index4lamfoll,
id4mufoll=index4mufoll, init, listfollfunc,
method.follower=method.follower,
control.follower=control.follower,
maxit.follower=maxit.follower)
bestrespcount$phicnt <- bestrespcount$phicnt + resfoll$counts["phicnt"]
bestrespcount$jaccnt <- bestrespcount$jaccnt + resfoll$counts["jaccnt"]
parfol <- resfoll$par[1:nfoll]
res <- list(par = c(parlead, parfol), value = parlead,
counts = list(leadfn= objleadcount$fn, follfn=bestrespcount$phicnt,
folljac=bestrespcount$jaccnt),
iter = reslead$iter, code = reslead$code, message = reslead$message)
}
if(!silent)
cat("end computation of SE\n")
res
}
#compute the best response of followers for a given strategy x of leaders
#The followers' game (leaders' strategy xlead held fixed) is solved by
#finding a root of the semi-smooth reformulation Phi (funSSR/jacSSR) with
#nseq(). Up to maxit.follower attempts are made: after a failed attempt the
#starting point is perturbed multiplicatively with Gaussian noise and the
#best result (smallest value) seen so far is kept.
#- xlead: leaders' strategy, passed to the wrapped functions via the arg lists.
#- arg1, arg2: checked argument lists for funSSR resp. jacSSR.
#- leaders, followers: player indices.
#- id4xfoll, id4lamfoll, id4mufoll: positions of the followers' x, lambda
#  and mu components inside init.
#- follfun: list of wrapped follower functions (grobjfoll, heobjfoll, ...).
#Returns the nseq() result list (par, value, counts, code, ...); code 1 is
#treated as success, otherwise a warning is raised.
bestresponse <- function(xlead, arg1, arg2, leaders, followers, 
	id4xfoll, id4lamfoll, id4mufoll, init, follfun, 
	method.follower, control.follower, maxit.follower=10, ...)
{
#	cat("2-", id4xfoll, "index4xfoll\t", id4lamfoll, "index4lamfoll\t", id4mufoll, "index4mufoll", "\n")
#	print(sapply(follfun, class))
	#follfun must be a list whose components are functions or NULL
	if(!is.list(follfun))
		stop("wrong type for follfun.")
	if(!all(sapply(follfun, is.function) | sapply(follfun, is.null)))
		stop("wrong argument follfun.")
	dimx <- arg1$dimx
	dimlam <- arg1$dimlam
	nfoll <- sum(dimx[followers])
	nbplay <- arg1$nplayer
	#extract the followers' part of the initial point
	xfoll <- init[id4xfoll]
	lamfoll <- init[id4lamfoll]
	mufoll <- init[id4mufoll]
	#build the follower starting vector: only include the multiplier blocks
	#(lambda for individual constraints, mu for joint constraints) that are
	#actually present
	if(is.null(follfun$constrfoll) && is.null(follfun$jointfoll))
		initfoll <- xfoll
	else if(!is.null(follfun$constrfoll) && is.null(follfun$jointfoll))
		initfoll <- c(xfoll, lamfoll)
	else if(is.null(follfun$constrfoll) && !is.null(follfun$jointfoll))
		initfoll <- c(xfoll, mufoll)
	else
		initfoll <- c(xfoll, lamfoll, mufoll)
#	cat(xfoll, lamfoll, mufoll, "\n")
#	print(initfoll)
	#argument lists passed to the wrapped follower functions; each carries
	#the fixed leader strategy (xlead), the original additional arguments
	#(add) and the leader/follower index maps
	arggrobjSE <- list(xlead=xlead, add=arg1$arggrobj, lead=leaders, foll=followers, nbplayer=nbplay)
	argheobjSE <- list(xlead=xlead, add=arg2$argheobj, lead=leaders, foll=followers, nbplayer=nbplay)
	argconstrSE <- list(xlead=xlead, add=arg1$argconstr, lead=leaders, foll=followers, nbplayer=nbplay)
	arggrconstrSE <- list(xlead=xlead, add=arg1$arggrconstr, lead=leaders, foll=followers, nbplayer=nbplay)
	argheconstrSE <- list(xlead=xlead, add=arg2$argheconstr, lead=leaders, foll=followers, nbplayer=nbplay)
	argjointSE <- list(xlead=xlead, add=arg1$argjoint, lead=leaders, foll=followers, nbplayer=nbplay)
	arggrjointSE <- list(xlead=xlead, add=arg1$arggrjoint, lead=leaders, foll=followers, nbplayer=nbplay)
	arghejointSE <- list(xlead=xlead, add=arg2$arghejoint, lead=leaders, foll=followers, nbplayer=nbplay)
	#argument list for the follower SSR function Phi (restricted to followers)
	arg1SE <- list(dimx = dimx[followers], dimlam = dimlam[followers], 
		grobj = follfun$grobjfoll, arggrobj = arggrobjSE, 
		constr = follfun$constrfoll, argconstr = argconstrSE, 
		grconstr = follfun$grconstrfoll, arggrconstr = arggrconstrSE, 
		compl = arg1$compl, argcompl = arg1$argcompl, 
		dimmu = arg1$dimmu, joint = follfun$jointfoll, 
		argjoint = argjointSE, grjoint = follfun$grjointfoll, 
		arggrjoint = arggrjointSE)
	#argument list for the follower Jacobian of Phi
	arg2SE <- list(dimx = dimx[followers], dimlam = dimlam[followers], 
		heobj = follfun$heobjfoll, argheobj = argheobjSE, 
		constr = follfun$constrfoll, argconstr = argconstrSE, 
		grconstr = follfun$grconstrfoll, arggrconstr = arggrconstrSE, 
		heconstr = follfun$heconstrfoll, argheconstr = argheconstrSE, 
		gcompla = arg2$gcompla, gcomplb = arg2$gcomplb, argcompl = arg2$argcompl, 
		dimmu = arg2$dimmu, joint = follfun$jointfoll, argjoint = argjointSE, 
		grjoint = follfun$grjointfoll, arggrjoint = arggrjointSE, 
		hejoint = follfun$hejointfoll, arghejoint = arghejointSE)
#	cat("blii\n")
#	print(sapply(arg1SE, is.null))
#	print(sapply(arg1SE, length))
#	cat("blii2\n")
#	print(sapply(arg2SE, is.null))
#	print(sapply(arg2SE, length))
#	cat("dimx", arg1SE$dimx, "dimlam", arg1SE$dimlam, "dimmu", arg1SE$dimmu, "\n")
	#the starting vector must match the follower system dimension exactly
	checkerror <- length(initfoll) != sum(arg1SE$dimx)+sum(arg1SE$dimlam)+sum(arg1SE$dimmu)
	if(checkerror)
		stop("internal error in bestresponse.")
	#wrapped functions: nseq() calls them with both argument lists, each
	#wrapper uses only the one it needs
	myfunSSR <- function(x, argfun, argjac)
		evalwitharglist(funSSR, x, argfun)
	myjacSSR <- function(x, argfun, argjac)
		evalwitharglist(jacSSR, x, argjac)
	#multi-start loop: retry with a randomly perturbed starting point until
	#nseq() reports success (code 1) or maxit.follower attempts are used
	res <- list(code=99, value=Inf)
	iter <- 0
	while(res$code != 1 && iter < maxit.follower)
	{
		if(iter > 0)
			initfoll <- initfoll*(1+rnorm(length(initfoll), 0, .1))
		res2 <- nseq(initfoll, myfunSSR, myjacSSR, argfun=arg1SE, argjac=arg2SE, 
			method=method.follower, control=control.follower, ...)
		iter <- iter + 1
		#code 100 means Phi could not even be evaluated: abort
		if(res2$code == 100)
			stop(res2$message)
#		cat("iter", iter, res2$value,"\n")
#		print(cbind(init=initfoll, opt=res2$par))
		#keep the best (lowest value) result across restarts
		if(res2$value < res$value)
			res <- res2
	}
	if(res$code != 1)
		warning("Non-optimal point when computing best response")
	res
}
# Adapt a function of the full joint strategy to the leader/follower split.
#
# Rebuilds the complete strategy vector from two pieces -- the leaders' part
# (taken from arg$xlead) and the followers' part (xfoll) -- then evaluates f
# on it, forwarding any extra arguments.
#
# f          function of the full strategy vector: f(x, ...)
# xfoll      followers' strategy components
# id4xfoll   positions of the follower components in the full vector
# id4xlead   positions of the leader components in the full vector
# followers  indices of the follower players (to size the follower part)
# leaders    indices of the leader players (to size the leader part)
# arg        list carrying dimx (per-player dimensions) and xlead
# Returns f evaluated at the reconstructed full strategy vector.
transfoll <- function(f, xfoll, id4xfoll, id4xlead, followers, leaders, arg, ...)
{
	total <- sum(arg$dimx)
	nleaddim <- sum(arg$dimx[leaders])
	nfolldim <- sum(arg$dimx[followers])
	# assemble the full vector: leaders' components first source, then followers'
	fullx <- rep(NA, total)
	fullx[id4xlead] <- arg$xlead[seq_len(nleaddim)]
	fullx[id4xfoll] <- xfoll[seq_len(nfolldim)]
	f(fullx, ...)
}
#Evaluate the leaders' aggregated objective along a vector of leader
#strategies x, with the followers always playing their best response
#(computed by bestresponse()). Useful to explore/plot the Stackelberg
#objective. For several leaders, the Euclidean norm of their objectives is
#returned; for a single leader, its objective value.
#Arguments mirror funSSR/jacSSR (dimensions, objective/constraint functions
#and their derivatives) plus the follower solver settings.
#Fixes vs previous version: use inherits(x, "try-error") instead of the
#fragile class(x) == "try-error" comparison, and correct the "evalate" typo
#in the error message.
SE.objleaders <- function(x, leaders, init, dimx, dimlam, 
	obj, argobj, grobj, arggrobj, heobj, argheobj, 
	constr, argconstr, grconstr, arggrconstr, heconstr, argheconstr, 
	compl, gcompla, gcomplb, argcompl, 
	dimmu, joint, argjoint, grjoint, arggrjoint, hejoint, arghejoint,
	method.follower="default", control.follower=list(), 
	maxit.follower=10, silent=TRUE, ...)
{
	if(method.follower == "default") method.follower <- "Newton"
	#sanity checks for the arguments of Phi (the SSR function)
	argtest1 <- testargfunSSR(init, dimx, dimlam, grobj, arggrobj, constr, argconstr, grconstr, arggrconstr, 
		compl, argcompl, dimmu, joint, argjoint, grjoint, arggrjoint)
	#basic tests for funSSR: Phi(init) must be computable and finite
	test.try <- try( funSSR(init, dimx, dimlam, grobj, arggrobj, constr, argconstr, 
		grconstr, arggrconstr, compl, argcompl, dimmu, joint, argjoint, 
		grjoint, arggrjoint), silent=silent )
	if(inherits(test.try, "try-error"))
		return( list(par= NA, value=NA, counts=NA, iter=NA, code=100, 
			message="Can't evaluate Phi(init).", fvec=NA) )
	if(any(is.nan(test.try)) || any(is.infinite(test.try)) )
		return( list(par= NA, value=NA, counts=NA, iter=NA, code=100, 
			message="Phi(init) has infinite or NaN values.", fvec=NA) )
	#sanity checks for the arguments of Jac Phi
	argtest2 <- testargjacSSR(init, dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr, 
		heconstr, argheconstr, gcompla, gcomplb, argcompl, dimmu, joint, argjoint, grjoint, arggrjoint, 
		hejoint, arghejoint)
	#basic tests for jacSSR: Jac Phi(init) must be computable and finite
	test.try <- try( jacSSR(init, dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr, 
		heconstr, argheconstr, gcompla, gcomplb, argcompl, dimmu, joint, argjoint, 
		grjoint, arggrjoint, hejoint, arghejoint), silent=silent )
	if(inherits(test.try, "try-error"))
		return( list(par= NA, value=NA, counts=NA, iter=NA, code=100, 
			message="Can't evaluate Jac Phi(init).", fvec=NA) )
	if(any(is.nan(test.try)) || any(is.infinite(test.try)) )
		return( list(par= NA, value=NA, counts=NA, iter=NA, code=100, 
			message="Jac Phi(init) has infinite or NaN values.", fvec=NA) )
	nbplay <- argtest1$nplayer
	#sanity checks for the objective functions themselves
	argtest3 <- testarggapNIR(init[1:nbplay], dimx, obj, argobj)
	if(!is.numeric(leaders) || length(leaders) > nbplay-1)
		stop("wrong leaders argument.")
	if(any(!leaders %in% 1:nbplay))
		stop("wrong leaders argument.")
	#the followers are all players that are not leaders
	followers <- (1:nbplay)[!(1:nbplay %in% leaders)]
	dimx <- argtest1$dimx
	n <- sum(dimx)
	nfoll <- sum(dimx[followers])
	nlead <- sum(dimx[leaders])
	dimlam <- argtest1$dimlam
	m <- sum(dimlam)
	#index matrices: 1st row is the begin index, 2nd row the end index
	index4lam <- rbind( cumsum(dimlam) - dimlam + 1, cumsum(dimlam) ) + n
	index4x <- rbind( cumsum(dimx) - dimx + 1, cumsum(dimx) ) + 0
	index4xfoll <- as.vector(sapply(followers, function(i) index4x[1,i]:index4x[2,i]))
	index4xlead <- as.vector(sapply(leaders, function(i) index4x[1,i]:index4x[2,i]))
	index4lamfoll <- as.vector(sapply(followers, function(i) index4lam[1,i]:index4lam[2,i]))
	#mu components come after the x and lambda parts of init
	index4mufoll <- (1:length(init))[-(1:(n+m))]
	#wrapped follower functions: each rebuilds the full strategy vector via
	#transfoll() and remaps player/derivative indices to follower numbering
	grobjfoll <- function(xfoll, play, d1, arg)
		transfoll(argtest1$grobj, xfoll, index4xfoll, index4xlead, followers, leaders, 
			arg, arg$foll[play], arg$foll[d1], arg$add)
	heobjfoll <- function(xfoll, play, d1, d2, arg)
		transfoll(argtest2$heobj, xfoll, index4xfoll, index4xlead, followers, leaders, 
			arg, arg$foll[play], arg$foll[d1], arg$foll[d2], arg$add)
	if(!is.null(argtest1$constr))
		constrfoll <- function(xfoll, play, arg)
			transfoll(argtest1$constr, xfoll, index4xfoll, index4xlead, followers, leaders, 
				arg, arg$foll[play], arg$add)
	else
		constrfoll <- NULL
	if(!is.null(argtest1$grconstr))
		grconstrfoll <- function(xfoll, play, d1, arg)
			transfoll(argtest1$grconstr, xfoll, index4xfoll, index4xlead, followers, leaders, 
				arg, arg$foll[play], arg$foll[d1], arg$add)
	else
		grconstrfoll <- NULL
	if(!is.null(argtest2$heconstr))
		heconstrfoll <- function(xfoll, play, d1, d2, arg)
			transfoll(argtest2$heconstr, xfoll, index4xfoll, index4xlead, followers, leaders, 
				arg, arg$foll[play], arg$foll[d1], arg$foll[d2], arg$add)
	else
		heconstrfoll <- NULL
	if(!is.null(argtest1$joint))
		jointfoll <- function(xfoll, arg)
			transfoll(argtest1$joint, xfoll, index4xfoll, index4xlead, followers, leaders, 
				arg, arg$add)
	else
		jointfoll <- NULL
	if(!is.null(argtest1$grjoint))
		grjointfoll <- function(xfoll, d1, arg)
			transfoll(argtest1$grjoint, xfoll, index4xfoll, index4xlead, followers, leaders, 
				arg, arg$foll[d1], arg$add)
	else
		grjointfoll <- NULL
	if(!is.null(argtest2$hejoint))
		hejointfoll <- function(xfoll, d1, d2, arg)
			transfoll(argtest2$hejoint, xfoll, index4xfoll, index4xlead, followers, leaders, 
				arg, arg$foll[d1], arg$foll[d2], arg$add)
	else
		hejointfoll <- NULL
	listfollfunc <- list(grobjfoll=grobjfoll, heobjfoll=heobjfoll, 
		constrfoll=constrfoll, grconstrfoll=grconstrfoll, heconstrfoll=heconstrfoll, 
		jointfoll=jointfoll, grjointfoll=grjointfoll, hejointfoll=hejointfoll)
	#compute the objective of leaders for a strategy xlead of leaders
	#and corresponding followers' best-response actions
	objleaders <- function(xlead)
	{
		z <- rep(NA, n)
		foll <- bestresponse(xlead, argtest1, argtest2, leaders, followers, 
			index4xfoll, index4lamfoll, index4mufoll, init, 
			listfollfunc, method.follower=method.follower, 
			control.follower=control.follower, 
			maxit.follower=maxit.follower, ...)
		z[index4xlead] <- xlead
		z[index4xfoll] <- foll$par[1:nfoll]
		#one objective value per leader, aggregated via the Euclidean norm
		leadval <- sapply(leaders, function(i) argtest3$obj(z, i, argtest3$argobj))
		if(length(leadval) > 1)
			return( sqrt(sum(leadval^2)) )
		else
			return( leadval )
	}
	sapply(x, objleaders)
}
#Compute the followers' best response for each leader strategy in x.
#init may be a single starting vector (recycled for every x) or a matrix
#with one row per element of x. For each i, bestresponse() is called with
#leader strategy x[i] and starting point init[i,]; the result is returned
#column-wise by sapply(): follower strategies (par), convergence code,
#merit value and the Lagrange multipliers (lagrmult).
#Fixes vs previous version: inherits() instead of class() == "try-error";
#"evalate" typo corrected; index4mufoll now uses ncol(init) -- init has been
#coerced to a matrix at this point, so length(init) counted ALL matrix cells
#instead of the length of one row, giving wrong mu indices.
SE.bestresponse <- function(x, leaders, init, dimx, dimlam, 
	obj, argobj, grobj, arggrobj, heobj, argheobj, 
	constr, argconstr, grconstr, arggrconstr, heconstr, argheconstr, 
	compl, gcompla, gcomplb, argcompl, 
	dimmu, joint, argjoint, grjoint, arggrjoint, hejoint, arghejoint,
	method.follower="default", control.follower=list(), maxit.follower, 
	silent=TRUE, ...)
{
	if(method.follower == "default") method.follower <- "Newton"
	#one row of init per leader strategy in x
	if(!is.matrix(init))
		init <- matrix(init, length(x), length(init), byrow=TRUE)
	#sanity checks for the arguments of Phi (the SSR function)
	argtest1 <- testargfunSSR(init[1,], dimx, dimlam, grobj, arggrobj, constr, argconstr, grconstr, arggrconstr, 
		compl, argcompl, dimmu, joint, argjoint, grjoint, arggrjoint)
	#basic tests for funSSR: Phi(init) must be computable and finite
	test.try <- try( funSSR(init[1,], dimx, dimlam, grobj, arggrobj, constr, argconstr, 
		grconstr, arggrconstr, compl, argcompl, dimmu, joint, argjoint, 
		grjoint, arggrjoint), silent=silent )
	if(inherits(test.try, "try-error"))
		return( list(par= NA, value=NA, counts=NA, iter=NA, code=100, 
			message="Can't evaluate Phi(init).", fvec=NA) )
	if(any(is.nan(test.try)) || any(is.infinite(test.try)) )
		return( list(par= NA, value=NA, counts=NA, iter=NA, code=100, 
			message="Phi(init) has infinite or NaN values.", fvec=NA) )
	#sanity checks for the arguments of Jac Phi
	argtest2 <- testargjacSSR(init[1,], dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr, 
		heconstr, argheconstr, gcompla, gcomplb, argcompl, dimmu, joint, argjoint, grjoint, arggrjoint, 
		hejoint, arghejoint)
	#basic tests for jacSSR: Jac Phi(init) must be computable and finite
	test.try <- try( jacSSR(init[1,], dimx, dimlam, heobj, argheobj, constr, argconstr, grconstr, arggrconstr, 
		heconstr, argheconstr, gcompla, gcomplb, argcompl, dimmu, joint, argjoint, 
		grjoint, arggrjoint, hejoint, arghejoint), silent=silent )
	if(inherits(test.try, "try-error"))
		return( list(par= NA, value=NA, counts=NA, iter=NA, code=100, 
			message="Can't evaluate Jac Phi(init).", fvec=NA) )
	if(any(is.nan(test.try)) || any(is.infinite(test.try)) )
		return( list(par= NA, value=NA, counts=NA, iter=NA, code=100, 
			message="Jac Phi(init) has infinite or NaN values.", fvec=NA) )
	nbplay <- argtest1$nplayer
	#sanity checks for the objective functions themselves
	argtest3 <- testarggapNIR(init[1,1:nbplay], dimx, obj, argobj)
	if(!is.numeric(leaders) || length(leaders) > nbplay-1)
		stop("wrong leaders argument.")
	if(any(!leaders %in% 1:nbplay))
		stop("wrong leaders argument.")
	#the followers are all players that are not leaders
	followers <- (1:nbplay)[!(1:nbplay %in% leaders)]
	dimx <- argtest1$dimx
	n <- sum(dimx)
	nfoll <- sum(dimx[followers])
	nlead <- sum(dimx[leaders])
	dimlam <- argtest1$dimlam
	m <- sum(dimlam)
	#index matrices: 1st row is the begin index, 2nd row the end index
	index4lam <- rbind( cumsum(dimlam) - dimlam + 1, cumsum(dimlam) ) + n
	index4x <- rbind( cumsum(dimx) - dimx + 1, cumsum(dimx) ) + 0
	index4xfoll <- as.vector(sapply(followers, function(i) index4x[1,i]:index4x[2,i]))
	index4xlead <- as.vector(sapply(leaders, function(i) index4x[1,i]:index4x[2,i]))
	index4lamfoll <- as.vector(sapply(followers, function(i) index4lam[1,i]:index4lam[2,i]))
	#mu indices within ONE row of init: a row is c(x, lambda, mu), so the mu
	#part starts after the first n+m components (bug fix: length(init) would
	#count all matrix cells, ncol(init) is the row length)
	index4mufoll <- (1:ncol(init))[-(1:(n+m))]
	#wrapped follower functions: each rebuilds the full strategy vector via
	#transfoll() and remaps player/derivative indices to follower numbering
	grobjfoll <- function(xfoll, play, d1, arg)
		transfoll(argtest1$grobj, xfoll, index4xfoll, index4xlead, followers, leaders, 
			arg, arg$foll[play], arg$foll[d1], arg$add)
	heobjfoll <- function(xfoll, play, d1, d2, arg)
		transfoll(argtest2$heobj, xfoll, index4xfoll, index4xlead, followers, leaders, 
			arg, arg$foll[play], arg$foll[d1], arg$foll[d2], arg$add)
	if(!is.null(argtest1$constr))
		constrfoll <- function(xfoll, play, arg)
			transfoll(argtest1$constr, xfoll, index4xfoll, index4xlead, followers, leaders, 
				arg, arg$foll[play], arg$add)
	else
		constrfoll <- NULL
	if(!is.null(argtest1$grconstr))
		grconstrfoll <- function(xfoll, play, d1, arg)
			transfoll(argtest1$grconstr, xfoll, index4xfoll, index4xlead, followers, leaders, 
				arg, arg$foll[play], arg$foll[d1], arg$add)
	else
		grconstrfoll <- NULL
	if(!is.null(argtest2$heconstr))
		heconstrfoll <- function(xfoll, play, d1, d2, arg)
			transfoll(argtest2$heconstr, xfoll, index4xfoll, index4xlead, followers, leaders, 
				arg, arg$foll[play], arg$foll[d1], arg$foll[d2], arg$add)
	else
		heconstrfoll <- NULL
	if(!is.null(argtest1$joint))
		jointfoll <- function(xfoll, arg)
			transfoll(argtest1$joint, xfoll, index4xfoll, index4xlead, followers, leaders, 
				arg, arg$add)
	else
		jointfoll <- NULL
	if(!is.null(argtest1$grjoint))
		grjointfoll <- function(xfoll, d1, arg)
			transfoll(argtest1$grjoint, xfoll, index4xfoll, index4xlead, followers, leaders, 
				arg, arg$foll[d1], arg$add)
	else
		grjointfoll <- NULL
	if(!is.null(argtest2$hejoint))
		hejointfoll <- function(xfoll, d1, d2, arg)
			transfoll(argtest2$hejoint, xfoll, index4xfoll, index4xlead, followers, leaders, 
				arg, arg$foll[d1], arg$foll[d2], arg$add)
	else
		hejointfoll <- NULL
	listfollfunc <- list(grobjfoll=grobjfoll, heobjfoll=heobjfoll, 
		constrfoll=constrfoll, grconstrfoll=grconstrfoll, heconstrfoll=heconstrfoll, 
		jointfoll=jointfoll, grjointfoll=grjointfoll, hejointfoll=hejointfoll)
	#best response of the followers for the i-th leader strategy
	bestresp <- function(i)
	{
		foll <- bestresponse(x[i], argtest1, argtest2, leaders, followers, 
			index4xfoll, index4lamfoll, index4mufoll, init[i,], 
			listfollfunc, method.follower=method.follower, 
			control.follower=control.follower, 
			maxit.follower=maxit.follower, ...)
		c(par=foll$par[1:nfoll], code=foll$code, value=foll$value, lagrmult=foll$par[-(1:nfoll)])
	}
	sapply(seq_along(x), bestresp)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/confint.R
\name{confint,USL-method}
\alias{confint,USL-method}
\title{Confidence Intervals for USL model parameters}
\usage{
\S4method{confint}{USL}(object, parm, level = 0.95)
}
\arguments{
\item{object}{A USL object.}
\item{parm}{A specification of which parameters are to be given confidence
intervals, either a vector of numbers or a vector of names. If missing,
all parameters are considered.}
\item{level}{The confidence level required.}
}
\value{
A matrix (or vector) with columns giving lower and upper confidence
limits for each parameter. These will be labelled as (1-level)/2 and
1 - (1-level)/2 in \% (by default 2.5\% and 97.5\%).
}
\description{
Estimate confidence intervals for one or more parameters in a USL model.
The intervals are calculated from the parameter standard error using the
Student t distribution at the given level.
}
\details{
Bootstrapping is no longer used to estimate confidence intervals.
}
\examples{
require(usl)
data(specsdm91)
## Create USL model
usl.model <- usl(throughput ~ load, specsdm91)
## Print confidence intervals
confint(usl.model)
}
\seealso{
\code{\link{usl}}
}
| /man/confint-USL-method.Rd | no_license | smoeding/usl | R | false | true | 1,205 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/confint.R
\name{confint,USL-method}
\alias{confint,USL-method}
\title{Confidence Intervals for USL model parameters}
\usage{
\S4method{confint}{USL}(object, parm, level = 0.95)
}
\arguments{
\item{object}{A USL object.}
\item{parm}{A specification of which parameters are to be given confidence
intervals, either a vector of numbers or a vector of names. If missing,
all parameters are considered.}
\item{level}{The confidence level required.}
}
\value{
A matrix (or vector) with columns giving lower and upper confidence
limits for each parameter. These will be labelled as (1-level)/2 and
1 - (1-level)/2 in \% (by default 2.5\% and 97.5\%).
}
\description{
Estimate confidence intervals for one or more parameters in a USL model.
The intervals are calculated from the parameter standard error using the
Student t distribution at the given level.
}
\details{
Bootstrapping is no longer used to estimate confidence intervals.
}
\examples{
require(usl)
data(specsdm91)
## Create USL model
usl.model <- usl(throughput ~ load, specsdm91)
## Print confidence intervals
confint(usl.model)
}
\seealso{
\code{\link{usl}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binary.R
\name{compute_binary_response}
\alias{compute_binary_response}
\title{Returns binary response estimates}
\usage{
compute_binary_response(response, weight, sample_size)
}
\arguments{
\item{response}{a vector of binary (0 or 1) responses}
\item{weight}{a vector of sample weights for inverse probability weighting;
invariant up to a scaling factor}
\item{sample_size}{The sample size to use, which may be a non-integer (as
responses from ZIPs that span geographical boundaries are weighted
proportionately, and survey weights may also be applied)}
}
\description{
This function takes vectors as input and computes the binary response values
(a point estimate named "val", a standard error named "se", and a sample size
named "sample_size").
}
| /facebook/delphiFacebook/man/compute_binary_response.Rd | permissive | alexcoda/covidcast-indicators | R | false | true | 830 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binary.R
\name{compute_binary_response}
\alias{compute_binary_response}
\title{Returns binary response estimates}
\usage{
compute_binary_response(response, weight, sample_size)
}
\arguments{
\item{response}{a vector of binary (0 or 1) responses}
\item{weight}{a vector of sample weights for inverse probability weighting;
invariant up to a scaling factor}
\item{sample_size}{The sample size to use, which may be a non-integer (as
responses from ZIPs that span geographical boundaries are weighted
proportionately, and survey weights may also be applied)}
}
\description{
This function takes vectors as input and computes the binary response values
(a point estimate named "val", a standard error named "se", and a sample size
named "sample_size").
}
|
# Resolve the directory of this script and load the common h2o-3 R test
# harness (provides locate(), doJavapredictTest(), doTest(), ...).
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
#----------------------------------------------------------------------
# Used to test out customer dataset to make sure h2o model predict and
# pojo predict generate the same answers. However, customer dataset is
# not to be made public and hence this test is a NOPASS.
#----------------------------------------------------------------------
test <-
function() {
    #----------------------------------------------------------------------
    # Parameters for the test.
    #----------------------------------------------------------------------
    # Story:
    # The objective of the test is to verify java code generation
    # for big models containing huge amount of trees.
    # This case verifies multi-classifiers.
    training_file <- test_file <- locate("smalldata/Training_Data.csv")
    #training_file <- test_file <- locate("smalldata/dd.csv")
    training_frame <- h2o.importFile(training_file)
    test_frame <- h2o.importFile(test_file)
    # (bug fix: removed a leftover browser() call that would suspend
    # the test when run non-interactively)

    # GBM hyper-parameters; fixed seed for reproducibility
    params <- list()
    params$ntrees <- 100
    params$max_depth <- 7
    params$x <- 1:14
    params$y <- "Label"
    params$training_frame <- training_frame
    params$seed <- 42

    # train the GBM, export the POJO and compare its predictions with h2o's
    doJavapredictTest("gbm", test_file, test_frame, params)
}
doTest("gbm test", test)
doTest("gbm test", test) | /h2o-r/tests/testdir_javapredict/runit_hexdev_692_NOPASS_dum.R | permissive | yyenigun/h2o-3 | R | false | false | 1,439 | r | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
#----------------------------------------------------------------------
# Used to test out customer dataset to make sure h2o model predict and pojo predict generate
# the same answers. However, customer dataset is not to be made public and hence this test
# is a NOPASS.
#----------------------------------------------------------------------
test <-
function() {
#----------------------------------------------------------------------
# Parameters for the test.
#----------------------------------------------------------------------
# Story:
# The objective of the test is to verify java code generation
# for big models containing huge amount of trees.
# This case verifies multi-classifiers.
training_file <- test_file <- locate("smalldata/Training_Data.csv")
#training_file <- test_file <- locate("smalldata/dd.csv")
training_frame <- h2o.importFile(training_file)
test_frame <- h2o.importFile(test_file)
browser()
params <- list()
params$ntrees <- 100
params$max_depth <- 7
params$x <- 1:14
params$y <- "Label"
params$training_frame <- training_frame
params$seed <- 42
doJavapredictTest("gbm",test_file,test_frame,params)
}
doTest("gbm test", test) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcmd.R
\name{rcmd}
\alias{rcmd}
\alias{rcmd_safe}
\title{Run an \verb{R CMD} command}
\usage{
rcmd(
cmd,
cmdargs = character(),
libpath = .libPaths(),
repos = default_repos(),
stdout = NULL,
stderr = NULL,
poll_connection = TRUE,
echo = FALSE,
show = FALSE,
callback = NULL,
block_callback = NULL,
spinner = show && interactive(),
system_profile = FALSE,
user_profile = "project",
env = rcmd_safe_env(),
timeout = Inf,
wd = ".",
fail_on_status = FALSE,
...
)
rcmd_safe(
cmd,
cmdargs = character(),
libpath = .libPaths(),
repos = default_repos(),
stdout = NULL,
stderr = NULL,
poll_connection = TRUE,
echo = FALSE,
show = FALSE,
callback = NULL,
block_callback = NULL,
spinner = show && interactive(),
system_profile = FALSE,
user_profile = "project",
env = rcmd_safe_env(),
timeout = Inf,
wd = ".",
fail_on_status = FALSE,
...
)
}
\arguments{
\item{cmd}{Command to run. See \code{R --help} from the command
line for the various commands. In the current version of R (3.2.4)
these are: \code{BATCH}, \code{COMPILE}, \code{SHLIB}, \code{INSTALL}, \code{REMOVE}, \code{build},
\code{check}, \code{LINK}, \code{Rprof}, \code{Rdconv}, \code{Rd2pdf}, \code{Rd2txt}, \code{Stangle},
\code{Sweave}, \code{Rdiff}, \code{config}, \code{javareconf}, \code{rtags}.}
\item{cmdargs}{Command line arguments.}
\item{libpath}{The library path.}
\item{repos}{The \code{repos} option. If \code{NULL}, then no
\code{repos} option is set. This options is only used if
\code{user_profile} or \code{system_profile} is set \code{FALSE},
as it is set using the system or the user profile.}
\item{stdout}{Optionally a file name to send the standard output to.}
\item{stderr}{Optionally a file name to send the standard error to.
It may be the same as \code{stdout}, in which case standard error is
redirected to standard output. It can also be the special string
\code{"2>&1"}, in which case standard error will be redirected to standard
output.}
\item{poll_connection}{Whether to have a control connection to
the process. This is used to transmit messages from the subprocess
to the parent.}
\item{echo}{Whether to echo the complete command run by \code{rcmd}.}
\item{show}{Logical, whether to show the standard output on the screen
while the child process is running. Note that this is independent
of the \code{stdout} and \code{stderr} arguments. The standard
error is not shown currently.}
\item{callback}{A function to call for each line of the standard
output and standard error from the child process. It works together
with the \code{show} option; i.e. if \code{show = TRUE}, and a
callback is provided, then the output is shown of the screen, and the
callback is also called.}
\item{block_callback}{A function to call for each block of the standard
output and standard error. This callback is not line oriented, i.e.
multiple lines or half a line can be passed to the callback.}
\item{spinner}{Whether to show a calming spinner on the screen while
the child R session is running. By default it is shown if
\code{show = TRUE} and the R session is interactive.}
\item{system_profile}{Whether to use the system profile file.}
\item{user_profile}{Whether to use the user's profile file.
If this is \code{"project"}, then only the profile from the working
directory is used, but the \code{R_PROFILE_USER} environment variable
and the user level profile are not. See also "Security considerations"
below.}
\item{env}{Environment variables to set for the child process.}
\item{timeout}{Timeout for the function call to finish. It can be a
\link[base:difftime]{base::difftime} object, or a real number, meaning seconds.
If the process does not finish before the timeout period expires,
then a \code{system_command_timeout_error} error is thrown. \code{Inf}
means no timeout.}
\item{wd}{Working directory to use for running the command. Defaults
to the current working directory.}
\item{fail_on_status}{Whether to throw an R error if the command returns
with a non-zero status code. By default no error is thrown.}
\item{...}{Extra arguments are passed to \code{\link[processx:run]{processx::run()}}.}
}
\value{
A list with the command line \verb{$command}),
standard output (\verb{$stdout}), standard error (\code{stderr}),
exit status (\verb{$status}) of the external \verb{R CMD} command, and
whether a timeout was reached (\verb{$timeout}).
}
\description{
Run an \verb{R CMD} command form within R. This will usually start
another R process, from a shell script.
}
\details{
Starting from \code{callr} 2.0.0, \code{rcmd()} has safer defaults, the same as
the \code{rcmd_safe()} default values. Use \code{\link[=rcmd_copycat]{rcmd_copycat()}} for the old
defaults.
}
\section{Security considerations}{
\code{callr} makes a copy of the user's \code{.Renviron} file and potentially of
the local or user \code{.Rprofile}, in the session temporary
directory. Avoid storing sensitive information such as passwords, in
your environment file or your profile, otherwise this information will
get scattered in various files, at least temporarily, until the
subprocess finishes. You can use the keyring package to avoid passwords
in plain files.
}
\examples{
\dontshow{if (FALSE) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
rcmd("config", "CC")
\dontshow{\}) # examplesIf}
}
\seealso{
Other R CMD commands:
\code{\link{rcmd_bg}()},
\code{\link{rcmd_copycat}()}
}
\concept{R CMD commands}
| /man/rcmd.Rd | permissive | r-lib/callr | R | false | true | 5,561 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcmd.R
\name{rcmd}
\alias{rcmd}
\alias{rcmd_safe}
\title{Run an \verb{R CMD} command}
\usage{
rcmd(
cmd,
cmdargs = character(),
libpath = .libPaths(),
repos = default_repos(),
stdout = NULL,
stderr = NULL,
poll_connection = TRUE,
echo = FALSE,
show = FALSE,
callback = NULL,
block_callback = NULL,
spinner = show && interactive(),
system_profile = FALSE,
user_profile = "project",
env = rcmd_safe_env(),
timeout = Inf,
wd = ".",
fail_on_status = FALSE,
...
)
rcmd_safe(
cmd,
cmdargs = character(),
libpath = .libPaths(),
repos = default_repos(),
stdout = NULL,
stderr = NULL,
poll_connection = TRUE,
echo = FALSE,
show = FALSE,
callback = NULL,
block_callback = NULL,
spinner = show && interactive(),
system_profile = FALSE,
user_profile = "project",
env = rcmd_safe_env(),
timeout = Inf,
wd = ".",
fail_on_status = FALSE,
...
)
}
\arguments{
\item{cmd}{Command to run. See \code{R --help} from the command
line for the various commands. In the current version of R (3.2.4)
these are: \code{BATCH}, \code{COMPILE}, \code{SHLIB}, \code{INSTALL}, \code{REMOVE}, \code{build},
\code{check}, \code{LINK}, \code{Rprof}, \code{Rdconv}, \code{Rd2pdf}, \code{Rd2txt}, \code{Stangle},
\code{Sweave}, \code{Rdiff}, \code{config}, \code{javareconf}, \code{rtags}.}
\item{cmdargs}{Command line arguments.}
\item{libpath}{The library path.}
\item{repos}{The \code{repos} option. If \code{NULL}, then no
\code{repos} option is set. This options is only used if
\code{user_profile} or \code{system_profile} is set \code{FALSE},
as it is set using the system or the user profile.}
\item{stdout}{Optionally a file name to send the standard output to.}
\item{stderr}{Optionally a file name to send the standard error to.
It may be the same as \code{stdout}, in which case standard error is
redirected to standard output. It can also be the special string
\code{"2>&1"}, in which case standard error will be redirected to standard
output.}
\item{poll_connection}{Whether to have a control connection to
the process. This is used to transmit messages from the subprocess
to the parent.}
\item{echo}{Whether to echo the complete command run by \code{rcmd}.}
\item{show}{Logical, whether to show the standard output on the screen
while the child process is running. Note that this is independent
of the \code{stdout} and \code{stderr} arguments. The standard
error is not shown currently.}
\item{callback}{A function to call for each line of the standard
output and standard error from the child process. It works together
with the \code{show} option; i.e. if \code{show = TRUE}, and a
callback is provided, then the output is shown of the screen, and the
callback is also called.}
\item{block_callback}{A function to call for each block of the standard
output and standard error. This callback is not line oriented, i.e.
multiple lines or half a line can be passed to the callback.}
\item{spinner}{Whether to show a calming spinner on the screen while
the child R session is running. By default it is shown if
\code{show = TRUE} and the R session is interactive.}
\item{system_profile}{Whether to use the system profile file.}
\item{user_profile}{Whether to use the user's profile file.
If this is \code{"project"}, then only the profile from the working
directory is used, but the \code{R_PROFILE_USER} environment variable
and the user level profile are not. See also "Security considerations"
below.}
\item{env}{Environment variables to set for the child process.}
\item{timeout}{Timeout for the function call to finish. It can be a
\link[base:difftime]{base::difftime} object, or a real number, meaning seconds.
If the process does not finish before the timeout period expires,
then a \code{system_command_timeout_error} error is thrown. \code{Inf}
means no timeout.}
\item{wd}{Working directory to use for running the command. Defaults
to the current working directory.}
\item{fail_on_status}{Whether to throw an R error if the command returns
with a non-zero status code. By default no error is thrown.}
\item{...}{Extra arguments are passed to \code{\link[processx:run]{processx::run()}}.}
}
\value{
A list with the command line \verb{$command}),
standard output (\verb{$stdout}), standard error (\code{stderr}),
exit status (\verb{$status}) of the external \verb{R CMD} command, and
whether a timeout was reached (\verb{$timeout}).
}
\description{
Run an \verb{R CMD} command form within R. This will usually start
another R process, from a shell script.
}
\details{
Starting from \code{callr} 2.0.0, \code{rcmd()} has safer defaults, the same as
the \code{rcmd_safe()} default values. Use \code{\link[=rcmd_copycat]{rcmd_copycat()}} for the old
defaults.
}
\section{Security considerations}{
\code{callr} makes a copy of the user's \code{.Renviron} file and potentially of
the local or user \code{.Rprofile}, in the session temporary
directory. Avoid storing sensitive information such as passwords, in
your environment file or your profile, otherwise this information will
get scattered in various files, at least temporarily, until the
subprocess finishes. You can use the keyring package to avoid passwords
in plain files.
}
\examples{
\dontshow{if (FALSE) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
rcmd("config", "CC")
\dontshow{\}) # examplesIf}
}
\seealso{
Other R CMD commands:
\code{\link{rcmd_bg}()},
\code{\link{rcmd_copycat}()}
}
\concept{R CMD commands}
|
#' Maximization step using coordinate descent optimization.
#'
#' One M-step of the regularized EM algorithm: latent impact (mean and
#' variance) parameters are updated by Newton-type coordinate descent, then
#' each item's baseline and DIF parameters are updated, with DIF
#' coefficients shrunk by a soft threshold (lasso / elastic net) or firm
#' threshold (MCP).
#'
#' @param p List of parameters.
#' @param item_data Matrix or data frame of item responses.
#' @param pred_data Matrix or data frame of DIF and/or impact predictors.
#' @param mean_predictors Possibly different matrix of predictors for the mean
#' impact equation.
#' @param var_predictors Possibly different matrix of predictors for the
#' variance impact equation.
#' @param eout E step output, including matrix for item and impact equations,
#' in addition to theta values (possibly adaptive).
#' @param item_type Optional character value or vector indicating the type of
#' item to be modeled.
#' @param pen_type Character value indicating the penalty function to use.
#' @param tau_current A single numeric value of tau that exists within
#' \code{tau_vec}.
#' @param pen Current penalty index.
#' @param alpha Numeric value indicating the alpha parameter in the elastic net
#' penalty function.
#' @param gamma Numeric value indicating the gamma parameter in the MCP
#' function.
#' @param anchor Optional numeric value or vector indicating which item
#' response(s) are anchors (e.g., \code{anchor = 1}).
#' @param final_control Control parameters.
#' @param samp_size Sample size in data set.
#' @param num_responses Number of responses for each item.
#' @param num_items Number of items in data set.
#' @param num_quad Number of quadrature points used for approximating the
#' latent variable.
#' @param num_predictors Number of predictors.
#' @param num_tau Numeric value giving the number of tau values on the
#' regularization path; compared numerically below (\code{num_tau >= 10})
#' when guarding against under-identified models.
#' @param max_tau Logical indicating whether to output the maximum tau value
#' needed to remove all DIF from the model.
#'
#' @return When \code{max_tau = FALSE}, a \code{"list"} with the updated
#' parameter list (\code{p}) and the \code{under_identified} flag. When
#' \code{max_tau = TRUE}, a single numeric value: the largest absolute
#' unpenalized DIF update, i.e. the smallest tau that shrinks every DIF
#' coefficient to zero.
#'
#' @keywords internal
#'
Mstep_cd2 <-
  function(p,
           item_data,
           pred_data,
           mean_predictors,
           var_predictors,
           eout,
           item_type,
           pen_type,
           tau_current,
           pen,
           alpha,
           gamma,
           anchor,
           final_control,
           samp_size,
           num_responses,
           num_items,
           num_quad,
           num_predictors,
           num_tau,
           max_tau) {

    # Set under-identified model to FALSE until proven TRUE.
    under_identified <- FALSE

    # Unpack E-step output: quadrature nodes (theta) and posterior expected
    # counts (etable).
    theta <- eout$theta
    etable <- eout$etable

    # Working copy of the parameter list updated by coordinate descent.
    p_cd <- p

    # Last M-step only: accumulate raw (unpenalized) DIF updates; their
    # maximum absolute value is returned as the tau that removes all DIF.
    if(max_tau) id_max_z <- 0

    # CD maximization settings for the outer (full-cycle) loop.
    lastp_cd_all <- p_cd
    eps_cd_all <- Inf
    iter_cd_all <- 1

    # Outer loop: cycle over all parameter groups until convergence or
    # until the tolerance is met.
    while(eps_cd_all > final_control$tol){

      # Latent mean impact updates: one Newton-Raphson coordinate update
      # per covariate, iterated to convergence.
      for(cov in 1:ncol(mean_predictors)) {
        lastp_cd <- p_cd
        eps_cd <- Inf
        iter_cd <- 1
        while(eps_cd > final_control$tol){
          anl_deriv <- d_alpha(c(p_cd[[num_items+1]],p_cd[[num_items+2]]),
                               etable,
                               theta,
                               mean_predictors,
                               var_predictors,
                               cov=cov,
                               samp_size,
                               num_items,
                               num_quad)
          # Newton step: first derivative divided by second derivative.
          p_cd[[num_items+1]][[cov]] <-
            p_cd[[num_items+1]][[cov]] - anl_deriv[[1]]/anl_deriv[[2]]
          # Convergence: Euclidean distance between successive estimates.
          eps_cd = sqrt(sum((unlist(p_cd)-unlist(lastp_cd))^2))
          lastp_cd <- p_cd
          iter_cd = iter_cd + 1
        }
      }

      # Latent variance impact updates (same scheme as the mean updates).
      for(cov in 1:ncol(var_predictors)) {
        lastp_cd <- p_cd
        eps_cd <- Inf
        iter_cd <- 1
        while(eps_cd > final_control$tol){
          anl_deriv <- d_phi(c(p_cd[[num_items+1]],p_cd[[num_items+2]]),
                             etable,
                             theta,
                             mean_predictors,
                             var_predictors,
                             cov=cov,
                             samp_size,
                             num_items,
                             num_quad)
          p_cd[[num_items+2]][[cov]] <-
            p_cd[[num_items+2]][[cov]] - anl_deriv[[1]]/anl_deriv[[2]]
          eps_cd = sqrt(sum((unlist(p_cd)-unlist(lastp_cd))^2))
          lastp_cd <- p_cd
          iter_cd = iter_cd + 1
        }
      }

      # Item response updates.
      for (item in 1:num_items) {

        # For categorical-type items, build one E-table per response
        # category, zeroing the rows whose observed response differs.
        if(item_type[item] != "cfa") {
          etable_item <- lapply(1:num_responses[item], function(x) etable)
          for(resp in 1:num_responses[item]) {
            etable_item[[resp]][which(
              !(item_data[,item] == resp)), ] <- 0
          }
        }

        if(item_type[item] == "cfa") {
          # Gaussian (continuous) responses.
          # NOTE(review): 'etable_item' is only constructed above for
          # non-"cfa" items, so in this branch it is undefined (first item)
          # or stale from a previous item -- confirm whether 'etable' was
          # intended in the calls below.
          # NOTE(review): this branch writes into 'p', but the function
          # returns 'p_cd' (via p_cd_all), so these Gaussian updates appear
          # to be discarded -- confirm whether 'p_cd' was intended.

          # Intercept updates.
          anl_deriv <- d_mu_gaussian("c0",
                                     p[[item]],
                                     etable,
                                     theta,
                                     item_data[,item],
                                     pred_data,
                                     cov=NULL,
                                     samp_size,
                                     num_items,
                                     num_quad)
          p[[item]][[1]] <- p[[item]][[1]] - anl_deriv[[1]]/anl_deriv[[2]]

          # Slope updates.
          if(item_type[item] != "Rasch") {
            a0_parms <- grep(paste0("a0_itm",item,"_"),names(p[[item]]),fixed=T)
            anl_deriv <- d_mu_gaussian("a0",
                                       p[[item]],
                                       etable_item,
                                       theta,
                                       item_data[,item],
                                       pred_data,
                                       cov=NULL,
                                       samp_size,
                                       num_items,
                                       num_quad)
            # BUGFIX: assign into the slope entries only. The original
            # 'p[[item]] <- p[[item]][a0_parms] - ...' replaced the entire
            # item parameter list with the slope value, destroying the
            # named parameters that the grep() calls below rely on.
            p[[item]][a0_parms] <- p[[item]][a0_parms] -
              anl_deriv[[1]]/anl_deriv[[2]]
          }

          # Residual (error variance) updates.
          s0_parms <- grep(paste0("s0_itm",item,"_"),names(p[[item]]),fixed=T)
          anl_deriv <- d_sigma_gaussian("s0",
                                        p[[item]],
                                        etable_item,
                                        theta,
                                        item_data[,item],
                                        pred_data,
                                        cov=NULL,
                                        samp_size,
                                        num_items,
                                        num_quad)
          p[[item]][s0_parms][1] <- p[[item]][s0_parms][1] -
            anl_deriv[[1]]/anl_deriv[[2]]
          # Guard: a variance must be positive; reset to 1 if the Newton
          # step overshoots into negative territory.
          if(p[[item]][s0_parms][[1]] < 0) p[[item]][s0_parms][[1]] <- 1

          if(!any(item == anchor)) {
            # Residual DIF updates (not penalized).
            for(cov in 1:num_predictors) {
              s1_parms <-
                grep(paste0("s1_itm",item,"_cov",cov),names(p[[item]]),fixed=T)
              anl_deriv <- d_sigma_gaussian("s1",
                                            p[[item]],
                                            etable_item,
                                            theta,
                                            item_data[,item],
                                            pred_data,
                                            cov=cov,
                                            samp_size,
                                            num_items,
                                            num_quad)
              p[[item]][s1_parms][1] <- p[[item]][s1_parms][1] -
                anl_deriv[[1]]/anl_deriv[[2]]
            }
            p2 <- unlist(p)
            # Intercept DIF updates (penalized).
            for(cov in 1:num_predictors){
              # End routine if only one anchor item is left on each
              # covariate for each item parameter (under-identification
              # guard for the anchor-free lasso case).
              if(is.null(anchor) &
                 sum(p2[grep(paste0("c1(.*?)cov",cov),names(p2))] != 0) >
                 (num_items - 1) &
                 alpha == 1 &&
                 (length(final_control$start.values) == 0 || pen > 1) &&
                 num_tau >= 10){
                under_identified <- TRUE
                break
              }
              c1_parms <-
                grep(paste0("c1_itm",item,"_cov",cov),names(p[[item]]),fixed=T)
              anl_deriv <- d_mu_gaussian("c1",
                                         p[[item]],
                                         etable_item,
                                         theta,
                                         item_data[,item],
                                         pred_data,
                                         cov,
                                         samp_size,
                                         num_items,
                                         num_quad)
              # z is the raw (unpenalized) Newton update; the threshold
              # operator then shrinks it toward zero.
              z <- p[[item]][c1_parms] - anl_deriv[[1]]/anl_deriv[[2]]
              if(max_tau) id_max_z <- c(id_max_z,z)
              p[[item]][c1_parms] <-
                ifelse(pen_type == "lasso",
                       soft_threshold(z,alpha,tau_current),
                       firm_threshold(z,alpha,tau_current,gamma))
            }
            # Slope DIF updates (penalized).
            for(cov in 1:num_predictors){
              if(is.null(anchor) &
                 sum(p2[grep(paste0("a1(.*?)cov",cov),names(p2))] != 0) >
                 (num_items - 1) &
                 alpha == 1 &&
                 (length(final_control$start.values) == 0 || pen > 1) &&
                 num_tau >= 10){
                under_identified <- TRUE
                break
              }
              if(item_type[item] != "Rasch"){
                a1_parms <-
                  grep(paste0("a1_itm",item,"_cov",cov),names(p[[item]]),fixed=T)
                anl_deriv <- d_mu_gaussian("a1",
                                           p[[item]],
                                           etable_item,
                                           theta,
                                           item_data[,item],
                                           pred_data,
                                           cov,
                                           samp_size,
                                           num_items,
                                           num_quad)
                z <- p[[item]][a1_parms] - anl_deriv[[1]]/anl_deriv[[2]]
                if(max_tau) id_max_z <- c(id_max_z,z)
                p[[item]][a1_parms] <- ifelse(pen_type == "lasso",
                                       soft_threshold(z,alpha,tau_current),
                                       firm_threshold(z,alpha,tau_current,gamma))
              }
            }
          }

        # Bernoulli responses.
        } else if(item_type[item] == "2pl") {

          # Intercept updates, iterated to convergence.
          lastp_cd <- p_cd
          eps_cd <- Inf
          iter_cd <- 1
          while(eps_cd > final_control$tol){
            anl_deriv <- d_bernoulli("c0",
                                     p_cd[[item]],
                                     etable_item,
                                     theta,
                                     pred_data,
                                     cov=0,
                                     samp_size,
                                     num_items,
                                     num_quad)
            p_cd[[item]][[1]] <- p_cd[[item]][[1]] - anl_deriv[[1]]/anl_deriv[[2]]
            eps_cd = sqrt(sum((unlist(p_cd)-unlist(lastp_cd))^2))
            lastp_cd <- p_cd
            iter_cd = iter_cd + 1
          }

          # Slope updates (skipped for Rasch, where slopes are fixed).
          if(item_type[item] != "Rasch") {
            lastp_cd <- p_cd
            eps_cd <- Inf
            iter_cd <- 1
            while(eps_cd > final_control$tol){
              anl_deriv <- d_bernoulli("a0",
                                       p_cd[[item]],
                                       etable_item,
                                       theta,
                                       pred_data,
                                       cov=0,
                                       samp_size,
                                       num_items,
                                       num_quad)
              p_cd[[item]][[2]] <- p_cd[[item]][[2]] - anl_deriv[[1]]/anl_deriv[[2]]
              eps_cd = sqrt(sum((unlist(p_cd)-unlist(lastp_cd))^2))
              lastp_cd <- p_cd
              iter_cd = iter_cd + 1
            }
          }

          if(!any(item == anchor)) {
            p2_cd <- unlist(p_cd)
            # Intercept DIF updates (penalized), iterated to convergence.
            for(cov in 1:num_predictors) {
              # Under-identification guard (see Gaussian branch).
              if(is.null(anchor) &
                 sum(p2_cd[grep(paste0("c1(.*?)cov",cov),names(p2_cd))] != 0) >
                 (num_items - 1) &
                 alpha == 1 &&
                 (length(final_control$start.values) == 0 || pen > 1) &&
                 num_tau >= 10){
                under_identified <- TRUE
                break
              }
              lastp_cd <- p_cd
              eps_cd <- Inf
              iter_cd <- 1
              while(eps_cd > final_control$tol){
                anl_deriv <- d_bernoulli("c1",
                                         p_cd[[item]],
                                         etable_item,
                                         theta,
                                         pred_data,
                                         cov,
                                         samp_size,
                                         num_items,
                                         num_quad)
                z <- p_cd[[item]][[2+cov]] - anl_deriv[[1]]/anl_deriv[[2]]
                if(max_tau) id_max_z <- c(id_max_z,z)
                p_cd[[item]][[2+cov]] <- ifelse(pen_type == "lasso",
                                       soft_threshold(z,alpha,tau_current),
                                       firm_threshold(z,alpha,tau_current,gamma))
                eps_cd = sqrt(sum((unlist(p_cd)-unlist(lastp_cd))^2))
                lastp_cd <- p_cd
                iter_cd = iter_cd + 1
              }
            }
            # Slope DIF updates (penalized), iterated to convergence.
            for(cov in 1:num_predictors) {
              if(is.null(anchor) &
                 sum(p2_cd[grep(paste0("a1(.*?)cov",cov),names(p2_cd))] != 0) >
                 (num_items - 1) &
                 alpha == 1 &&
                 (length(final_control$start.values) == 0 || pen > 1) &&
                 num_tau >= 10){
                under_identified <- TRUE
                break
              }
              if(item_type[item] != "Rasch") {
                lastp_cd <- p_cd
                eps_cd <- Inf
                iter_cd <- 1
                while(eps_cd > final_control$tol){
                  anl_deriv <- d_bernoulli("a1",
                                           p_cd[[item]],
                                           etable_item,
                                           theta,
                                           pred_data,
                                           cov,
                                           samp_size,
                                           num_items,
                                           num_quad)
                  z <- p_cd[[item]][[2+num_predictors+cov]] -
                    anl_deriv[[1]]/anl_deriv[[2]]
                  if(max_tau) id_max_z <- c(id_max_z,z)
                  p_cd[[item]][[2+num_predictors+cov]] <-
                    ifelse(pen_type == "lasso",
                           soft_threshold(z,alpha,tau_current),
                           firm_threshold(z,alpha,tau_current,gamma))
                  eps_cd = sqrt(sum((unlist(p_cd)-unlist(lastp_cd))^2))
                  lastp_cd <- p_cd
                  iter_cd = iter_cd + 1
                }
              }
            }
          }

        # Categorical (ordinal, > 2 categories) responses.
        } else {
          # NOTE(review): this branch writes into 'p', but the function
          # returns 'p_cd' (via p_cd_all); these updates appear to be
          # discarded -- confirm whether 'p_cd' was intended.

          # Intercept updates.
          anl_deriv <- d_categorical("c0",
                                     p[[item]],
                                     etable_item,
                                     theta,
                                     pred_data,
                                     thr=-1,
                                     cov=-1,
                                     samp_size,
                                     num_responses[[item]],
                                     num_items,
                                     num_quad)
          p[[item]][[1]] <- p[[item]][[1]] - anl_deriv[[1]]/anl_deriv[[2]]

          # Threshold updates.
          # NOTE(review): 2:(num_responses[item]-1) counts DOWN (2:1) when
          # an item has only 2 categories -- this branch assumes >= 3
          # response categories (binary items take the "2pl" branch).
          for(thr in 2:(num_responses[item]-1)) {
            anl_deriv <- d_categorical("c0",
                                       p[[item]],
                                       etable_item,
                                       theta,
                                       pred_data,
                                       thr=thr,
                                       cov=-1,
                                       samp_size,
                                       num_responses[[item]],
                                       num_items,
                                       num_quad)
            p[[item]][[thr]] <- p[[item]][[thr]] - anl_deriv[[1]]/anl_deriv[[2]]
          }

          # Slope updates.
          if(item_type[item] != "Rasch") {
            anl_deriv <- d_categorical("a0",
                                       p[[item]],
                                       etable_item,
                                       theta,
                                       pred_data,
                                       thr=-1,
                                       cov=-1,
                                       samp_size,
                                       num_responses[[item]],
                                       num_items,
                                       num_quad)
            p[[item]][[num_responses[[item]]]] <-
              p[[item]][[num_responses[[item]]]] - anl_deriv[[1]]/anl_deriv[[2]]
          }

          if(!any(item == anchor)){
            p2 <- unlist(p)
            # Intercept DIF updates (penalized).
            for(cov in 1:num_predictors) {
              # Under-identification guard (see Gaussian branch).
              if(is.null(anchor) &
                 sum(p2[grep(paste0("c1(.*?)cov",cov),names(p2))] != 0) >
                 (num_items - 1) &
                 alpha == 1 &&
                 (length(final_control$start.values) == 0 || pen > 1) &&
                 num_tau >= 10){
                under_identified <- TRUE
                break
              }
              anl_deriv <- d_categorical("c1",
                                         p[[item]],
                                         etable_item,
                                         theta,
                                         pred_data,
                                         thr=-1,
                                         cov,
                                         samp_size,
                                         num_responses[[item]],
                                         num_items,
                                         num_quad)
              z <- p[[item]][[num_responses[[item]]+cov]] -
                anl_deriv[[1]]/anl_deriv[[2]]
              if(max_tau) id_max_z <- c(id_max_z,z)
              p[[item]][[num_responses[[item]]+cov]] <-
                ifelse(pen_type == "lasso",
                       soft_threshold(z,alpha,tau_current),
                       firm_threshold(z,alpha,tau_current,gamma))
            }
            # Slope DIF updates (penalized).
            for(cov in 1:num_predictors) {
              if(is.null(anchor) &
                 sum(p2[grep(paste0("a1(.*?)cov",cov),names(p2))] != 0) >
                 (num_items - 1) &
                 alpha == 1 &&
                 (length(final_control$start.values) == 0 || pen > 1) &&
                 num_tau >= 10){
                under_identified <- TRUE
                break
              }
              if(item_type[item] != "Rasch") {
                anl_deriv <- d_categorical("a1",
                                           p[[item]],
                                           etable_item,
                                           theta,
                                           pred_data,
                                           thr=-1,
                                           cov,
                                           samp_size,
                                           num_responses[[item]],
                                           num_items,
                                           num_quad)
                z <- p[[item]][[length(p[[item]])-ncol(pred_data)+cov]] -
                  anl_deriv[[1]]/anl_deriv[[2]]
                if(max_tau) id_max_z <- c(id_max_z,z)
                p[[item]][[length(p[[item]])-ncol(pred_data)+cov]] <-
                  ifelse(pen_type == "lasso",
                         soft_threshold(z,alpha,tau_current),
                         firm_threshold(z,alpha,tau_current,gamma))
              }
            }
          }
        }
      }

      p_cd_all <- p_cd
      # Convergence of the full cycle: distance between successive
      # full-parameter vectors.
      eps_cd_all = sqrt(sum((unlist(p_cd_all)-unlist(lastp_cd_all))^2))
      lastp_cd_all <- p_cd_all
      # Progress output for the outer coordinate-descent loop.
      print(paste0("CD iter: ",iter_cd_all))
      iter_cd_all = iter_cd_all + 1
    }

    if(max_tau) {
      # Largest absolute raw DIF update = smallest tau removing all DIF.
      id_max_z <- max(abs(id_max_z))
      return(id_max_z)
    } else {
      return(list(p=p_cd_all,
                  under_identified=under_identified))
    }
  }
| /R/m_step_cd2.R | permissive | wbelzak/regDIF | R | false | false | 23,586 | r | #' Maximization step using coordinate descent optimization.
#'
#' @param p List of parameters.
#' @param item_data Matrix or data frame of item responses.
#' @param pred_data Matrix or data frame of DIF and/or impact predictors.
#' @param mean_predictors Possibly different matrix of predictors for the mean
#' impact equation.
#' @param var_predictors Possibly different matrix of predictors for the
#' variance impact equation.
#' @param eout E step output, including matrix for item and impact equations,
#' in addition to theta values (possibly adaptive).
#' @param item_type Optional character value or vector indicating the type of
#' item to be modeled.
#' @param pen_type Character value indicating the penalty function to use.
#' @param tau_current A single numeric value of tau that exists within
#' \code{tau_vec}.
#' @param pen Current penalty index.
#' @param alpha Numeric value indicating the alpha parameter in the elastic net
#' penalty function.
#' @param gamma Numeric value indicating the gamma parameter in the MCP
#' function.
#' @param anchor Optional numeric value or vector indicating which item
#' response(s) are anchors (e.g., \code{anchor = 1}).
#' @param final_control Control parameters.
#' @param samp_size Sample size in data set.
#' @param num_responses Number of responses for each item.
#' @param num_items Number of items in data set.
#' @param num_quad Number of quadrature points used for approximating the
#' latent variable.
#' @param num_predictors Number of predictors.
#' @param num_tau Logical indicating whether the minimum tau value needs to be
#' identified during the regDIF procedure.
#' @param max_tau Logical indicating whether to output the maximum tau value
#' needed to remove all DIF from the model.
#'
#' @return a \code{"list"} of estimates obtained from the maximization step using coordinate
#' descent
#'
#' @keywords internal
#'
Mstep_cd2 <-
function(p,
item_data,
pred_data,
mean_predictors,
var_predictors,
eout,
item_type,
pen_type,
tau_current,
pen,
alpha,
gamma,
anchor,
final_control,
samp_size,
num_responses,
num_items,
num_quad,
num_predictors,
num_tau,
max_tau) {
# Set under-identified model to FALSE until proven TRUE.
under_identified <- FALSE
# Update theta and etable.
theta <- eout$theta
etable <- eout$etable
p_cd <- p
# Last Mstep
if(max_tau) id_max_z <- 0
# CD Maximization and print settings.
lastp_cd_all <- p_cd
eps_cd_all <- Inf
iter_cd_all <- 1
# Loop until convergence or maximum number of iterations.
while(eps_cd_all > final_control$tol){
# Latent mean impact updates.
for(cov in 1:ncol(mean_predictors)) {
# CD Maximization and print settings.
lastp_cd <- p_cd
eps_cd <- Inf
iter_cd <- 1
# Loop until convergence or maximum number of iterations.
while(eps_cd > final_control$tol){
anl_deriv <- d_alpha(c(p_cd[[num_items+1]],p_cd[[num_items+2]]),
etable,
theta,
mean_predictors,
var_predictors,
cov=cov,
samp_size,
num_items,
num_quad)
p_cd[[num_items+1]][[cov]] <-
p_cd[[num_items+1]][[cov]] - anl_deriv[[1]]/anl_deriv[[2]]
# Update and check for convergence: Calculate the difference
# in parameter estimates from current to previous.
eps_cd = sqrt(sum((unlist(p_cd)-unlist(lastp_cd))^2))
# Update parameter list.
lastp_cd <- p_cd
# Update the iteration number.
iter_cd = iter_cd + 1
}
}
# Latent variance impact updates.
for(cov in 1:ncol(var_predictors)) {
# CD Maximization and print settings.
lastp_cd <- p_cd
eps_cd <- Inf
iter_cd <- 1
# Loop until convergence or maximum number of iterations.
while(eps_cd > final_control$tol){
anl_deriv <- d_phi(c(p_cd[[num_items+1]],p_cd[[num_items+2]]),
etable,
theta,
mean_predictors,
var_predictors,
cov=cov,
samp_size,
num_items,
num_quad)
p_cd[[num_items+2]][[cov]] <-
p_cd[[num_items+2]][[cov]] - anl_deriv[[1]]/anl_deriv[[2]]
# Update and check for convergence: Calculate the difference
# in parameter estimates from current to previous.
eps_cd = sqrt(sum((unlist(p_cd)-unlist(lastp_cd))^2))
# Update parameter list.
lastp_cd <- p_cd
# Update the iteration number.
iter_cd = iter_cd + 1
}
}
# Item response updates.
for (item in 1:num_items) {
# Get posterior probabilities.
# Obtain E-tables for each response category.
if(item_type[item] != "cfa") {
etable_item <- lapply(1:num_responses[item], function(x) etable)
for(resp in 1:num_responses[item]) {
etable_item[[resp]][which(
!(item_data[,item] == resp)), ] <- 0
}
}
if(item_type[item] == "cfa") {
# Intercept updates.
anl_deriv <- d_mu_gaussian("c0",
p[[item]],
etable,
theta,
item_data[,item],
pred_data,
cov=NULL,
samp_size,
num_items,
num_quad)
p[[item]][[1]] <- p[[item]][[1]] - anl_deriv[[1]]/anl_deriv[[2]]
# Slope updates.
if(item_type[item] != "Rasch") {
a0_parms <- grep(paste0("a0_itm",item,"_"),names(p[[item]]),fixed=T)
anl_deriv <- d_mu_gaussian("a0",
p[[item]],
etable_item,
theta,
item_data[,item],
pred_data,
cov=NULL,
samp_size,
num_items,
num_quad)
p[[item]] <- p[[item]][a0_parms] - anl_deriv[[1]]/anl_deriv[[2]]
}
# Residual updates.
s0_parms <- grep(paste0("s0_itm",item,"_"),names(p[[item]]),fixed=T)
anl_deriv <- d_sigma_gaussian("s0",
p[[item]],
etable_item,
theta,
item_data[,item],
pred_data,
cov=NULL,
samp_size,
num_items,
num_quad)
p[[item]][s0_parms][1] <- p[[item]][s0_parms][1] -
anl_deriv[[1]]/anl_deriv[[2]]
if(p[[item]][s0_parms][[1]] < 0) p[[item]][s0_parms][[1]] <- 1
if(!any(item == anchor)) {
# Residual DIF updates.
for(cov in 1:num_predictors) {
s1_parms <-
grep(paste0("s1_itm",item,"_cov",cov),names(p[[item]]),fixed=T)
anl_deriv <- d_sigma_gaussian("s1",
p[[item]],
etable_item,
theta,
item_data[,item],
pred_data,
cov=cov,
samp_size,
num_items,
num_quad)
p[[item]][s1_parms][1] <- p[[item]][s1_parms][1] -
anl_deriv[[1]]/anl_deriv[[2]]
}
p2 <- unlist(p)
# Intercept DIF updates.
for(cov in 1:num_predictors){
# End routine if only one anchor item is left on each covariate
# for each item parameter.
if(is.null(anchor) &
sum(p2[grep(paste0("c1(.*?)cov",cov),names(p2))] != 0) >
(num_items - 1) &
alpha == 1 &&
(length(final_control$start.values) == 0 || pen > 1) &&
num_tau >= 10){
under_identified <- TRUE
break
}
c1_parms <-
grep(paste0("c1_itm",item,"_cov",cov),names(p[[item]]),fixed=T)
anl_deriv <- d_mu_gaussian("c1",
p[[item]],
etable_item,
theta,
item_data[,item],
pred_data,
cov,
samp_size,
num_items,
num_quad)
z <- p[[item]][c1_parms] - anl_deriv[[1]]/anl_deriv[[2]]
if(max_tau) id_max_z <- c(id_max_z,z)
p[[item]][c1_parms] <-
ifelse(pen_type == "lasso",
soft_threshold(z,alpha,tau_current),
firm_threshold(z,alpha,tau_current,gamma))
}
# Slope DIF updates.
for(cov in 1:num_predictors){
# End routine if only one anchor item is left on each covariate
# for each item parameter.
if(is.null(anchor) &
sum(p2[grep(paste0("a1(.*?)cov",cov),names(p2))] != 0) >
(num_items - 1) &
alpha == 1 &&
(length(final_control$start.values) == 0 || pen > 1) &&
num_tau >= 10){
under_identified <- TRUE
break
}
if(item_type[item] != "Rasch"){
a1_parms <-
grep(paste0("a1_itm",item,"_cov",cov),names(p[[item]]),fixed=T)
anl_deriv <- d_mu_gaussian("a1",
p[[item]],
etable_item,
theta,
item_data[,item],
pred_data,
cov,
samp_size,
num_items,
num_quad)
z <- p[[item]][a1_parms] - anl_deriv[[1]]/anl_deriv[[2]]
if(max_tau) id_max_z <- c(id_max_z,z)
p[[item]][a1_parms] <- ifelse(pen_type == "lasso",
soft_threshold(z,alpha,tau_current),
firm_threshold(z,alpha,tau_current,gamma))
}
}
}
# Bernoulli responses.
} else if(item_type[item] == "2pl") {
# CD Maximization and print settings.
lastp_cd <- p_cd
eps_cd <- Inf
iter_cd <- 1
# Loop until convergence or maximum number of iterations.
while(eps_cd > final_control$tol){
# Intercept updates.
anl_deriv <- d_bernoulli("c0",
p_cd[[item]],
etable_item,
theta,
pred_data,
cov=0,
samp_size,
num_items,
num_quad)
p_cd[[item]][[1]] <- p_cd[[item]][[1]] - anl_deriv[[1]]/anl_deriv[[2]]
# Update and check for convergence: Calculate the difference
# in parameter estimates from current to previous.
eps_cd = sqrt(sum((unlist(p_cd)-unlist(lastp_cd))^2))
# Update parameter list.
lastp_cd <- p_cd
# Update the iteration number.
iter_cd = iter_cd + 1
}
# Slope updates.
if(item_type[item] != "Rasch") {
# CD Maximization and print settings.
lastp_cd <- p_cd
eps_cd <- Inf
iter_cd <- 1
# Loop until convergence or maximum number of iterations.
while(eps_cd > final_control$tol){
anl_deriv <- d_bernoulli("a0",
p_cd[[item]],
etable_item,
theta,
pred_data,
cov=0,
samp_size,
num_items,
num_quad)
p_cd[[item]][[2]] <- p_cd[[item]][[2]] - anl_deriv[[1]]/anl_deriv[[2]]
# Update and check for convergence: Calculate the difference
# in parameter estimates from current to previous.
eps_cd = sqrt(sum((unlist(p_cd)-unlist(lastp_cd))^2))
# Update parameter list.
lastp_cd <- p_cd
# Update the iteration number.
iter_cd = iter_cd + 1
}
}
if(!any(item == anchor)) {
p2_cd <- unlist(p_cd)
# Intercept DIF updates.
for(cov in 1:num_predictors) {
# End routine if only one anchor item is left on each covariate
# for each item parameter.
if(is.null(anchor) &
sum(p2_cd[grep(paste0("c1(.*?)cov",cov),names(p2_cd))] != 0) >
(num_items - 1) &
alpha == 1 &&
(length(final_control$start.values) == 0 || pen > 1) &&
num_tau >= 10){
under_identified <- TRUE
break
}
# CD Maximization and print settings.
lastp_cd <- p_cd
eps_cd <- Inf
iter_cd <- 1
# Loop until convergence or maximum number of iterations.
while(eps_cd > final_control$tol){
anl_deriv <- d_bernoulli("c1",
p_cd[[item]],
etable_item,
theta,
pred_data,
cov,
samp_size,
num_items,
num_quad)
z <- p_cd[[item]][[2+cov]] - anl_deriv[[1]]/anl_deriv[[2]]
if(max_tau) id_max_z <- c(id_max_z,z)
p_cd[[item]][[2+cov]] <- ifelse(pen_type == "lasso",
soft_threshold(z,alpha,tau_current),
firm_threshold(z,alpha,tau_current,gamma))
# Update and check for convergence: Calculate the difference
# in parameter estimates from current to previous.
eps_cd = sqrt(sum((unlist(p_cd)-unlist(lastp_cd))^2))
# Update parameter list.
lastp_cd <- p_cd
# Update the iteration number.
iter_cd = iter_cd + 1
}
}
# Slope DIF updates.
for(cov in 1:num_predictors) {
# End routine if only one anchor item is left on each covariate
# for each item parameter.
if(is.null(anchor) &
sum(p2_cd[grep(paste0("a1(.*?)cov",cov),names(p2_cd))] != 0) >
(num_items - 1) &
alpha == 1 &&
(length(final_control$start.values) == 0 || pen > 1) &&
num_tau >= 10){
under_identified <- TRUE
break
}
if(item_type[item] != "Rasch") {
# CD Maximization and print settings.
lastp_cd <- p_cd
eps_cd <- Inf
iter_cd <- 1
# Loop until convergence or maximum number of iterations.
while(eps_cd > final_control$tol){
anl_deriv <- d_bernoulli("a1",
p_cd[[item]],
etable_item,
theta,
pred_data,
cov,
samp_size,
num_items,
num_quad)
z <- p_cd[[item]][[2+num_predictors+cov]] -
anl_deriv[[1]]/anl_deriv[[2]]
if(max_tau) id_max_z <- c(id_max_z,z)
p_cd[[item]][[2+num_predictors+cov]] <-
ifelse(pen_type == "lasso",
soft_threshold(z,alpha,tau_current),
firm_threshold(z,alpha,tau_current,gamma))
# Update and check for convergence: Calculate the difference
# in parameter estimates from current to previous.
eps_cd = sqrt(sum((unlist(p_cd)-unlist(lastp_cd))^2))
# Update parameter list.
lastp_cd <- p_cd
# Update the iteration number.
iter_cd = iter_cd + 1
}
}
}
}
# Categorical.
} else {
# Intercept updates.
anl_deriv <- d_categorical("c0",
p[[item]],
etable_item,
theta,
pred_data,
thr=-1,
cov=-1,
samp_size,
num_responses[[item]],
num_items,
num_quad)
p[[item]][[1]] <- p[[item]][[1]] - anl_deriv[[1]]/anl_deriv[[2]]
# Threshold updates.
for(thr in 2:(num_responses[item]-1)) {
anl_deriv <- d_categorical("c0",
p[[item]],
etable_item,
theta,
pred_data,
thr=thr,
cov=-1,
samp_size,
num_responses[[item]],
num_items,
num_quad)
p[[item]][[thr]] <- p[[item]][[thr]] - anl_deriv[[1]]/anl_deriv[[2]]
}
# Slope updates.
if(item_type[item] != "Rasch") {
anl_deriv <- d_categorical("a0",
p[[item]],
etable_item,
theta,
pred_data,
thr=-1,
cov=-1,
samp_size,
num_responses[[item]],
num_items,
num_quad)
p[[item]][[num_responses[[item]]]] <-
p[[item]][[num_responses[[item]]]] - anl_deriv[[1]]/anl_deriv[[2]]
}
if(!any(item == anchor)){
p2 <- unlist(p)
# Intercept DIF updates.
for(cov in 1:num_predictors) {
# End routine if only one anchor item is left on each covariate
# for each item parameter.
if(is.null(anchor) &
sum(p2[grep(paste0("c1(.*?)cov",cov),names(p2))] != 0) >
(num_items - 1) &
alpha == 1 &&
(length(final_control$start.values) == 0 || pen > 1) &&
num_tau >= 10){
under_identified <- TRUE
break
}
anl_deriv <- d_categorical("c1",
p[[item]],
etable_item,
theta,
pred_data,
thr=-1,
cov,
samp_size,
num_responses[[item]],
num_items,
num_quad)
z <- p[[item]][[num_responses[[item]]+cov]] -
anl_deriv[[1]]/anl_deriv[[2]]
if(max_tau) id_max_z <- c(id_max_z,z)
p[[item]][[num_responses[[item]]+cov]] <-
ifelse(pen_type == "lasso",
soft_threshold(z,alpha,tau_current),
firm_threshold(z,alpha,tau_current,gamma))
}
# Slope DIF updates.
for(cov in 1:num_predictors) {
# End routine if only one anchor item is left on each covariate
# for each item parameter.
if(is.null(anchor) &
sum(p2[grep(paste0("a1(.*?)cov",cov),names(p2))] != 0) >
(num_items - 1) &
alpha == 1 &&
(length(final_control$start.values) == 0 || pen > 1) &&
num_tau >= 10){
under_identified <- TRUE
break
}
if(item_type[item] != "Rasch") {
anl_deriv <- d_categorical("a1",
p[[item]],
etable_item,
theta,
pred_data,
thr=-1,
cov,
samp_size,
num_responses[[item]],
num_items,
num_quad)
z <- p[[item]][[length(p[[item]])-ncol(pred_data)+cov]] -
anl_deriv[[1]]/anl_deriv[[2]]
if(max_tau) id_max_z <- c(id_max_z,z)
p[[item]][[length(p[[item]])-ncol(pred_data)+cov]] <-
ifelse(pen_type == "lasso",
soft_threshold(z,alpha,tau_current),
firm_threshold(z,alpha,tau_current,gamma))
}
}
}
# Gaussian responses.
}
}
p_cd_all <- p_cd
# Update and check for convergence: Calculate the difference
# in parameter estimates from current to previous.
eps_cd_all = sqrt(sum((unlist(p_cd_all)-unlist(lastp_cd_all))^2))
# Update parameter list.
lastp_cd_all <- p_cd_all
print(paste0("CD iter: ",iter_cd_all))
# Update the iteration number.
iter_cd_all = iter_cd_all + 1
}
if(max_tau) {
id_max_z <- max(abs(id_max_z))
return(id_max_z)
} else {
return(list(p=p_cd_all,
under_identified=under_identified))
}
}
|
# Deploy the Shiny app in the current working directory with rsconnect.
library(rsconnect)
rsconnect::deployApp('.')
# NOTE(review): the bare `y` below looks like a recorded console answer to
# deployApp's interactive confirmation prompt; as R source it would error
# with "object 'y' not found" -- confirm intent before running as a script.
y
| /HWL7_RShiny/shinyapp_conn.R | no_license | brisenodaniel/Data_Viz | R | false | false | 47 | r | library(rsconnect)
rsconnect::deployApp('.')
y
|
# Bayesian regressions of black-hole mass (logMBH) on galaxy properties with
# Stan: one joint multi-predictor fit, then single-predictor fits for selected
# variables, plotting observed vs. posterior-mean fitted values side by side.
# NOTE(review): depends on objects created by the sourced files (data_MBH,
# galaxynames, types) and on stan() being made available by stansettings.R --
# confirm before running standalone.  aes_string() is deprecated in current
# ggplot2 releases; fine for the older version this was written against.
library("ggplot2")
library("gridExtra")
source("stansettings.R")
source("data_MBH.R")
names <- c("FUVcolor","NUVcolor","Logsigma","LogRe","LogLk",
           "M","LogMassk")
errnames <- c("errFUVcolor","errNUVcolor","errLogsigma","errLogRe","errLogLk",
              "errM","errLogMassk")
#JOINT FIT
# Column indices of the predictor columns in data_MBH; each measurement-error
# column sits immediately after its value column (hence pars+1).
pars <- c(4,6,8,10,12,14,16)
errpars <- pars+1
M <- length(pars)
N <- length(data_MBH[["logMBH"]])
xobs <- data_MBH[pars]
xerr <- data_MBH[errpars]
xobs <- t(data.matrix(xobs))
xerr <- t(data.matrix(xerr))
yobs <- data_MBH[[1]]
yerr1 <- data_MBH[[2]]
yerr2 <- data_MBH[[3]]
# Data list for the joint (all-predictor) Stan model.
standatajoint <- list(N=N,M=M,xobs=xobs,xerr=xerr,yobs=yobs,yerr1=yerr1,yerr2=yerr2)
stanfit_joint <- stan("joint_fit.stan",data=standatajoint,iter=4000,warmup=2000)
stanfit_joint_sum_mean <- summary(stanfit_joint)$summary[,"mean"]
# Recover posterior means of the latent true values (yreal, xreal) by
# reconstructing their indexed parameter names.
yreal_joint_names <- vector(mode="character",length=N)
for(i in 1:N){yreal_joint_names[i] <- paste("yreal[",i,"]",sep="")}
yreal_joint <- stanfit_joint_sum_mean[yreal_joint_names]
xreal_joint <- matrix(data=NA,nrow=N,ncol=length(names))
for(j in 1:length(names)){
  xreal_joint_names <- vector(mode="character",length=N)
  for(i in 1:N){xreal_joint_names[i] <- paste("xreal[",j,",",i,"]",sep="")}
  xreal_joint[,j] <- stanfit_joint_sum_mean[xreal_joint_names]
}
data_joint <- data.frame(xreal_joint)
names_joint <- paste(names,"_joint",sep="")
colnames(data_joint) <- names_joint
data_joint["yreal_joint"] <- yreal_joint
#We generate a column with identical values for the legend
data_joint["yreal"] <- "yreal"
data_joint["galaxynames"] <- galaxynames #this is for merging
data_joint_merged <- merge(data_MBH,data_joint,by="galaxynames")
#INDIVIDUAL FITS
yobs <- data_MBH[["logMBH"]]
N <- length(yobs)
yerr1 <- data_MBH[["err1logMBH"]]
yerr2 <- data_MBH[["err2logMBH"]]
plots <- list()
stanfits_ind <- list()
# S: plotted point size.  NOTE(review): s is assigned but never used below.
S <- 4
s <- 2
plotting_vars <- c(5,6,7) #these are the variables to plot
for(i in plotting_vars){
  xobs <- data_MBH[[names[i]]]
  xerr <- data_MBH[[errnames[i]]]
  # NOTE(review): iter and warmup are sampler settings already passed to
  # stan() below; including them in the data list is almost certainly
  # unintended -- verify against the .stan data block.
  standata <- list(N=N,M=1,yobs=yobs,yerr1=yerr1,yerr2=yerr2,xobs=xobs,xerr=xerr,iter=4000,warmup=2000)
  stanfit_ind <- stan("individual_fit.stan",data = standata,iter=4000,warmup=2000)
  stanfits_ind <- c(stanfits_ind, stanfit_ind)
  stanfit_ind_sum_mean <- summary(stanfit_ind)$summary[,"mean"]
  # Posterior means of the latent true values for this single-predictor fit.
  yreal_ind_names <- vector(mode="character",length=N)
  for(k in 1:N){yreal_ind_names[k] <- paste("yreal[",k,"]",sep="")}
  yreal_ind <- stanfit_ind_sum_mean[yreal_ind_names]
  xreal_ind_names <- vector(mode="character",length=N)
  for(k in 1:N){xreal_ind_names[k] <- paste("xreal[",k,"]",sep="")}
  xreal_ind <- stanfit_ind_sum_mean[xreal_ind_names]
  data_ind <- data.frame(yreal_ind,xreal_ind,row.names=NULL)
  data_ind["yreal"] <- "yreal" #this is for legend
  data_ind["galaxynames"] <- galaxynames #this is for merging
  data_ind_merged <- merge(data_MBH,data_ind,by="galaxynames")
  #Individual plot
  # Error bars (dashed), observed points colored by `types`, the individual
  # fit's posterior-mean regression line (long-dashed green), and arrows from
  # each observed point to its fitted latent position.
  p <- ggplot(data=data_ind_merged)+
    geom_errorbarh(aes_string(xmin=paste(names[i],errnames[i],sep="-"),
                              xmax=paste(names[i],errnames[i],sep="+"),y="logMBH"),
                   linetype="dashed")+
    geom_errorbar(aes_string(x=names[i],ymin=paste("logMBH","err1logMBH",sep="-"),
                             ymax=paste("logMBH","err1logMBH",sep="+")),
                  linetype="dashed")+
    geom_point(aes_string(x=names[i],y="logMBH",color="types"),size=S)+
    xlab(names[i])+ylab("Black Hole Mass")+
    geom_abline(aes_string(slope=stanfit_ind_sum_mean[["beta1"]],intercept=stanfit_ind_sum_mean[["beta0"]]),
                size = 1.1, color = "green",linetype="longdash")+
    geom_segment(aes_string(x=names[i],y="logMBH",xend="xreal_ind",yend="yreal_ind"),
                 arrow=arrow(length=unit(0.2,"cm")),size=1.2)+
    ggtitle(paste("Individual fit for",names[i]))+
    scale_color_manual(name="Color legend",values=c("Yobs Type1"="cyan","Yobs upper bound"="red"))+
    theme(legend.text=element_text(size=11),
          legend.title=element_text(size=14),
          legend.position = "none",
          legend.justification = "left",
          legend.direction = "horizontal",
          title=element_text(size=14))
  plots <- c(plots,list(p))
  #Joint plot
  # Same layout but with the joint model's line (solid green) overlaid on the
  # individual fit's line (long-dashed green) for comparison.
  p <- ggplot(data=data_joint_merged)+
    geom_errorbarh(aes_string(xmin=paste(names[i],errnames[i],sep="-"),
                              xmax=paste(names[i],errnames[i],sep="+"),y="logMBH"),
                   linetype="dashed")+
    geom_errorbar(aes_string(x=names[i], ymin=paste("logMBH","err1logMBH",sep="-"),
                             ymax=paste("logMBH","err1logMBH",sep="+")),
                  linetype="dashed")+
    geom_point(aes_string(x=names[i],y="logMBH",color="types"),size=S)+
    xlab(names[i])+ylab("Black Hole Mass")+
    geom_abline(aes_string(slope=stanfit_joint_sum_mean[[paste("beta1[",i,"]",sep="")]],
                           intercept=stanfit_joint_sum_mean[[paste("beta0[",i,"]",sep="")]]),
                size = 1.1, color = "green")+
    geom_abline(aes_string(slope=stanfit_ind_sum_mean[["beta1"]],intercept=stanfit_ind_sum_mean[["beta0"]]),
                size = 1.1, color = "green",linetype="longdash")+
    geom_segment(aes_string(x=names[i],y="logMBH",xend=names_joint[i],yend="yreal_joint"),
                 arrow=arrow(length=unit(0.2,"cm")),size=1.2)+
    ggtitle(paste("Joint fit for",names[i]))+
    scale_color_manual(name="Color legend",values=c("Yobs Type1"="cyan","Yobs upper bound"="red"))+
    theme(legend.text=element_text(size=11),
          legend.title=element_text(size=14),
          legend.position = "none",
          legend.justification = "left",
          legend.direction = "horizontal",
          title=element_text(size=14))
  plots <- c(plots,list(p))
}
# Arrange all individual/joint plot pairs in a two-column grid.
do.call("grid.arrange", c(plots, ncol=2))
| /IndividualJointPlots.R | no_license | r-buitrago/Bayesian-Data-Analysis-of-Black-Hole-Masses | R | false | false | 5,950 | r | library("ggplot2")
library("gridExtra")
source("stansettings.R")
source("data_MBH.R")
names <- c("FUVcolor","NUVcolor","Logsigma","LogRe","LogLk",
"M","LogMassk")
errnames <- c("errFUVcolor","errNUVcolor","errLogsigma","errLogRe","errLogLk",
"errM","errLogMassk")
#JOINT FIT
pars <- c(4,6,8,10,12,14,16)
errpars <- pars+1
M <- length(pars)
N <- length(data_MBH[["logMBH"]])
xobs <- data_MBH[pars]
xerr <- data_MBH[errpars]
xobs <- t(data.matrix(xobs))
xerr <- t(data.matrix(xerr))
yobs <- data_MBH[[1]]
yerr1 <- data_MBH[[2]]
yerr2 <- data_MBH[[3]]
standatajoint <- list(N=N,M=M,xobs=xobs,xerr=xerr,yobs=yobs,yerr1=yerr1,yerr2=yerr2)
stanfit_joint <- stan("joint_fit.stan",data=standatajoint,iter=4000,warmup=2000)
stanfit_joint_sum_mean <- summary(stanfit_joint)$summary[,"mean"]
yreal_joint_names <- vector(mode="character",length=N)
for(i in 1:N){yreal_joint_names[i] <- paste("yreal[",i,"]",sep="")}
yreal_joint <- stanfit_joint_sum_mean[yreal_joint_names]
xreal_joint <- matrix(data=NA,nrow=N,ncol=length(names))
for(j in 1:length(names)){
xreal_joint_names <- vector(mode="character",length=N)
for(i in 1:N){xreal_joint_names[i] <- paste("xreal[",j,",",i,"]",sep="")}
xreal_joint[,j] <- stanfit_joint_sum_mean[xreal_joint_names]
}
data_joint <- data.frame(xreal_joint)
names_joint <- paste(names,"_joint",sep="")
colnames(data_joint) <- names_joint
data_joint["yreal_joint"] <- yreal_joint
#We generate a column with identical values for the legend
data_joint["yreal"] <- "yreal"
data_joint["galaxynames"] <- galaxynames #this is for merging
data_joint_merged <- merge(data_MBH,data_joint,by="galaxynames")
#INDIVIDUAL FITS
yobs <- data_MBH[["logMBH"]]
N <- length(yobs)
yerr1 <- data_MBH[["err1logMBH"]]
yerr2 <- data_MBH[["err2logMBH"]]
plots <- list()
stanfits_ind <- list()
S <- 4
s <- 2
plotting_vars <- c(5,6,7) #these are the variables to plot
for(i in plotting_vars){
xobs <- data_MBH[[names[i]]]
xerr <- data_MBH[[errnames[i]]]
standata <- list(N=N,M=1,yobs=yobs,yerr1=yerr1,yerr2=yerr2,xobs=xobs,xerr=xerr,iter=4000,warmup=2000)
stanfit_ind <- stan("individual_fit.stan",data = standata,iter=4000,warmup=2000)
stanfits_ind <- c(stanfits_ind, stanfit_ind)
stanfit_ind_sum_mean <- summary(stanfit_ind)$summary[,"mean"]
yreal_ind_names <- vector(mode="character",length=N)
for(k in 1:N){yreal_ind_names[k] <- paste("yreal[",k,"]",sep="")}
yreal_ind <- stanfit_ind_sum_mean[yreal_ind_names]
xreal_ind_names <- vector(mode="character",length=N)
for(k in 1:N){xreal_ind_names[k] <- paste("xreal[",k,"]",sep="")}
xreal_ind <- stanfit_ind_sum_mean[xreal_ind_names]
data_ind <- data.frame(yreal_ind,xreal_ind,row.names=NULL)
data_ind["yreal"] <- "yreal" #this is for legend
data_ind["galaxynames"] <- galaxynames #this is form merging
data_ind_merged <- merge(data_MBH,data_ind,by="galaxynames")
#Individual plot
p <- ggplot(data=data_ind_merged)+
geom_errorbarh(aes_string(xmin=paste(names[i],errnames[i],sep="-"),
xmax=paste(names[i],errnames[i],sep="+"),y="logMBH"),
linetype="dashed")+
geom_errorbar(aes_string(x=names[i],ymin=paste("logMBH","err1logMBH",sep="-"),
ymax=paste("logMBH","err1logMBH",sep="+")),
linetype="dashed")+
geom_point(aes_string(x=names[i],y="logMBH",color="types"),size=S)+
xlab(names[i])+ylab("Black Hole Mass")+
geom_abline(aes_string(slope=stanfit_ind_sum_mean[["beta1"]],intercept=stanfit_ind_sum_mean[["beta0"]]),
size = 1.1, color = "green",linetype="longdash")+
geom_segment(aes_string(x=names[i],y="logMBH",xend="xreal_ind",yend="yreal_ind"),
arrow=arrow(length=unit(0.2,"cm")),size=1.2)+
ggtitle(paste("Individual fit for",names[i]))+
scale_color_manual(name="Color legend",values=c("Yobs Type1"="cyan","Yobs upper bound"="red"))+
theme(legend.text=element_text(size=11),
legend.title=element_text(size=14),
legend.position = "none",
legend.justification = "left",
legend.direction = "horizontal",
title=element_text(size=14))
plots <- c(plots,list(p))
#Joint plot
p <- ggplot(data=data_joint_merged)+
geom_errorbarh(aes_string(xmin=paste(names[i],errnames[i],sep="-"),
xmax=paste(names[i],errnames[i],sep="+"),y="logMBH"),
linetype="dashed")+
geom_errorbar(aes_string(x=names[i], ymin=paste("logMBH","err1logMBH",sep="-"),
ymax=paste("logMBH","err1logMBH",sep="+")),
linetype="dashed")+
geom_point(aes_string(x=names[i],y="logMBH",color="types"),size=S)+
xlab(names[i])+ylab("Black Hole Mass")+
geom_abline(aes_string(slope=stanfit_joint_sum_mean[[paste("beta1[",i,"]",sep="")]],
intercept=stanfit_joint_sum_mean[[paste("beta0[",i,"]",sep="")]]),
size = 1.1, color = "green")+
geom_abline(aes_string(slope=stanfit_ind_sum_mean[["beta1"]],intercept=stanfit_ind_sum_mean[["beta0"]]),
size = 1.1, color = "green",linetype="longdash")+
geom_segment(aes_string(x=names[i],y="logMBH",xend=names_joint[i],yend="yreal_joint"),
arrow=arrow(length=unit(0.2,"cm")),size=1.2)+
ggtitle(paste("Joint fit for",names[i]))+
scale_color_manual(name="Color legend",values=c("Yobs Type1"="cyan","Yobs upper bound"="red"))+
theme(legend.text=element_text(size=11),
legend.title=element_text(size=14),
legend.position = "none",
legend.justification = "left",
legend.direction = "horizontal",
title=element_text(size=14))
plots <- c(plots,list(p))
}
do.call("grid.arrange", c(plots, ncol=2))
|
#' Generate simulated weather record.
#'
#' Generate time series of temperature measurements
#'
#' @param timebase A vector of dates / times in decimal julian day. Either this
#' or \code{years} must be supplied.
#' @param years A scalar, the number of years to model. Either this or
#' \code{timebase} must be supplied.
#' @param spectrum A data frame of \code{cbind(frequency, cyc_range, phase,
#' tau)}. Day frequency = 1, year frequency = 1/325.25. Cycling range = 2 *
#' amplitude. Phase is -pi to pi, as output by nlts_plus_phase. tau is lag,
#' out output by nlts_plus_phase.
#' @param mean The mean temperature. Use \code{t_intercept} for trended data.
#' Default 0.
#' @param t_int T intercept for use with linear trend.
#' @param t_slope Slope for use with linear trend.
#' @param mean_resid Mean residual. Currently rnorm with sd = 1, without
#' autocorrelation. Next version will have ARIMA autocorrelation.
#' @return a numeric vector of the number of days since 1/1/1960, dates before
#' this are negative.
#' @export
#' @examples
#' library(ggplot2)
#' tropical_spectrum <- data.frame(frequency = c(1, 1 / 365),
#' cyc_range = c(6.53, 4.1),
#' phase = c(0.421,0.189),
#' tau = c(0,0))
#' tropical_mean <- 25.845
#' n_temperate_spectrum <- data.frame(frequency = c(1, 1 / 365),
#' cyc_range = c(5.33, 26.36),
#' phase = c(0.319,1.057),
#' tau = c(0,0))
#' n_temperate_mean <- 10.457
#' tropical_ts <- gen_cycling_rec(years = 2,
#' spectrum = tropical_spectrum,
#' mean = tropical_mean)
#' tropical_ts$region <- "Tropical"
#' n_temperate_ts <- gen_cycling_rec(years = 2,
#' spectrum = n_temperate_spectrum,
#' mean = n_temperate_mean)
#' n_temperate_ts$region <- "North Temperate"
#' ts_data <- rbind(tropical_ts,
#' n_temperate_ts)
#' ts_data$region <- factor(ts_data$region,
#'                          levels = c("Tropical",
#'                                     "North Temperate"))
#' ggplot(data = ts_data,
#' aes(x = jday,
#' y = temperature,
#' color = region)) +
#' geom_line(alpha = 0.7)
gen_cycling_rec <- function(timebase = NULL,
                            years = NULL,
                            spectrum = stop("data frame of data.frame(frequency, cyc_range, phase, tau) required"),
                            mean = 0,
                            t_int = NULL,
                            t_slope = NULL,
                            mean_resid = NULL) {
  # Build an hourly time base (decimal days) when none is supplied.
  if (is.null(timebase)) {
    if (is.null(years)) {stop("Either timebase or years must be supplied")}
    timebase <- seq_len(years * 365 * 24) / 24
  }
  # Start from the mean temperature everywhere.
  temperatures <- rep(mean, length(timebase))
  # Optional linear trend: slope * time + intercept.
  if (!is.null(t_int)) {
    if (is.null(t_slope)) {stop("If t_int is supplied, t_slope must also be supplied")}
    # BUG FIX: the original referenced the undefined object `t_intercept`;
    # the intercept argument is named `t_int`, so any trended call errored.
    temperatures <- temperatures + ((t_slope * timebase) + t_int)
  }
  # Superimpose each spectral component; cyc_range is 2 * amplitude per the
  # roxygen docs above.  (Looped row-by-row to save memory, as in the original.)
  for (i in seq_len(nrow(spectrum))) {
    temperatures <- temperatures + (spectrum$cyc_range[i] *
                      cos(2 * pi * spectrum$frequency[i] *
                            (timebase - spectrum$tau[i]) +
                            spectrum$phase[i]))
  }
  # Add Gaussian residual noise (sd = 1) when a mean residual is requested.
  if (!is.null(mean_resid)) {
    temperatures <- temperatures + rnorm(length(temperatures),
                                         mean = mean_resid,
                                         sd = 1)
  }
  return(data.frame(jday = timebase,
                    temperature = temperatures))
}
#' Lomb Scargle periodogram, with phase.
#'
#' Determine the periodogram of a time series using the Lomg and Scargle
#' least-square method, also return phase. This is derived from an old version
#' from the \pkg{nlts} package.
#' Warning: This is memory intensive and slow if
#' the entire spectrum is calculated, especially if it is more than 10 years,
#' and sampling is every 4 hours or more frequent.
#'
#' @param temperature A vector of temperature values.
#' @param timebase A vector of decimal Julian days. (ex: 6AM, 31s day: 31.25)
#' @param freq A vector of frequencies to test. Set to NULL for full spectrum.
#' @return A "lomb" object with temperature cycling (2 * amplitude) value,
#' freqency, phase, p (white noise), and tau (lag).
#' @export
#' @examples
#' tropical_spectrum <- data.frame(frequency = c(1, 1 / 365),
#' cyc_range = c(6.53, 4.1),
#' phase = c(0.421,0.189),
#' tau = c(0,0))
#' tropical_mean <- 25.845
#' tropical_ts <- gen_cycling_rec(years = 2,
#' spectrum = tropical_spectrum,
#' mean = tropical_mean)
#' tropical_lomb <- spec_lomb_phase(tropical_ts$temperature,
#'                                  tropical_ts$jday)
#' tropical_lomb
#' @export
#'
spec_lomb_phase <- function (temperature = stop("Temperatures missing"),
                        timebase = stop("Julian days missing"),
                        freq = c(1, 1 / 365.25)) {
  # Default frequency grid: evenly spaced on [0, 0.5] cycles/day with one bin
  # per half "year" of record span, used when the caller passes freq = NULL.
  if (is.null(freq)) {
    nyear <- max(timebase) - min(timebase) + 1
    f <- seq(0, 0.5, length = nyear / 2)
  }
  else {
    f <- freq
  }
  # Input validation: matching lengths, frequencies within [0, 1], drop f = 0.
  if (length(temperature) != length(timebase))
    stop("temperature and timebase different lengths")
  if (min(f) < 0 || max(f) > 1)
    stop("freq must be between 0 and 1")
  if (min(f) == 0)
    f <- f[f > 0]
  # nt = number of samples, nf = number of candidate frequencies.
  nt <- length(timebase)
  nf <- length(f)
  # Estimated number of independent frequencies, used for the white-noise p.
  #from Horne and Baliunas 1986
  number.ind.freq <- -6.362 + (1.193*nt) + (0.00098*nt)^2
  ones.t <- rep(1, nt)
  ones.f <- rep(1, nf)
  omega <- 2 * pi * f
  # Center the series; hvar normalizes the periodogram power.
  hbar <- mean(temperature)
  hvar <- var(temperature)
  hdev <- temperature - hbar
  # Per-frequency time offset tau of the classic Lomb-Scargle formulation.
  two.omega.t <- 2 * omega %*% t(timebase)
  sum.sin <- sin(two.omega.t) %*% ones.t
  sum.cos <- cos(two.omega.t) %*% ones.t
  tau <- atan(sum.sin/sum.cos) / (2 * omega)
  # (nf x nt) matrices of omega * (t - tau) and their sine/cosine.
  t.m.tau <- (ones.f %*% t(timebase)) - (tau %*% t(ones.t))
  omega.ttau <- (omega %*% t(ones.t)) * t.m.tau
  sin.ott <- sin(omega.ttau)
  cos.ott <- cos(omega.ttau)
  # Normalized least-squares spectral power at each candidate frequency.
  z <- ((cos.ott %*% hdev)^2 / ((cos.ott^2) %*% ones.t) + (sin.ott %*% hdev)^2 / ((sin.ott^2) %*% ones.t)) / (2 * hvar)
  # Logical mask marking the peak-power frequency; NA positions are dropped.
  max <- z == max(z,
                  na.rm = TRUE)
  max <- max[is.na(max) == FALSE]
  # Sinusoid coefficients and phase at each frequency.
  #From Hocke K. 1998
  a <- (sqrt(2/nt) * (cos.ott %*% hdev)) / (((cos.ott^2) %*% ones.t)^(1/2))
  b <- (sqrt(2/nt) * (sin.ott %*% hdev)) / (((sin.ott^2) %*% ones.t)^(1/2))
  phi <- -atan2(b,a)
  # False-alarm probability of each power value against white noise.
  P <- 1 - ((1 - exp(-z[, 1]))^number.ind.freq)
  # cyc_range is the cycling range (2 * amplitude), per the roxygen docs above.
  res <- list(cyc_range = sqrt(z[, 1] * 2 * hvar / (nt / 2)),
            freq = f,
            f.max = f[max],
            per.max = 1 / f[max],
            phase = phi,
            p = P,
            tau = tau)
  class(res) <- "lomb"
  res
} | /analysis/TempcyclesAnalysis.R | no_license | lbuckley/ClimateBiology | R | false | false | 7,448 | r | #' Generate simulated weather record.
#'
#' Generate time series of temperature measurements
#'
#' @param timebase A vector of dates / times in decimal julian day. Either this
#' or \code{years} must be supplied.
#' @param years A scalar, the number of years to model. Either this or
#' \code{timebase} must be supplied.
#' @param spectrum A data frame of \code{cbind(frequency, cyc_range, phase,
#' tau)}. Day frequency = 1, year frequency = 1/325.25. Cycling range = 2 *
#' amplitude. Phase is -pi to pi, as output by nlts_plus_phase. tau is lag,
#' out output by nlts_plus_phase.
#' @param mean The mean temperature. Use \code{t_intercept} for trended data.
#' Default 0.
#' @param t_int T intercept for use with linear trend.
#' @param t_slope Slope for use with linear trend.
#' @param mean_resid Mean residual. Currently rnorm with sd = 1, without
#' autocorrelation. Next version will have ARIMA autocorrelation.
#' @return a numeric vector of the number of days since 1/1/1960, dates before
#' this are negative.
#' @export
#' @examples
#' library(ggplot2)
#' tropical_spectrum <- data.frame(frequency = c(1, 1 / 365),
#' cyc_range = c(6.53, 4.1),
#' phase = c(0.421,0.189),
#' tau = c(0,0))
#' tropical_mean <- 25.845
#' n_temperate_spectrum <- data.frame(frequency = c(1, 1 / 365),
#' cyc_range = c(5.33, 26.36),
#' phase = c(0.319,1.057),
#' tau = c(0,0))
#' n_temperate_mean <- 10.457
#' tropical_ts <- gen_cycling_rec(years = 2,
#' spectrum = tropical_spectrum,
#' mean = tropical_mean)
#' tropical_ts$region <- "Tropical"
#' n_temperate_ts <- gen_cycling_rec(years = 2,
#' spectrum = n_temperate_spectrum,
#' mean = n_temperate_mean)
#' n_temperate_ts$region <- "North Temperate"
#' ts_data <- rbind(tropical_ts,
#' n_temperate_ts)
#' ts_data$region <- factor(ts_data$region,
#'                          levels = c("Tropical",
#'                                     "North Temperate"))
#' ggplot(data = ts_data,
#' aes(x = jday,
#' y = temperature,
#' color = region)) +
#' geom_line(alpha = 0.7)
gen_cycling_rec <- function(timebase = NULL,
                            years = NULL,
                            spectrum = stop("data frame of data.frame(frequency, cyc_range, phase, tau) required"),
                            mean = 0,
                            t_int = NULL,
                            t_slope = NULL,
                            mean_resid = NULL) {
  # Build an hourly time base (decimal days) when none is supplied.
  if (is.null(timebase)) {
    if (is.null(years)) {stop("Either timebase or years must be supplied")}
    timebase <- seq_len(years * 365 * 24) / 24
  }
  # Start from the mean temperature everywhere.
  temperatures <- rep(mean, length(timebase))
  # Optional linear trend: slope * time + intercept.
  if (!is.null(t_int)) {
    if (is.null(t_slope)) {stop("If t_int is supplied, t_slope must also be supplied")}
    # BUG FIX: the original referenced the undefined object `t_intercept`;
    # the intercept argument is named `t_int`, so any trended call errored.
    temperatures <- temperatures + ((t_slope * timebase) + t_int)
  }
  # Superimpose each spectral component; cyc_range is 2 * amplitude per the
  # roxygen docs above.  (Looped row-by-row to save memory, as in the original.)
  for (i in seq_len(nrow(spectrum))) {
    temperatures <- temperatures + (spectrum$cyc_range[i] *
                      cos(2 * pi * spectrum$frequency[i] *
                            (timebase - spectrum$tau[i]) +
                            spectrum$phase[i]))
  }
  # Add Gaussian residual noise (sd = 1) when a mean residual is requested.
  if (!is.null(mean_resid)) {
    temperatures <- temperatures + rnorm(length(temperatures),
                                         mean = mean_resid,
                                         sd = 1)
  }
  return(data.frame(jday = timebase,
                    temperature = temperatures))
}
#' Lomb Scargle periodogram, with phase.
#'
#' Determine the periodogram of a time series using the Lomg and Scargle
#' least-square method, also return phase. This is derived from an old version
#' from the \pkg{nlts} package.
#' Warning: This is memory intensive and slow if
#' the entire spectrum is calculated, especially if it is more than 10 years,
#' and sampling is every 4 hours or more frequent.
#'
#' @param temperature A vector of temperature values.
#' @param timebase A vector of decimal Julian days. (ex: 6AM, 31s day: 31.25)
#' @param freq A vector of frequencies to test. Set to NULL for full spectrum.
#' @return A "lomb" object with temperature cycling (2 * amplitude) value,
#' freqency, phase, p (white noise), and tau (lag).
#' @export
#' @examples
#' tropical_spectrum <- data.frame(frequency = c(1, 1 / 365),
#' cyc_range = c(6.53, 4.1),
#' phase = c(0.421,0.189),
#' tau = c(0,0))
#' tropical_mean <- 25.845
#' tropical_ts <- gen_cycling_rec(years = 2,
#' spectrum = tropical_spectrum,
#' mean = tropical_mean)
#' tropical_lomb <- spec_lomb_phase(tropical_ts$temperature,
#'                                  tropical_ts$jday)
#' tropical_lomb
#' @export
#'
spec_lomb_phase <- function (temperature = stop("Temperatures missing"),
                             timebase = stop("Julian days missing"),
                             freq = c(1, 1 / 365.25)) {
  ## Lomb-Scargle periodogram for an unevenly sampled temperature record.
  ## Returns cycling amplitude, phase, lag (tau) and a white-noise
  ## false-alarm probability at each requested frequency.
  ##   temperature - vector of temperature values
  ##   timebase    - decimal Julian days of each observation
  ##   freq        - frequencies (cycles/day) to test; NULL scans the full
  ##                 spectrum up to 0.5 cycles/day
  if (is.null(freq)) {
    nyear <- max(timebase) - min(timebase) + 1
    f <- seq(0, 0.5, length = nyear / 2)
  } else {
    f <- freq
  }
  if (length(temperature) != length(timebase))
    stop("temperature and timebase different lengths")
  if (min(f) < 0 || max(f) > 1)
    stop("freq must be between 0 and 1")
  if (min(f) == 0)
    f <- f[f > 0]  # zero frequency is undefined (tau divides by omega)
  nt <- length(timebase)
  nf <- length(f)
  ## Effective number of independent frequencies, from Horne and Baliunas
  ## (1986): Ni = -6.362 + 1.193*N + 0.00098*N^2.
  ## BUG FIX: the quadratic term was coded as (0.00098*nt)^2, i.e.
  ## 0.00098^2 * nt^2, which badly underestimates Ni and distorts the
  ## false-alarm probability p.
  number.ind.freq <- -6.362 + (1.193 * nt) + (0.00098 * nt^2)
  ones.t <- rep(1, nt)
  ones.f <- rep(1, nf)
  omega <- 2 * pi * f
  hbar <- mean(temperature)
  hvar <- var(temperature)
  hdev <- temperature - hbar
  ## Time offset tau that makes the sine and cosine terms orthogonal at
  ## each frequency (standard Lomb-Scargle construction)
  two.omega.t <- 2 * omega %*% t(timebase)
  sum.sin <- sin(two.omega.t) %*% ones.t
  sum.cos <- cos(two.omega.t) %*% ones.t
  tau <- atan(sum.sin / sum.cos) / (2 * omega)
  t.m.tau <- (ones.f %*% t(timebase)) - (tau %*% t(ones.t))
  omega.ttau <- (omega %*% t(ones.t)) * t.m.tau
  sin.ott <- sin(omega.ttau)
  cos.ott <- cos(omega.ttau)
  ## Normalised Lomb-Scargle power at each frequency
  z <- ((cos.ott %*% hdev)^2 / ((cos.ott^2) %*% ones.t) +
        (sin.ott %*% hdev)^2 / ((sin.ott^2) %*% ones.t)) / (2 * hvar)
  ## Index of the spectral peak. which() both skips NAs and keeps the
  ## index aligned with f; the previous logical-mask approach dropped NA
  ## entries and could misalign the mask against f.
  peak <- which(z == max(z, na.rm = TRUE))
  ## Amplitude and phase estimation, from Hocke K. 1998
  a <- (sqrt(2 / nt) * (cos.ott %*% hdev)) / (((cos.ott^2) %*% ones.t)^(1 / 2))
  b <- (sqrt(2 / nt) * (sin.ott %*% hdev)) / (((sin.ott^2) %*% ones.t)^(1 / 2))
  phi <- -atan2(b, a)
  ## False-alarm probability assuming white noise
  P <- 1 - ((1 - exp(-z[, 1]))^number.ind.freq)
  res <- list(cyc_range = sqrt(z[, 1] * 2 * hvar / (nt / 2)),
              freq = f,
              f.max = f[peak],
              per.max = 1 / f[peak],
              phase = phi,
              p = P,
              tau = tau)
  class(res) <- "lomb"
  res
}
library(ape) # phylogenetics toolkit: read.tree / unroot / write.tree
# Read the Newick-format tree for alignment 1341_1
testtree <- read.tree("1341_1.txt")
# Drop the root; presumably required by the downstream codeml run - confirm
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1341_1_unrooted.txt") | /codeml_files/newick_trees_processed/1341_1/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("1341_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1341_1_unrooted.txt") |
require(XML) # xmlTreeParse/xmlValue/saveXML used below to edit config.xml
# Batch-run driver: edits config.xml per case, copies it (plus the model
# binary and inputs) into a per-case folder, and writes one launch command
# per case into the file "commands".
Cfile <- file("commands", "w")
# Keep a pristine copy of the template config before it is modified in place
cmd1 <- paste("cp config.xml config.old")
system(cmd1)
##output_file_prefix <- format(Sys.time(), "%Y%m%d%H%M")
output_file_prefix <- "bm1"
# ---- simulation parameters written into each case's config.xml ----
output_path = "out"
rep = 0 # case counter; incremented once per run in the block below
output_dir = "out"
cell_position_file = "cellPos.txt"
Nstep = 1600
dt = 0.01
eta = 20
two_population_model = 0
## q0= 1.5
## a = 1.16
## b = 0.86
q0= 1.15
a = 1.0
b = 1.0
a_braf = 0.5
b_braf = 0.5
braf_mosaic_percentage = 0.0
mu = 0.01
stretching_force = 0.0
H_rate = 0.004
flag_output_cell = 1
flag_output_force = 0
flag_record_final_state = 0
##stretching_force_list <- c(1, 3, 7, 10, 14, 16)
##for ( stretching_force in stretching_force_list )
##for ( stretching_force in c(seq(15, 15, length.out=10), seq(10, 10, length.out=10) ))
##for ( i in seq(10) )
# NOTE(review): every sweep loop above is commented out, so the brace below
# is a plain block executed exactly once (a single case). Re-enable one of
# the loops to sweep a parameter.
{
##stretching_force = 4
rep = rep + 1
cat("\n ***********run********* ", rep, "\n")
# Per-case working directory, e.g. "case_bm1_1"
FolderName <- paste("case_", output_file_prefix, "_", as.character(rep), sep='')
dir.create(FolderName)
## The following part is to produce "config.xml"
d <- xmlTreeParse("config.xml")
r <- xmlRoot(d)
# Overwrite each tag of the template with the value chosen above
xmlValue(r[["output_dir"]]) <- output_dir
xmlValue(r[["cell_position_file"]]) <- cell_position_file
xmlValue(r[["Nstep"]]) <- Nstep
xmlValue(r[["dt"]]) <- dt
xmlValue(r[["eta"]]) <- eta
xmlValue(r[["two_population_model"]]) <- two_population_model
xmlValue(r[["q0"]]) <- q0
xmlValue(r[["a"]]) <- a
xmlValue(r[["b"]]) <- b
xmlValue(r[["a_braf"]]) <- a_braf
xmlValue(r[["b_braf"]]) <- b_braf
xmlValue(r[["braf_mosaic_percentage"]]) <- braf_mosaic_percentage
xmlValue(r[["mu"]]) <- mu
xmlValue(r[["stretching_force"]]) <- stretching_force
xmlValue(r[["H_rate"]]) <- H_rate
xmlValue(r[["flag_output_cell"]]) <- flag_output_cell
xmlValue(r[["flag_output_force"]]) <- flag_output_force
xmlValue(r[["flag_record_final_state"]]) <- flag_record_final_state
saveXML(r, "config.xml")
## Copy the seqfile.txt into folder
cmd2 <- paste("cp config.xml", FolderName)
cmd3 <- paste("mkdir -p ", FolderName, "/out", sep='')
cmd4 <- paste("cp PhysModel", FolderName)
cmd5 <- paste("cp cellPos.txt", FolderName)
system(cmd2)
system(cmd3)
system(cmd4)
system(cmd5)
##rtime <- runif(1, 20, 200)
##runcmd <- paste("sleep ", rtime, "; cd", FolderName, "; screen PhysModel; cd ..")
## screen, on PC
# Launch command: run PhysModel in a detached screen session in the case dir
runcmd <- paste("cd", FolderName, "; screen -d -m PhysModel; cd ..")
## runcmd <- paste("screen -d -m ", FolderName, "/PhysModel", sep='')
## ## bsub, on server
## runcmd <- paste("bsub ", FolderName, "/PhysModel -o output_", rep, sep='')
## runcmd <- paste("cd ", FolderName, "; bsub PhysModel -o output_", rep, "; cd ..", sep='')
writeLines(runcmd, Cfile)
}
# Restore the pristine template and finish the command list
cmd1 <- paste("mv config.old config.xml")
system(cmd1)
close(Cfile)
| /preprocess.r | permissive | hydrays/CellModel | R | false | false | 2,855 | r | require(XML)
Cfile <- file("commands", "w")
cmd1 <- paste("cp config.xml config.old")
system(cmd1)
##output_file_prefix <- format(Sys.time(), "%Y%m%d%H%M")
output_file_prefix <- "bm1"
output_path = "out"
rep = 0
output_dir = "out"
cell_position_file = "cellPos.txt"
Nstep = 1600
dt = 0.01
eta = 20
two_population_model = 0
## q0= 1.5
## a = 1.16
## b = 0.86
q0= 1.15
a = 1.0
b = 1.0
a_braf = 0.5
b_braf = 0.5
braf_mosaic_percentage = 0.0
mu = 0.01
stretching_force = 0.0
H_rate = 0.004
flag_output_cell = 1
flag_output_force = 0
flag_record_final_state = 0
##stretching_force_list <- c(1, 3, 7, 10, 14, 16)
##for ( stretching_force in stretching_force_list )
##for ( stretching_force in c(seq(15, 15, length.out=10), seq(10, 10, length.out=10) ))
##for ( i in seq(10) )
{
##stretching_force = 4
rep = rep + 1
cat("\n ***********run********* ", rep, "\n")
FolderName <- paste("case_", output_file_prefix, "_", as.character(rep), sep='')
dir.create(FolderName)
## The following part is to produce "config.xml"
d <- xmlTreeParse("config.xml")
r <- xmlRoot(d)
xmlValue(r[["output_dir"]]) <- output_dir
xmlValue(r[["cell_position_file"]]) <- cell_position_file
xmlValue(r[["Nstep"]]) <- Nstep
xmlValue(r[["dt"]]) <- dt
xmlValue(r[["eta"]]) <- eta
xmlValue(r[["two_population_model"]]) <- two_population_model
xmlValue(r[["q0"]]) <- q0
xmlValue(r[["a"]]) <- a
xmlValue(r[["b"]]) <- b
xmlValue(r[["a_braf"]]) <- a_braf
xmlValue(r[["b_braf"]]) <- b_braf
xmlValue(r[["braf_mosaic_percentage"]]) <- braf_mosaic_percentage
xmlValue(r[["mu"]]) <- mu
xmlValue(r[["stretching_force"]]) <- stretching_force
xmlValue(r[["H_rate"]]) <- H_rate
xmlValue(r[["flag_output_cell"]]) <- flag_output_cell
xmlValue(r[["flag_output_force"]]) <- flag_output_force
xmlValue(r[["flag_record_final_state"]]) <- flag_record_final_state
saveXML(r, "config.xml")
## Copy the seqfile.txt into folder
cmd2 <- paste("cp config.xml", FolderName)
cmd3 <- paste("mkdir -p ", FolderName, "/out", sep='')
cmd4 <- paste("cp PhysModel", FolderName)
cmd5 <- paste("cp cellPos.txt", FolderName)
system(cmd2)
system(cmd3)
system(cmd4)
system(cmd5)
##rtime <- runif(1, 20, 200)
##runcmd <- paste("sleep ", rtime, "; cd", FolderName, "; screen PhysModel; cd ..")
## screen, on PC
runcmd <- paste("cd", FolderName, "; screen -d -m PhysModel; cd ..")
## runcmd <- paste("screen -d -m ", FolderName, "/PhysModel", sep='')
## ## bsub, on server
## runcmd <- paste("bsub ", FolderName, "/PhysModel -o output_", rep, sep='')
## runcmd <- paste("cd ", FolderName, "; bsub PhysModel -o output_", rep, "; cd ..", sep='')
writeLines(runcmd, Cfile)
}
cmd1 <- paste("mv config.old config.xml")
system(cmd1)
close(Cfile)
|
# Jackknife-style sensitivity analysis of CMSY2 stock-assessment output to
# its prior bounds: each of the 8 prior bounds is bumped up and down by the
# fraction p, CMSY2 is re-run, and the up-minus-down difference in B/Bmsy
# is recorded per stock. One stock is processed per parallel worker.
library(datalimited2)
library(reshape2)
library(ggplot2)
source ("jacobian_prior_funcs.r")
# ICES stocks to analyse; catch data live under ../Data/<stock folder>/
stocklist <- c("Cod6a", "Cod7ek",
"CodFaroe", "CodNS",
"Had6b" , "Had7bk" , "HadNS" ,
"Ple7a", "Ple7hk" , "Sol7a",
"Sol7fg" , "Sol7hk" , "Sol2024",
"Whg6a", "Whg7a" ,
"Whg7bk" , "WhgNS")
# set reps (number of CMSY2 replicate runs per stock)
no.reps <- 5
# set change for each prior (fractional perturbation, i.e. +/- 10%)
p= 0.1
## try to run in parallel
library(doSNOW)
cl <- makeSOCKcluster(8)
registerDoSNOW(cl)
pb <- txtProgressBar(max=length(stocklist), style=3)
progress <- function(n) setTxtProgressBar(pb, n)
opts <- list(progress=progress)
# One foreach iteration per stock; rows of all iterations are rbind-ed
all_stocks_res <- foreach(i = 1:length(stocklist), .combine = rbind, .options.snow=opts) %dopar% {
library(datalimited2)
##for (i in stocklist){
## load the data for each stock - should do this in loop but naming variable
Cod6a.data = read.csv("../Data/Cod_6_a/Cod.6a.Data.csv")
Cod7ek.data = read.csv("../Data/Cod_7_ek/Cod.7ek.Data.csv")
CodFaroe.data = read.csv("../Data/Cod_Faroe/Cod.Faroe.Data.csv")
CodNS.data = read.csv("../Data/Cod_NS/Cod.NS.Data.csv")
Had6b.data = read.csv("../Data/Had_6_b/Had.6b.Data.csv")
Had7bk.data = read.csv("../Data/Had_7_bk/Had.7bk.Data.csv")
HadNS.data = read.csv("../Data/Had_NS/Had.NS.Data.csv")
Ple7a.data = read.csv("../Data/Ple_7_a/Ple.7a.Data.csv")
Ple7hk.data = read.csv("../Data/Ple_7_hk/Ple.7hk.Data.csv")
Sol7a.data = read.csv("../Data/Sol_7_a/Sol.7a.Data.csv")
Sol7fg.data = read.csv("../Data/Sol_7_fg/Sol.7fg.Data.csv")
Sol7hk.data = read.csv("../Data/Sol_7_hk/Sol.7hk.Data.csv")
Sol2024.data = read.csv("../Data/Sol_2024/Sol.2024.Data.csv")
Whg6a.data = read.csv("../Data/Whg_6_a/Whg.6a.Data.csv")
Whg7a.data = read.csv("../Data/Whg_7_a/Whg.7a.Data.csv")
Whg7bk.data = read.csv("../Data/Whg_7_bk/Whg.7bk.Data.csv")
WhgNS.data = read.csv("../Data/Whg_NS/Whg.NS.Data.csv")
print(paste("Working on", stocklist[i]))
# Pick this worker's stock data frame by constructed name
data = get(paste0(stocklist[i], ".data"))
#### set up priors using the separate functions ####
# Calculate 3-yr moving average (average of past 3 years)
ct.raw <- data$catch# raw catch; a "/ 1000" rescaling is disabled here
ct <- ma(data$catch)
# Identify number of years and start/end years
yr <- data$year # functions use this quantity
nyr <- length(yr)
start.yr <- min(yr)
end.yr <- max(yr)
# Prior information for resilience (r) of stock ---------------------------
# Can either classify as High, Medium, Low or Very low or manually set the boundaries
pb.resilience = "Medium"
## Lower r estimate
pb.r.low = NA
## Upper r estimate
pb.r.hi = NA
# Map the resilience class onto r bounds when no manual bounds are given
if (is.na(pb.r.low)) {if (pb.resilience == "High"){pb.r.low = 0.6; pb.r.hi = 1.5; start.r = c(0.6,1.5)}
else if (pb.resilience == "Medium"){pb.r.low = 0.2; pb.r.hi = 0.8; start.r = c(0.2,0.8)}
else if (pb.resilience == "Low"){pb.r.low = 0.05; pb.r.hi = 0.5; start.r = c(0.05,0.5)}
else if (pb.resilience == "Very low"){pb.r.low = 0.015; pb.r.hi = 0.1; start.r = c(0.015,0.1)}}
# All biomass prior bounds start as NA so the helper functions pick defaults
endb.hi<- endb.low <-intb.hi<- intb.low <-stb.hi<- stb.low <- NA
startbio <- startbio_prior(stb.low, stb.hi, start.yr)
# NOTE(review): int.yr is passed here before it is assigned two lines down;
# this can only work because R evaluates arguments lazily / intbio_prior
# presumably derives int.yr itself when the prior bounds are NA - confirm
# against jacobian_prior_funcs.r
int_params <- intbio_prior(intb.low, intb.hi, int.yr, start.yr, end.yr, startbio, yr, ct)
intbio <- int_params[[1]]
int.yr <- int_params[[2]]
endbio <- endbio_prior(endb.low, endb.hi, nyr, ct.raw, ct)
start.k <- k_prior(endbio, start.r, ct)
#intermediate year as a position in the time series
int.yr.num <- int.yr - start.yr
## start it where we start the main cmsy run
# Baseline prior vector: r bounds plus start/intermediate/end biomass bounds
start_prior <- c(rl = pb.r.low, ru = pb.r.hi,
# kl = start.k[1], ku = start.k[2],
p0l = startbio[1], p0u = startbio[2],
pMl = intbio[1], pMu = intbio[2],
pTl = endbio[1], pTu = endbio[2])
#### set up the sensitivity options
# 17 scenarios x 8 prior bounds: rows 1-16 perturb one bound each
# (odd rows up by p, even rows down by p); row 17 is the unperturbed original
cmsy_input <- matrix(data = start_prior, nrow = 17, ncol = 8, byrow = T)
cmsy_input[1,1] <- cmsy_input[1,1] * (1 + p)
cmsy_input[2,1] <- cmsy_input[2,1] * (1 - p)
cmsy_input[3,2] <- cmsy_input[3,2] * (1 + p)
cmsy_input[4,2] <- cmsy_input[4,2] * (1 - p)
cmsy_input[5,3] <- cmsy_input[5,3] * (1 + p)
cmsy_input[6,3] <- cmsy_input[6,3] * (1 - p)
cmsy_input[7,4] <- cmsy_input[7,4] * (1 + p)
cmsy_input[8,4] <- cmsy_input[8,4] * (1 - p)
cmsy_input[9,5] <- cmsy_input[9,5] * (1 + p)
cmsy_input[10,5] <- cmsy_input[10,5] * (1 - p)
cmsy_input[11,6] <- cmsy_input[11,6] * (1 + p)
cmsy_input[12,6] <- cmsy_input[12,6] * (1 - p)
cmsy_input[13,7] <- cmsy_input[13,7] * (1 + p)
cmsy_input[14,7] <- cmsy_input[14,7] * (1 - p)
cmsy_input[15,8] <- cmsy_input[15,8] * (1 + p)
cmsy_input[16,8] <- cmsy_input[16,8] * (1 - p)
# cmsy_input
rep_names <- c("rlow_up", "rlow_down", "rhi_up", "rhi_down",
"stblow_up", "stblow_down", "stbhi_up", "stbhi_down",
"intblow_up", "intblow_down", "intbhi_up", "intbhi_down",
"endblow_up", "endblow_down", "endbhi_up", "endbhi_down", "Orig")
# names for output structure
Rep2 <- c("rlow","rhi", "stblow", "stbhi",
"intblow", "intbhi", "endblow", "endbhi")
# Accumulator for the up-minus-down differences across replicates
all_res <- data.frame(matrix(ncol=6,nrow=0,
dimnames=list(NULL, c("Rep", "B_BMSY_start", "B_BMSY_mid", "B_BMSY_end", "Stock", "trial"))))
for (j in 1:no.reps){
cmsy_output <- data.frame(Rep = rep_names, B_BMSY_start = NA,
B_BMSY_mid = NA, B_BMSY_end = NA)
# Run CMSY2 once per perturbation scenario
for (k in 1:17){
temp <- cmsy2(year = data$year, catch = data$catch, resilience = NA,
r.low = cmsy_input[k,1], r.hi =cmsy_input[k,2],
stb.low = cmsy_input[k,3], stb.hi = cmsy_input[k,4],
int.yr = int.yr,
intb.low = cmsy_input[k,5], intb.hi = cmsy_input[k,6],
endb.low = cmsy_input[k,7], endb.hi = cmsy_input[k,8], verbose = T)
# Keep B/Bmsy at the first year, the intermediate year and the last year
cmsy_output$B_BMSY_start[k] <- temp$ref_ts$bbmsy[[1]]
cmsy_output$B_BMSY_mid[k] <- temp$ref_ts$bbmsy[temp$ref_ts$year == int.yr]
cmsy_output$B_BMSY_end[k] <- temp$ref_ts$bbmsy[length(temp$ref_ts$bbmsy)]
}
cmsy_output$Stock <- stocklist[i]
### save raw output
save(cmsy_output, file = paste0("CMSY2/CMSY2_Sensitivity_", stocklist[i], "_rep", j, ".RData"))
#code to refine output and save
# Pair each "up" row with its matching "down" row and difference them
ups <- cmsy_output[c(1,3,5,7,9,11,13,15),]
ups$Rep <- Rep2
downs <- cmsy_output[c(2,4,6,8,10,12,14,16),]
downs$Rep <- Rep2
res <- ups
res$B_BMSY_start <- ups$B_BMSY_start - downs$B_BMSY_start
res$B_BMSY_mid <- ups$B_BMSY_mid - downs$B_BMSY_mid
res$B_BMSY_end <- ups$B_BMSY_end - downs$B_BMSY_end
res$trial <- j
all_res <- rbind(all_res, res)
}
##
save(all_res, file = paste0("CMSY2/CMSY2_Sensitivity_", stocklist[i], "_all.RData"))
# Last expression of the %dopar% body: this stock's table, rbind-ed by foreach
all_res
}
close(pb)
stopCluster(cl)
# Combined sensitivity table across every stock
save(all_stocks_res, file = paste0("CMSY2/CMSY2_Sensitivity_all_stocks.RData"))
| /Jacobian/jacobian_cmsy2_parallel.R | no_license | Hanqingpeng/DataLimited_CMSY_SPiCT | R | false | false | 7,056 | r | library(datalimited2)
library(reshape2)
library(ggplot2)
source ("jacobian_prior_funcs.r")
stocklist <- c("Cod6a", "Cod7ek",
"CodFaroe", "CodNS",
"Had6b" , "Had7bk" , "HadNS" ,
"Ple7a", "Ple7hk" , "Sol7a",
"Sol7fg" , "Sol7hk" , "Sol2024",
"Whg6a", "Whg7a" ,
"Whg7bk" , "WhgNS")
# set reps
no.reps <- 5
# set change for each prior
p= 0.1
## try to run in parallel
library(doSNOW)
cl <- makeSOCKcluster(8)
registerDoSNOW(cl)
pb <- txtProgressBar(max=length(stocklist), style=3)
progress <- function(n) setTxtProgressBar(pb, n)
opts <- list(progress=progress)
all_stocks_res <- foreach(i = 1:length(stocklist), .combine = rbind, .options.snow=opts) %dopar% {
library(datalimited2)
##for (i in stocklist){
## load the data for each stock - should do this in loop but naming variable
Cod6a.data = read.csv("../Data/Cod_6_a/Cod.6a.Data.csv")
Cod7ek.data = read.csv("../Data/Cod_7_ek/Cod.7ek.Data.csv")
CodFaroe.data = read.csv("../Data/Cod_Faroe/Cod.Faroe.Data.csv")
CodNS.data = read.csv("../Data/Cod_NS/Cod.NS.Data.csv")
Had6b.data = read.csv("../Data/Had_6_b/Had.6b.Data.csv")
Had7bk.data = read.csv("../Data/Had_7_bk/Had.7bk.Data.csv")
HadNS.data = read.csv("../Data/Had_NS/Had.NS.Data.csv")
Ple7a.data = read.csv("../Data/Ple_7_a/Ple.7a.Data.csv")
Ple7hk.data = read.csv("../Data/Ple_7_hk/Ple.7hk.Data.csv")
Sol7a.data = read.csv("../Data/Sol_7_a/Sol.7a.Data.csv")
Sol7fg.data = read.csv("../Data/Sol_7_fg/Sol.7fg.Data.csv")
Sol7hk.data = read.csv("../Data/Sol_7_hk/Sol.7hk.Data.csv")
Sol2024.data = read.csv("../Data/Sol_2024/Sol.2024.Data.csv")
Whg6a.data = read.csv("../Data/Whg_6_a/Whg.6a.Data.csv")
Whg7a.data = read.csv("../Data/Whg_7_a/Whg.7a.Data.csv")
Whg7bk.data = read.csv("../Data/Whg_7_bk/Whg.7bk.Data.csv")
WhgNS.data = read.csv("../Data/Whg_NS/Whg.NS.Data.csv")
print(paste("Working on", stocklist[i]))
data = get(paste0(stocklist[i], ".data"))
#### set up priors using the seperate functions ####
# Calculate 3-yr moving average (average of past 3 years)
ct.raw <- data$catch# / 1000
ct <- ma(data$catch)
# Identify number of years and start/end years
yr <- data$year # functions use this quantity
nyr <- length(yr)
start.yr <- min(yr)
end.yr <- max(yr)
# Prior information for resilience (r) of stock ---------------------------
# Can either classify as High, Medium, Low or Very low or manually set the boundaries
pb.resilience = "Medium"
## Lower r estimate
pb.r.low = NA
## Upper r estimate
pb.r.hi = NA
if (is.na(pb.r.low)) {if (pb.resilience == "High"){pb.r.low = 0.6; pb.r.hi = 1.5; start.r = c(0.6,1.5)}
else if (pb.resilience == "Medium"){pb.r.low = 0.2; pb.r.hi = 0.8; start.r = c(0.2,0.8)}
else if (pb.resilience == "Low"){pb.r.low = 0.05; pb.r.hi = 0.5; start.r = c(0.05,0.5)}
else if (pb.resilience == "Very low"){pb.r.low = 0.015; pb.r.hi = 0.1; start.r = c(0.015,0.1)}}
endb.hi<- endb.low <-intb.hi<- intb.low <-stb.hi<- stb.low <- NA
startbio <- startbio_prior(stb.low, stb.hi, start.yr)
int_params <- intbio_prior(intb.low, intb.hi, int.yr, start.yr, end.yr, startbio, yr, ct)
intbio <- int_params[[1]]
int.yr <- int_params[[2]]
endbio <- endbio_prior(endb.low, endb.hi, nyr, ct.raw, ct)
start.k <- k_prior(endbio, start.r, ct)
#intermediate year as a position in the time series
int.yr.num <- int.yr - start.yr
## start it where we start the main cmsy run
start_prior <- c(rl = pb.r.low, ru = pb.r.hi,
# kl = start.k[1], ku = start.k[2],
p0l = startbio[1], p0u = startbio[2],
pMl = intbio[1], pMu = intbio[2],
pTl = endbio[1], pTu = endbio[2])
#### set up the sensitivity options
cmsy_input <- matrix(data = start_prior, nrow = 17, ncol = 8, byrow = T)
cmsy_input[1,1] <- cmsy_input[1,1] * (1 + p)
cmsy_input[2,1] <- cmsy_input[2,1] * (1 - p)
cmsy_input[3,2] <- cmsy_input[3,2] * (1 + p)
cmsy_input[4,2] <- cmsy_input[4,2] * (1 - p)
cmsy_input[5,3] <- cmsy_input[5,3] * (1 + p)
cmsy_input[6,3] <- cmsy_input[6,3] * (1 - p)
cmsy_input[7,4] <- cmsy_input[7,4] * (1 + p)
cmsy_input[8,4] <- cmsy_input[8,4] * (1 - p)
cmsy_input[9,5] <- cmsy_input[9,5] * (1 + p)
cmsy_input[10,5] <- cmsy_input[10,5] * (1 - p)
cmsy_input[11,6] <- cmsy_input[11,6] * (1 + p)
cmsy_input[12,6] <- cmsy_input[12,6] * (1 - p)
cmsy_input[13,7] <- cmsy_input[13,7] * (1 + p)
cmsy_input[14,7] <- cmsy_input[14,7] * (1 - p)
cmsy_input[15,8] <- cmsy_input[15,8] * (1 + p)
cmsy_input[16,8] <- cmsy_input[16,8] * (1 - p)
# cmsy_input
rep_names <- c("rlow_up", "rlow_down", "rhi_up", "rhi_down",
"stblow_up", "stblow_down", "stbhi_up", "stbhi_down",
"intblow_up", "intblow_down", "intbhi_up", "intbhi_down",
"endblow_up", "endblow_down", "endbhi_up", "endbhi_down", "Orig")
# names for output structure
Rep2 <- c("rlow","rhi", "stblow", "stbhi",
"intblow", "intbhi", "endblow", "endbhi")
all_res <- data.frame(matrix(ncol=6,nrow=0,
dimnames=list(NULL, c("Rep", "B_BMSY_start", "B_BMSY_mid", "B_BMSY_end", "Stock", "trial"))))
for (j in 1:no.reps){
cmsy_output <- data.frame(Rep = rep_names, B_BMSY_start = NA,
B_BMSY_mid = NA, B_BMSY_end = NA)
for (k in 1:17){
temp <- cmsy2(year = data$year, catch = data$catch, resilience = NA,
r.low = cmsy_input[k,1], r.hi =cmsy_input[k,2],
stb.low = cmsy_input[k,3], stb.hi = cmsy_input[k,4],
int.yr = int.yr,
intb.low = cmsy_input[k,5], intb.hi = cmsy_input[k,6],
endb.low = cmsy_input[k,7], endb.hi = cmsy_input[k,8], verbose = T)
cmsy_output$B_BMSY_start[k] <- temp$ref_ts$bbmsy[[1]]
cmsy_output$B_BMSY_mid[k] <- temp$ref_ts$bbmsy[temp$ref_ts$year == int.yr]
cmsy_output$B_BMSY_end[k] <- temp$ref_ts$bbmsy[length(temp$ref_ts$bbmsy)]
}
cmsy_output$Stock <- stocklist[i]
### save raw output
save(cmsy_output, file = paste0("CMSY2/CMSY2_Sensitivity_", stocklist[i], "_rep", j, ".RData"))
#code to refine output and save
ups <- cmsy_output[c(1,3,5,7,9,11,13,15),]
ups$Rep <- Rep2
downs <- cmsy_output[c(2,4,6,8,10,12,14,16),]
downs$Rep <- Rep2
res <- ups
res$B_BMSY_start <- ups$B_BMSY_start - downs$B_BMSY_start
res$B_BMSY_mid <- ups$B_BMSY_mid - downs$B_BMSY_mid
res$B_BMSY_end <- ups$B_BMSY_end - downs$B_BMSY_end
res$trial <- j
all_res <- rbind(all_res, res)
}
##
save(all_res, file = paste0("CMSY2/CMSY2_Sensitivity_", stocklist[i], "_all.RData"))
all_res
}
close(pb)
stopCluster(cl)
save(all_stocks_res, file = paste0("CMSY2/CMSY2_Sensitivity_all_stocks.RData"))
|
#' Perform Secure-by-default or Woefully Insecure 'DNS' Queries
#'
#' Methods are provided to query 'Domain Name System' ('DNS') stub
#' and recursive resolvers for all 'DNS' resource record types using 'UDP',
#' 'TCP', and/or 'TLS' transport layers. 'DNS' query support is provided
#' by the 'getdns' (<getdnsapi.net>) C library.
#'
#'
#' - URL: <https://gitlab.com/hrbrmstr/clandnstine>
#' - BugReports: <https://gitlab.com/hrbrmstr/clandnstine/issues>
#'
#' @md
#' @name clandnstine
#' @docType package
#' @author Bob Rudis (bob@@rud.is)
#' @keywords internal
#' @import httr R6
#' @importFrom glue glue_data
#' @importFrom jsonlite fromJSON
#' @useDynLib clandnstine, .registration = TRUE
#' @importFrom Rcpp sourceCpp
NULL | /R/clandnstine-package.R | permissive | hrbrmstr/clandnstine | R | false | false | 726 | r | #' Perform Secure-by-default or Woefully Insecure 'DNS' Queries
#'
#' Methods are provided to query 'Domain Name System' ('DNS') stub
#' and recursive resolvers for all 'DNS' resource record types using 'UDP',
#' TCP', and/or 'TLS' transport layers. 'DNS' query support is provided
#' by the 'getdns' (<getdnsapi.net>) C library.
#'
#'
#' - URL: <https://gitlab.com/hrbrmstr/clandnstine>
#' - BugReports: <https://gitlab.com/hrbrmstr/clandnstine/issues>
#'
#' @md
#' @name clandnstine
#' @docType package
#' @author Bob Rudis (bob@@rud.is)
#' @keywords internal
#' @import httr R6
#' @importFrom glue glue_data
#' @importFrom jsonlite fromJSON
#' @useDynLib clandnstine, .registration = TRUE
#' @importFrom Rcpp sourceCpp
NULL |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% BufferedOutputStream.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{write.BufferedOutputStream}
\alias{write.BufferedOutputStream}
\alias{BufferedOutputStream.write}
\alias{write.BufferedOutputStream}
\alias{write,BufferedOutputStream-method}
\title{Writes one or more bytes to the output stream}
\usage{\method{write}{BufferedOutputStream}(this, b, off=1, len=length(b), ...)}
\description{
Writes one or more bytes to the output stream.
}
\value{
Returns nothing.
}
\author{Henrik Bengtsson (\url{http://www.braju.com/R/})}
\seealso{
  For more information see \code{\link{BufferedOutputStream}}.
}
\keyword{internal}
\keyword{methods}
| /man/write.BufferedOutputStream.Rd | no_license | HenrikBengtsson/R.io | R | false | false | 922 | rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% BufferedOutputStream.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{write.BufferedOutputStream}
\alias{write.BufferedOutputStream}
\alias{BufferedOutputStream.write}
\alias{write.BufferedOutputStream}
\alias{write,BufferedOutputStream-method}
\title{Writes one or more bytes to the output stream}
\usage{\method{write}{BufferedOutputStream}(this, b, off=1, len=length(b), ...)}
\description{
Writes one or more bytes to the output stream.
}
\value{
Returns nothing.
}
\author{Henrik Bengtsson (\url{http://www.braju.com/R/})}
\seealso{
For more information see \code{\link{BufferedOutputStream}}..
}
\keyword{internal}
\keyword{methods}
|
#title:shot-data
#description: build per-player and combined shot charts for the 2016 GSW
#             season, save them as PDF/PNG, and export the combined shot
#             table to CSV
#input: data frames `thompson`, `iguodala`, `green`, `durant`, `curry`
#       (one row per shot with x, y, shot_made_flag), created upstream
#output: ./images/*-shot-chart.pdf, ./images/gsw-shot-charts.{pdf,png},
#        ./data/shots-data.csv
library(ggplot2)
library(jpeg)
library(grid)
library(dplyr)

# Half-court background image shared by every chart
court_file <- "./images/nba-court.jpg"
court_image <- rasterGrob(
  readJPEG(court_file),
  width = unit(1, "npc"),
  height = unit(1, "npc"))

# One shot chart: shot locations over the court image, colored by make/miss.
# Replaces five copy-pasted chart blocks with a single helper.
make_shot_chart <- function(shots, title) {
  ggplot(data = shots) +
    annotation_custom(court_image, -250, 250, -50, 420) +
    geom_point(aes(x = x, y = y, color = shot_made_flag)) +
    ylim(-50, 420) +
    ggtitle(title) +
    theme_minimal()
}

# Save a chart as PDF at the standard size used throughout this script.
save_chart_pdf <- function(path, chart) {
  pdf(file = path, width = 6.5, height = 5)
  print(chart)  # explicit print: ggplot auto-printing only happens at top level
  dev.off()
}

thompson_shot_chart <- make_shot_chart(thompson, 'Shot Chart: Klay Thompson (2016 season)')
iguodala_shot_chart <- make_shot_chart(iguodala, 'Shot Chart: Andre Iguodala (2016 season)')
green_shot_chart <- make_shot_chart(green, 'Shot Chart: Draymond Green (2016 season)')
durant_shot_chart <- make_shot_chart(durant, 'Shot Chart: Kevin Durant (2016 season)')
curry_shot_chart <- make_shot_chart(curry, 'Shot Chart: Stephen Curry (2016 season)')

save_chart_pdf("./images/klay-thompson-shot-chart.pdf", thompson_shot_chart)
save_chart_pdf("./images/andre-iguodala-shot-chart.pdf", iguodala_shot_chart)
save_chart_pdf("./images/draymond-green-shot-chart.pdf", green_shot_chart)
save_chart_pdf("./images/kevin-durant-shot-chart.pdf", durant_shot_chart)
save_chart_pdf("./images/stephen-curry-shot-chart.pdf", curry_shot_chart)

# Tag each player's shots and stack them into one combined table
curry <- mutate(curry, player = "curry")
iguodala <- mutate(iguodala, player = 'iguodala')
green <- mutate(green, player = "green")
durant <- mutate(durant, player = "durant")
thompson <- mutate(thompson, player = "thompson")
total <- rbind(curry, iguodala, green, durant, thompson)
write.csv(x = total, file = "./data/shots-data.csv")

# Combined, faceted chart: one panel per player
gsw_shot_chart <- ggplot(data = total, aes(x = x, y = y)) +
  annotation_custom(court_image, -250, 250, -50, 420) +
  geom_point(aes(color = shot_made_flag)) +
  ylim(-50, 420) +
  ggtitle('Shot Chart: GSW Shot Charts (2016 season)') +
  theme_minimal() +
  facet_wrap(. ~ player)
save_chart_pdf("./images/gsw-shot-charts.pdf", gsw_shot_chart)

png(filename = "./images/gsw-shot-charts.png", width = 800, height = 700)
print(gsw_shot_chart)
dev.off()
| /workout01/code/make-shot-chart-script.R | no_license | stat133-sp19/hw-stat133-snowman36 | R | false | false | 2,844 | r | #title:shot-data
#description:
#input:
#output:
library(ggplot2)
library(jpeg)
library(grid)
library(dplyr)
court_file <- "./images/nba-court.jpg"
court_image <- rasterGrob(
readJPEG(court_file),
width = unit(1, "npc"),
height = unit(1, "npc"))
thompson_shot_chart <- ggplot(data = thompson) +
annotation_custom(court_image, -250, 250, -50, 420) +
geom_point(aes(x = x, y = y, color = shot_made_flag)) +
ylim(-50, 420) +
ggtitle('Shot Chart: Klay Thompson (2016 season)') +
theme_minimal()
iguodala_shot_chart <- ggplot(data = iguodala) +
annotation_custom(court_image, -250, 250, -50, 420) +
geom_point(aes(x = x, y = y, color = shot_made_flag)) +
ylim(-50, 420) +
ggtitle('Shot Chart: Andre Iguodala (2016 season)') +
theme_minimal()
green_shot_chart <- ggplot(data = green) +
annotation_custom(court_image, -250, 250, -50, 420) +
geom_point(aes(x = x, y = y, color = shot_made_flag)) +
ylim(-50, 420) +
ggtitle('Shot Chart: Draymond Green (2016 season)') +
theme_minimal()
durant_shot_chart <- ggplot(data = durant) +
annotation_custom(court_image, -250, 250, -50, 420) +
geom_point(aes(x = x, y = y, color = shot_made_flag)) +
ylim(-50, 420) +
ggtitle('Shot Chart: Kevin Durant (2016 season)') +
theme_minimal()
curry_shot_chart <- ggplot(data = curry) +
annotation_custom(court_image, -250, 250, -50, 420) +
geom_point(aes(x = x, y = y, color = shot_made_flag)) +
ylim(-50, 420) +
ggtitle('Shot Chart: Stephen Curry (2016 season)') +
theme_minimal()
pdf(file = "./images/klay-thompson-shot-chart.pdf",width = 6.5, height = 5)
thompson_shot_chart
dev.off()
pdf(file = "./images/andre-iguodala-shot-chart.pdf",width = 6.5, height = 5)
iguodala_shot_chart
dev.off()
pdf(file = "./images/draymond-green-shot-chart.pdf",width = 6.5, height = 5)
green_shot_chart
dev.off()
pdf(file = "./images/kevin-durant-shot-chart.pdf",width = 6.5, height = 5)
durant_shot_chart
dev.off()
pdf(file = "./images/stephen-curry-shot-chart.pdf",width = 6.5, height = 5)
curry_shot_chart
dev.off()
curry <- mutate(curry,player="curry")
iguodala <- mutate(iguodala, player = 'iguodala')
green <- mutate(green,player="green")
durant <- mutate(durant,player="durant")
thompson <- mutate(thompson,player="thompson")
total <- rbind(curry,iguodala,green,durant,thompson)
write.csv(x=total, file = "./data/shots-data.csv")
pdf(file = "./images/gsw-shot-charts.pdf",width = 6.5, height = 5)
gsw_shot_chart <- ggplot(data=total, aes(x=x,y=y)) +
annotation_custom(court_image, -250, 250, -50, 420) +
geom_point(aes(color=shot_made_flag)) +
ylim(-50, 420) +
ggtitle('Shot Chart: GSW Shot Charts (2016 season)') +
theme_minimal()+
facet_wrap(.~player)
gsw_shot_chart
dev.off()
png(filename = "./images/gsw-shot-charts.png",width = 800,height = 700)
gsw_shot_chart
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{mutation_categories}
\alias{mutation_categories}
\title{Trinucleotide mutation categories}
\format{
matrix of 96 trinucleotides mutation categories
}
\usage{
data(mutation_categories)
}
\value{
matrix of 96 trinucleotides mutation categories
}
\description{
96 trinucleotides mutation categories
}
| /man/mutation_categories.Rd | permissive | danro9685/SparseSignatures | R | false | true | 405 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{mutation_categories}
\alias{mutation_categories}
\title{trinucleotides mutation categories}
\format{
matrix of 96 trinucleotides mutation categories
}
\usage{
data(mutation_categories)
}
\value{
matrix of 96 trinucleotides mutation categories
}
\description{
96 trinucleotides mutation categories
}
|
/Covid_Analysis/covid_analysis.R | no_license | DonG96-Vienna/Data_Analysis | R | false | false | 1,808 | r | ||
#####################################
## Functions for building networks
#####################################
build_network <- function(graph, build_fxn, thin = 10, n = 500, ...){
  ## Grow `graph` one node at a time until it has `n` nodes, attaching each
  ## new node to the vertices chosen by `build_fxn`, and snapshotting the
  ## graph every `thin` added nodes.
  ## Parameters
  ##   graph     starting igraph object (must already contain >= 1 node)
  ##   build_fxn function(g, ...) returning the vertex ids the new node
  ##             should connect to
  ##   thin      save the graph whenever the node count is a multiple of thin
  ##   n         final desired number of nodes
  ##   ...       extra arguments forwarded to build_fxn
  ## Output: a list of graph snapshots taken during growth.
  ## BUG FIX: this previously read length(V(g)), silently capturing a
  ## global `g` instead of the `graph` argument.
  init_graph_length <- length(V(graph))
  if (init_graph_length == 0) {
    stop("Must start with a graph that has at least one node")
  }
  if (n <= init_graph_length) {
    ## Nothing to grow; previously (init+1):n would count DOWN here and
    ## iterate over a nonsense sequence.
    return(list())
  }
  ## Snapshots land at positions node_index / thin; floor((n - init)/thin)
  ## can undercount by one, but R extends the list on assignment, so the
  ## original sizing is kept for output compatibility.
  graph_list <- vector(mode = "list", length = floor( (n - init_graph_length)/thin))
  for (node_index in (init_graph_length + 1):n) {
    ## Attachment rule picks the neighbours of the incoming node
    new_edges <- build_fxn(g = graph, ...)
    ## Append the node and wire it to each chosen vertex
    graph <- graph %>% add_vertices(nv = 1) %>%
      add_edges(c(rbind(rep(node_index, length(new_edges)), new_edges)))
    ## Save a snapshot every `thin` nodes
    if (node_index %% thin == 0) {
      graph_list[[node_index / thin]] <- graph
    }
  }
  return(graph_list)
}
cluster_build <- function(g, alpha = 2, edges = 2){
  ## Choose `edges` attachment targets for a newly added node, weighting
  ## every existing vertex by its local clustering coefficient raised to
  ## `alpha` (clustering-driven growth, after Bagrow & Brockmann).
  ##   alpha - exponent applied to the clustering weights
  ##   edges - number of distinct vertices to return
  attach_weight <- transitivity(g, type = "local")^alpha
  sample(x = V(g), size = edges, prob = attach_weight, replace = FALSE)
}
degree_build <- function(g, alpha = 2, edges = 2){
## Degree-preferential building algorithm (bagrow/brockmann paper);
## note: the attachment weight here is vertex degree, not clustering
## alpha - exponent for degree weighting
## edges - total number of edges to attach for each new node
sample(x = V(g),
size = edges,
prob = degree(g)^alpha,
replace=FALSE)
}
| /R/build_network_fxns.R | no_license | sjfox/network_evolution | R | false | false | 2,503 | r | #####################################
## Functions for building networks
#####################################
build_network <- function(graph, build_fxn, thin = 10, n = 500, ...){
## Function for building networks
## Each iteration of building increases the graph size by 1
## Parameters
## graph must begin with the desired starting graph
## build_fxn must be a function that takes in a graph and returns the edges that new node connects to
## thin describes how many iterations should pass before the graph is saved
## n is the final desired size of the graph
## ... describes all parameters that can be sent to the build_function
## Output
## A list of graphs generated from the creation process
init_graph_length <- length(V(g))
if(init_graph_length == 0){
stop("Must start with a graph that has at least one node")
}
## graph_list of length that accounts for the thinning
## Basically want to save every 10 iterations, but not necessarily from starting point (so use floor)
## e.g. start with 3 nodes, and want 500, saves on 10, 20, 30 ... instead of 3, 13, 23 ...
graph_list <- vector(mode = "list", length = floor( (n - init_graph_length)/thin))
for(node_index in (init_graph_length+1):n){
## Adds one node at a time, based on the specified function
## Sends the network from the previous iteration and any specific parameters
new_edges <- build_fxn(g = graph, ...)
## Adds vertex to graph, with new edges specified
graph <- graph %>% add_vertices(nv = 1) %>%
add_edges(c(rbind(rep(node_index, length(new_edges)), new_edges)))
## Saves the result if you're on the proper step
if(node_index %% thin == 0){
graph_list[[node_index/thin]] <- graph
}
}
return(graph_list)
}
cluster_build <- function(g, alpha = 2, edges = 2){
## Clustering building algorithm based on bagrow/brockmann paper
## alpha - exponent for clustering weighting
## edges - total number of edges to attach for each new node
sample(x = V(g),
size = edges,
prob = transitivity(g, type = "local")^alpha,
replace=FALSE)
}
degree_build <- function(g, alpha = 2, edges = 2){
## Clustering building algorithm based on bagrow/brockmann paper
## alpha - exponent for degree weighting
## edges - total number of edges to attach for each new node
sample(x = V(g),
size = edges,
prob = degree(g)^alpha,
replace=FALSE)
}
|
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129326L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result) | /IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609874909-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 729 | r | testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129326L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result) |
FILE_NAME <- "3150 DATASET.xlsx"
open_excel <- function() {
return (read_excel(file.choose(), skip=6))
}
main <- function() {
library("readxl")
people <- open_excel()
#seperate the male and female data
females <- people[which(people["Sex"] == 'F'),]
males <- people[which(people["Sex"] == 'M'),]
# View(males,"Male DATA")
# View(females, "Female DATA")
}
| /displayDF.R | no_license | zepthro/english3150_dataset | R | false | false | 412 | r |
# Name of the source workbook. NOTE(review): unused below -- the file is
# chosen interactively via file.choose(); confirm intent.
FILE_NAME <- "3150 DATASET.xlsx"
# Interactively choose an Excel file and read it with readxl, skipping
# the first 6 rows (workbook header above the data).
open_excel <- function() {
	return (read_excel(file.choose(), skip=6))
}
# Entry point: load the user-selected spreadsheet and split the rows by
# the "Sex" column. NOTE(review): `females`/`males` are local and not
# returned; only the commented-out View() calls would display them.
main <- function() {
  library("readxl")
  people <- open_excel()
  # separate the male and female data
  females <- people[which(people["Sex"] == 'F'),]
  males <- people[which(people["Sex"] == 'M'),]
  # View(males,"Male DATA")
  # View(females, "Female DATA")
}
|
## Wine ### PCA ##
## Principal component analysis of the wine data set.
# Hard-coded local Windows path -- only works on the author's machine.
wine<-read.csv("C://Users//lenovo//Desktop//R studio//Data Science Assignments//PCA//wine.csv")
# Standardise columns 2:14 (mean 0 / sd 1); column 1 is presumably the
# class label -- TODO confirm against the CSV layout.
wine1<-scale(wine[,2:14])
# Classic PCA via spectral decomposition (princomp).
PCA<-princomp(wine1)
summary(PCA)  # proportion of variance per component
PCA$scores    # observation coordinates in component space
# Scatter plot of the component scores.
plot(PCA$scores,col='red',cex=0.2)
text(PCA$scores,labels = c(1:178),cex = 0.5) | /PCA.R | no_license | banthiyaprince/Data-Science | R | false | false | 270 | r | ## Wine ###PCA##
## Wine PCA (duplicate of the script above).
# Hard-coded local Windows path -- only works on the author's machine.
wine<-read.csv("C://Users//lenovo//Desktop//R studio//Data Science Assignments//PCA//wine.csv")
# Standardise columns 2:14 so each variable has mean 0 / sd 1.
wine1<-scale(wine[,2:14])
PCA<-princomp(wine1)
summary(PCA)  # proportion of variance per component
PCA$scores    # observation coordinates in component space
plot(PCA$scores,col='red',cex=0.2)
# Label each of the 178 observations with its row number.
text(PCA$scores,labels = c(1:178),cex = 0.5)
# Define a RAVE module output that embeds an interactive threeBrain 3D
# viewer, plus a "generate" action link, an "open in a new window" link,
# and a keyboard-shortcuts help link.
#
#   outputId      - base id; derived ids are <id>_widget/_btn/_new/_fun
#   title/width/order - forwarded to define_output()
#   surfaces, multiple_subject - accepted but not referenced in this body
#   message       - label for the "generate" action link
#   height        - viewer height; NULL = computed from client window size
#   additional_ui - extra UI, captured unevaluated and spliced into the quo
#   hide_btn      - suppress the "generate" link when TRUE
#
# NOTE(review): the body builds rlang quosures that are evaluated in the
# caller's frame (see eval(quo_squash(df)) at the bottom); helpers such as
# ns(), get_client_size(), monitor_subject_change(), eval_when_ready(),
# define_output(), load_scripts() and ..runtime_env are resolved there.
define_output_3d_viewer <- function(
  outputId, title, surfaces = 'pial', multiple_subject = F,
  message = 'Generate 3D Viewer ',
  height = NULL, width = 12, order = 0, additional_ui = NULL,
  hide_btn = FALSE, ...
){
  # Generate reactives: ids derived from outputId by suffix convention
  output_call = paste0(outputId, '_widget')
  output_btn = paste0(outputId, '_btn')
  output_new = paste0(outputId, '_new')
  output_fun = paste0(outputId, '_fun')
  # Capture additional_ui unevaluated so it can be spliced into the quosure
  additional_ui = substitute(additional_ui)
  # Quosure holding the UI function + server-side wiring; evaluated later
  # in the caller's frame via load_scripts()
  quo = rlang::quo({
    ...local_env = new.env()
    # UI function registered under the name in `outputId`
    assign(!!outputId, function(){
      clicked = shiny::isolate(input[[!!output_btn]])
      if( !!hide_btn ){
        btn = NULL
      }else{
        btn = tagList(htmltools::a(
          id = ns(!!output_btn),
          href = '#',
          class = "action-button",
          !!message
        ),
        ' | ')
      }
      # Height: explicit value, or derived from the client's window size
      if(is.null(!!height)){
        client_size = get_client_size()
        client_height = client_size$available_size[[2]] - 500
        height = sprintf('%.0fpx', client_height)
      }else{
        height = !!height
      }
      htmltools::tagList(
        htmltools::div(
          btn,
          htmltools::a(
            id = ns(!!output_new),
            href = '#',
            class = "action-button",
            ' Open Viewer in a New Window '
          ),
          ' | ',
          htmltools::a(
            href = 'https://github.com/dipterix/threeBrain/blob/master/shortcuts.md',
            target = '_blank', ' Keyboard Shortcuts ', rave::shiny_icons$external_link
          ),
          eval(!!additional_ui)
        ),
        htmltools::div(
          style = 'margin: 0 -10px -10px -10px',
          threeBrain::threejsBrainOutput(ns(!!output_call), height = height)
        )
      )
    }, envir = environment())
    local({
      `%?<-%` <- dipsaus::`%?<-%`
      input = getDefaultReactiveInput()
      output = getDefaultReactiveOutput()
      session = getDefaultReactiveDomain()
      .env = environment()
      .env$local_signal = 0
      # Open the rendered widget in a separate browser window via a URL
      # that encodes the session id / token for cross-window access
      observeEvent(input[[!!output_new]], {
        cat2('Opening a side window...')
        if(!is.null(...local_env$widget)){
          # generate url
          session = getDefaultReactiveDomain()
          rave_id = session$userData$rave_id
          if(is.null(rave_id)){ rave_id = '' }
          token = session$userData$token
          if(is.null(token)){ token = '' }
          globalId = ns(!!output_call)
          query_str = list(
            type = '3dviewer',
            globalId = htmltools::urlEncodePath(globalId),
            sessionId = htmltools::urlEncodePath(rave_id),
            token = token
          )
          url = paste(sprintf('%s=%s', names(query_str), as.vector(query_str)), collapse = '&')
          shinyjs::runjs(sprintf('window.open("/?%s");', url))
        }
      })
      # Server-side render function; also cached in ...local_env$widget so
      # the "new window" handler above can reuse the last rendered widget
      render_func = function( proxy ){
        # Monitor subject change. If changed, then refresh!
        if(!monitor_subject_change()){
          return(NULL)
        }
        local_signal = input[[!!output_btn]]
        render_value = length(local_signal) && (local_signal != 0)
        # if( render_value ){
        #   .env$local_signal = local_signal
        # }
        # get render function (module-defined <outputId>_fun)
        f = get0(!!output_fun, envir = ..runtime_env, ifnotfound = function(...){
          dipsaus::cat2('3D Viewer', !!outputId, 'cannot find function', !!output_fun, level = 'INFO')
        })
        # get client size to compute the side-panel width (capped at 300px)
        client_size = get_client_size()
        if(!is.null(client_size)){
          side_width = min(ceiling((client_size$available_size[[2]] - 300) / 3), 300)
        }else{
          side_width = 250
        }
        ...local_env$widget = NULL
        re = f(render_value, side_width, ...local_env, proxy)
        if(is.null(...local_env$widget)){
          ...local_env$widget = re
        }
        re
      }
      # Because monitor_subject_change needs execenv to be ready
      eval_when_ready(function(...){
        # Register render function
        proxy <- threeBrain::brain_proxy(!!output_call)
        output[[!!output_call]] <- threeBrain::renderBrain({
          render_func( proxy )
        })
      })
      # Register cross-session function so that other sessions can register the same output widget
      session$userData$cross_session_funcs %?<-% list()
      # ns must be defined, but in get_module(..., local=T) will raise error
      # because we are not in shiny environment
      ns %?<-% function(x){x}
      session$userData$cross_session_funcs[[ns(!!output_call)]] = render_func
    })
  })
  # generate output: declare the output in the module and attach the quo
  df = rlang::quo({
    define_output(
      definition = customizedUI(!!outputId),
      title = !!title,
      width = !!width,
      order = !!order
    )
    # https://github.com/r-lib/rlang/issues/772
    # This seems to be an issue of rlang
    # load_scripts(rlang::quo({!!quo})) will throw error of (Error: `arg` must be a symbol)
    load_scripts(rlang::quo(!!quo))
  })
  # Evaluate the declaration in the caller's frame (module definition env)
  eval(rlang::quo_squash(df), envir = parent.frame())
  # evaluate
  invisible(quo)
}
# define_output_3d_viewer <- function(
# outputId, title, surfaces = 'pial', multiple_subject = F,
# message = 'Generate 3D Viewer ',
# height = NULL, width = 12, order = 0, additional_ui = NULL,
# hide_btn = FALSE, ...
# ){
#
# # Generate reactives
# output_call = paste0(outputId, '_widget')
# output_btn = paste0(outputId, '_btn')
# output_new = paste0(outputId, '_new')
# output_fun = paste0(outputId, '_fun')
#
# additional_ui = substitute(additional_ui)
#
#
#
#
# quo = rlang::quo({
#
# ...local_env = new.env()
#
# assign(!!outputId, function(){
# clicked = shiny::isolate(input[[!!output_btn]])
#
# if( !!hide_btn ){
# btn = NULL
# }else{
# btn = tagList(htmltools::a(
# id = ns(!!output_btn),
# href = '#',
# class = "action-button",
# !!message
# ),
# ' | ')
# }
#
# if(is.null(!!height)){
# client_size = get_client_size()
# client_height = client_size$available_size[[2]] - 200
# height = sprintf('%.0fpx', client_height)
# }else{
# height = !!height
# }
#
#
# htmltools::tagList(
# htmltools::div(
# btn,
# htmltools::a(
# id = ns(!!output_new),
# href = '#',
# class = "action-button",
# ' Open Viewer in a New Window '
# ),
# ' | ',
# htmltools::a(
# href = 'https://github.com/dipterix/threeBrain/blob/dev/shortcuts.md',
# target = '_blank', ' Keyboard Shortcuts ', rave::shiny_icons$external_link
# ),
# eval(!!additional_ui)
# ),
# htmltools::div(
# style = 'margin: 0 -10px -10px -10px',
# threeBrain::threejsBrainOutput(ns(!!output_call), height = height)
# )
# )
# }, envir = environment())
# local({
# `%?<-%` <- dipsaus::`%?<-%`
# input = getDefaultReactiveInput()
# output = getDefaultReactiveOutput()
# session = getDefaultReactiveDomain()
# .env = environment()
# .env$local_signal = 0
#
# observeEvent(input[[!!output_new]], {
#
# cat2('Opening a side window...')
#
# if(!is.null(...local_env$widget)){
#
# # tryCatch({
# # widget = ...local_env$widget
# #
# # rave::send_to_daemon({
# # widget
# # }, type = 'threeBrain', outputId = ns(!!outputId),
# # save = c('widget'))
# # }, error = function(e){
# # showNotification(p('Failed to launch the side viewer. Error message: ', e), type = 'error')
# # })
#
# # generate url
# session = getDefaultReactiveDomain()
# rave_id = session$userData$rave_id
# if(is.null(rave_id)){ rave_id = '' }
# token = session$userData$token
# if(is.null(token)){ token = '' }
# globalId = ns(!!output_call)
#
# query_str = list(
# type = '3dviewer',
# globalId = htmltools::urlEncodePath(globalId),
# sessionId = htmltools::urlEncodePath(rave_id),
# token = token
# )
# url = paste(sprintf('%s=%s', names(query_str), as.vector(query_str)), collapse = '&')
#
# shinyjs::runjs(sprintf('window.open("/?%s");', url))
# }
#
# })
#
# render_func = function(){
# # threeBrain::renderBrain({
#
# # Monitor subject change. If changed, then refresh!
# if(!monitor_subject_change()){
# return(NULL)
# }
# local_signal = input[[!!output_btn]]
# # render_value = length(local_signal) && (local_signal > .env$local_signal)
# render_value = length(local_signal) && (local_signal != 0)
# if( render_value ){
# .env$local_signal = local_signal
# }
#
# # get render function
# f = get0(!!output_fun, envir = ..runtime_env, ifnotfound = function(...){
# cat2('3D Viewer', !!outputId, 'cannot find function', !!output_fun, level = 'INFO')
# })
#
# # get client size
# client_size = get_client_size()
# if(!is.null(client_size)){
# side_width = min(ceiling((client_size$available_size[[2]] - 300) / 3), 300)
# }else{
# side_width = 250
# }
# ...local_env$widget = NULL
# re = f(render_value, side_width, ...local_env)
# if(is.null(...local_env$widget)){
# ...local_env$widget = re
# }
# re
# #
# # brain = rave::rave_brain2(subject = subject, surfaces = !!surfaces)
# #
# # shiny::validate(
# # shiny::need(!is.null(brain), message = 'Cannot find surface/volume files')
# # )
# #
# # re = brain
# #
# #
# #
# # # Render function
# # if( length(local_signal) && local_signal > .env$local_signal ){
# # .env$local_signal = local_signal
# # f = get0(!!output_fun, envir = ..runtime_env, ifnotfound = function(...){
# # rutabaga::cat2('3D Viewer', !!outputId, 'cannot find function', !!output_fun, level = 'INFO')
# # })
# #
# # tryCatch({
# # re = f(brain)
# # }, error = function(e){
# # rave::logger(e, level = 'ERROR')
# # })
# #
# # }else{
# # ...local_env$widget = re$plot()
# # return(re$plot(side_display = FALSE))
# # }
# #
# # if('htmlwidget' %in% class(re)){
# # # User called $view() with additional params, directly call the widget
# # ...local_env$widget = re
# # re
# # }else if('R6' %in% class(re)){
# # # User just returned brain object
# # ...local_env$widget = re$plot()
# # re$plot(side_display = FALSE)
# # }else{
# # # User returned nothing
# # ...local_env$widget = brain$plot()
# # brain$plot(side_display = FALSE)
# # }
#
#
# # })
# }
#
# # Because monitor_subject_change needs execenv to be ready
# eval_when_ready(function(...){
# # Register render function
# output[[!!output_call]] <- threeBrain::renderBrain({
# render_func()
# })
# })
#
#
# # Register cross-session function so that other sessions can register the same output widget
# session$userData$cross_session_funcs %?<-% list()
# # ns must be defined, but in get_module(..., local=T) will raise error
# # because we are not in shiny environment
# ns %?<-% function(x){x}
# session$userData$cross_session_funcs[[ns(!!output_call)]] = render_func
# })
# })
#
# # generate output
# df = rlang::quo({
# define_output(
# definition = customizedUI(!!outputId),
# title = !!title,
# width = !!width,
# order = !!order
# )
#
# # https://github.com/r-lib/rlang/issues/772
# # This seems to be an issue of rlang
# # load_scripts(rlang::quo({!!quo})) will throw error of (Error: `arg` must be a symbol)
# load_scripts(rlang::quo(!!quo))
# })
# eval(rlang::quo_squash(df), envir = parent.frame())
# # evaluate
#
# invisible(quo)
#
# }
| /inst/tools/output_widgets.R | permissive | beauchamplab/ravebuiltins | R | false | false | 12,983 | r | define_output_3d_viewer <- function(
outputId, title, surfaces = 'pial', multiple_subject = F,
message = 'Generate 3D Viewer ',
height = NULL, width = 12, order = 0, additional_ui = NULL,
hide_btn = FALSE, ...
){
# Generate reactives
output_call = paste0(outputId, '_widget')
output_btn = paste0(outputId, '_btn')
output_new = paste0(outputId, '_new')
output_fun = paste0(outputId, '_fun')
additional_ui = substitute(additional_ui)
quo = rlang::quo({
...local_env = new.env()
assign(!!outputId, function(){
clicked = shiny::isolate(input[[!!output_btn]])
if( !!hide_btn ){
btn = NULL
}else{
btn = tagList(htmltools::a(
id = ns(!!output_btn),
href = '#',
class = "action-button",
!!message
),
' | ')
}
if(is.null(!!height)){
client_size = get_client_size()
client_height = client_size$available_size[[2]] - 500
height = sprintf('%.0fpx', client_height)
}else{
height = !!height
}
htmltools::tagList(
htmltools::div(
btn,
htmltools::a(
id = ns(!!output_new),
href = '#',
class = "action-button",
' Open Viewer in a New Window '
),
' | ',
htmltools::a(
href = 'https://github.com/dipterix/threeBrain/blob/master/shortcuts.md',
target = '_blank', ' Keyboard Shortcuts ', rave::shiny_icons$external_link
),
eval(!!additional_ui)
),
htmltools::div(
style = 'margin: 0 -10px -10px -10px',
threeBrain::threejsBrainOutput(ns(!!output_call), height = height)
)
)
}, envir = environment())
local({
`%?<-%` <- dipsaus::`%?<-%`
input = getDefaultReactiveInput()
output = getDefaultReactiveOutput()
session = getDefaultReactiveDomain()
.env = environment()
.env$local_signal = 0
observeEvent(input[[!!output_new]], {
cat2('Opening a side window...')
if(!is.null(...local_env$widget)){
# generate url
session = getDefaultReactiveDomain()
rave_id = session$userData$rave_id
if(is.null(rave_id)){ rave_id = '' }
token = session$userData$token
if(is.null(token)){ token = '' }
globalId = ns(!!output_call)
query_str = list(
type = '3dviewer',
globalId = htmltools::urlEncodePath(globalId),
sessionId = htmltools::urlEncodePath(rave_id),
token = token
)
url = paste(sprintf('%s=%s', names(query_str), as.vector(query_str)), collapse = '&')
shinyjs::runjs(sprintf('window.open("/?%s");', url))
}
})
render_func = function( proxy ){
# Monitor subject change. If changed, then refresh!
if(!monitor_subject_change()){
return(NULL)
}
local_signal = input[[!!output_btn]]
render_value = length(local_signal) && (local_signal != 0)
# if( render_value ){
# .env$local_signal = local_signal
# }
# get render function
f = get0(!!output_fun, envir = ..runtime_env, ifnotfound = function(...){
dipsaus::cat2('3D Viewer', !!outputId, 'cannot find function', !!output_fun, level = 'INFO')
})
# get client size
client_size = get_client_size()
if(!is.null(client_size)){
side_width = min(ceiling((client_size$available_size[[2]] - 300) / 3), 300)
}else{
side_width = 250
}
...local_env$widget = NULL
re = f(render_value, side_width, ...local_env, proxy)
if(is.null(...local_env$widget)){
...local_env$widget = re
}
re
}
# Because monitor_subject_change needs execenv to be ready
eval_when_ready(function(...){
# Register render function
proxy <- threeBrain::brain_proxy(!!output_call)
output[[!!output_call]] <- threeBrain::renderBrain({
render_func( proxy )
})
})
# Register cross-session function so that other sessions can register the same output widget
session$userData$cross_session_funcs %?<-% list()
# ns must be defined, but in get_module(..., local=T) will raise error
# because we are not in shiny environment
ns %?<-% function(x){x}
session$userData$cross_session_funcs[[ns(!!output_call)]] = render_func
})
})
# generate output
df = rlang::quo({
define_output(
definition = customizedUI(!!outputId),
title = !!title,
width = !!width,
order = !!order
)
# https://github.com/r-lib/rlang/issues/772
# This seems to be an issue of rlang
# load_scripts(rlang::quo({!!quo})) will throw error of (Error: `arg` must be a symbol)
load_scripts(rlang::quo(!!quo))
})
eval(rlang::quo_squash(df), envir = parent.frame())
# evaluate
invisible(quo)
}
# define_output_3d_viewer <- function(
# outputId, title, surfaces = 'pial', multiple_subject = F,
# message = 'Generate 3D Viewer ',
# height = NULL, width = 12, order = 0, additional_ui = NULL,
# hide_btn = FALSE, ...
# ){
#
# # Generate reactives
# output_call = paste0(outputId, '_widget')
# output_btn = paste0(outputId, '_btn')
# output_new = paste0(outputId, '_new')
# output_fun = paste0(outputId, '_fun')
#
# additional_ui = substitute(additional_ui)
#
#
#
#
# quo = rlang::quo({
#
# ...local_env = new.env()
#
# assign(!!outputId, function(){
# clicked = shiny::isolate(input[[!!output_btn]])
#
# if( !!hide_btn ){
# btn = NULL
# }else{
# btn = tagList(htmltools::a(
# id = ns(!!output_btn),
# href = '#',
# class = "action-button",
# !!message
# ),
# ' | ')
# }
#
# if(is.null(!!height)){
# client_size = get_client_size()
# client_height = client_size$available_size[[2]] - 200
# height = sprintf('%.0fpx', client_height)
# }else{
# height = !!height
# }
#
#
# htmltools::tagList(
# htmltools::div(
# btn,
# htmltools::a(
# id = ns(!!output_new),
# href = '#',
# class = "action-button",
# ' Open Viewer in a New Window '
# ),
# ' | ',
# htmltools::a(
# href = 'https://github.com/dipterix/threeBrain/blob/dev/shortcuts.md',
# target = '_blank', ' Keyboard Shortcuts ', rave::shiny_icons$external_link
# ),
# eval(!!additional_ui)
# ),
# htmltools::div(
# style = 'margin: 0 -10px -10px -10px',
# threeBrain::threejsBrainOutput(ns(!!output_call), height = height)
# )
# )
# }, envir = environment())
# local({
# `%?<-%` <- dipsaus::`%?<-%`
# input = getDefaultReactiveInput()
# output = getDefaultReactiveOutput()
# session = getDefaultReactiveDomain()
# .env = environment()
# .env$local_signal = 0
#
# observeEvent(input[[!!output_new]], {
#
# cat2('Opening a side window...')
#
# if(!is.null(...local_env$widget)){
#
# # tryCatch({
# # widget = ...local_env$widget
# #
# # rave::send_to_daemon({
# # widget
# # }, type = 'threeBrain', outputId = ns(!!outputId),
# # save = c('widget'))
# # }, error = function(e){
# # showNotification(p('Failed to launch the side viewer. Error message: ', e), type = 'error')
# # })
#
# # generate url
# session = getDefaultReactiveDomain()
# rave_id = session$userData$rave_id
# if(is.null(rave_id)){ rave_id = '' }
# token = session$userData$token
# if(is.null(token)){ token = '' }
# globalId = ns(!!output_call)
#
# query_str = list(
# type = '3dviewer',
# globalId = htmltools::urlEncodePath(globalId),
# sessionId = htmltools::urlEncodePath(rave_id),
# token = token
# )
# url = paste(sprintf('%s=%s', names(query_str), as.vector(query_str)), collapse = '&')
#
# shinyjs::runjs(sprintf('window.open("/?%s");', url))
# }
#
# })
#
# render_func = function(){
# # threeBrain::renderBrain({
#
# # Monitor subject change. If changed, then refresh!
# if(!monitor_subject_change()){
# return(NULL)
# }
# local_signal = input[[!!output_btn]]
# # render_value = length(local_signal) && (local_signal > .env$local_signal)
# render_value = length(local_signal) && (local_signal != 0)
# if( render_value ){
# .env$local_signal = local_signal
# }
#
# # get render function
# f = get0(!!output_fun, envir = ..runtime_env, ifnotfound = function(...){
# cat2('3D Viewer', !!outputId, 'cannot find function', !!output_fun, level = 'INFO')
# })
#
# # get client size
# client_size = get_client_size()
# if(!is.null(client_size)){
# side_width = min(ceiling((client_size$available_size[[2]] - 300) / 3), 300)
# }else{
# side_width = 250
# }
# ...local_env$widget = NULL
# re = f(render_value, side_width, ...local_env)
# if(is.null(...local_env$widget)){
# ...local_env$widget = re
# }
# re
# #
# # brain = rave::rave_brain2(subject = subject, surfaces = !!surfaces)
# #
# # shiny::validate(
# # shiny::need(!is.null(brain), message = 'Cannot find surface/volume files')
# # )
# #
# # re = brain
# #
# #
# #
# # # Render function
# # if( length(local_signal) && local_signal > .env$local_signal ){
# # .env$local_signal = local_signal
# # f = get0(!!output_fun, envir = ..runtime_env, ifnotfound = function(...){
# # rutabaga::cat2('3D Viewer', !!outputId, 'cannot find function', !!output_fun, level = 'INFO')
# # })
# #
# # tryCatch({
# # re = f(brain)
# # }, error = function(e){
# # rave::logger(e, level = 'ERROR')
# # })
# #
# # }else{
# # ...local_env$widget = re$plot()
# # return(re$plot(side_display = FALSE))
# # }
# #
# # if('htmlwidget' %in% class(re)){
# # # User called $view() with additional params, directly call the widget
# # ...local_env$widget = re
# # re
# # }else if('R6' %in% class(re)){
# # # User just returned brain object
# # ...local_env$widget = re$plot()
# # re$plot(side_display = FALSE)
# # }else{
# # # User returned nothing
# # ...local_env$widget = brain$plot()
# # brain$plot(side_display = FALSE)
# # }
#
#
# # })
# }
#
# # Because monitor_subject_change needs execenv to be ready
# eval_when_ready(function(...){
# # Register render function
# output[[!!output_call]] <- threeBrain::renderBrain({
# render_func()
# })
# })
#
#
# # Register cross-session function so that other sessions can register the same output widget
# session$userData$cross_session_funcs %?<-% list()
# # ns must be defined, but in get_module(..., local=T) will raise error
# # because we are not in shiny environment
# ns %?<-% function(x){x}
# session$userData$cross_session_funcs[[ns(!!output_call)]] = render_func
# })
# })
#
# # generate output
# df = rlang::quo({
# define_output(
# definition = customizedUI(!!outputId),
# title = !!title,
# width = !!width,
# order = !!order
# )
#
# # https://github.com/r-lib/rlang/issues/772
# # This seems to be an issue of rlang
# # load_scripts(rlang::quo({!!quo})) will throw error of (Error: `arg` must be a symbol)
# load_scripts(rlang::quo(!!quo))
# })
# eval(rlang::quo_squash(df), envir = parent.frame())
# # evaluate
#
# invisible(quo)
#
# }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readgraduates.R
\name{readgraduates}
\alias{readgraduates}
\title{Read Graduates}
\usage{
readgraduates( year )
}
\arguments{
\item{year}{Sets the year of graduates to be scanned}
}
\value{
A list of names of graduates from \code{year}
}
\description{
Scan for the names of graduates and create a list of names
}
| /man/readgraduates.Rd | no_license | chriswu1996/williamsgraduates | R | false | true | 408 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readgraduates.R
\name{readgraduates}
\alias{readgraduates}
\title{Read Graduates}
\usage{
readgraduates( year )
}
\arguments{
\item{year}{Sets the year of graduates to be scanned}
}
\value{
A list of names of graduates from \code{year}
}
\description{
Scan for the names of graduates and create a list of names
}
|
library(DBI)
library(RMySQL)
library(jsonlite)
id = fromJSON("/home/wayne/R/common_code/keys.json")$id
pw = fromJSON("/home/wayne/R/common_code/keys.json")$pw
con <- dbConnect(MySQL(), user=(id), password=(pw),
dbname="buster", host="192.168.1.21") | /common_code/MySQL-buster-con.R | no_license | cetanhota/R-Code | R | false | false | 268 | r | library(DBI)
# Open a MySQL connection to the "buster" database on a LAN host.
# Credentials are read from a local JSON keyfile (kept out of the script).
library(RMySQL)
library(jsonlite)
# NOTE(review): the keyfile is parsed twice; could be read once and reused.
id = fromJSON("/home/wayne/R/common_code/keys.json")$id
pw = fromJSON("/home/wayne/R/common_code/keys.json")$pw
con <- dbConnect(MySQL(), user=(id), password=(pw),
                 dbname="buster", host="192.168.1.21")
## Matrix inversion is usually a costly computation and there may be some benefit to caching
## the inverse of a matrix rather than computing it repeatedly. Specifically, the
## makeCacheMatrix and cacheSolve functions can be used to cache the inverse of any matrix.
## Underlying assumption: the matrix supplied is always invertible.
## The makeCacheMatrix creates a special "vector" or a list containing a function.
## Specifically, it creates a special "matrix" object that can cache its inverse.
## This function will do the following:
## 1. Set the value of the vector;
## 2. Get the value of the vector;
## 3. Set the value of the inverse of the matrix; and
## 4. Get the value of the inverse of the matrix.
## Create a special "matrix" object that caches its inverse.
##
## Returns a list of four accessor functions closed over `x` (the matrix)
## and `inv` (the cached inverse):
##   set(y)            -- replace the matrix and drop the stale cache
##   get()             -- return the current matrix
##   setinverse(inverse) -- store a computed inverse in the cache
##   getinverse()      -- return the cached inverse, or NULL if unset
makeCacheMatrix <- function(x = matrix()) {
        inv <- NULL
        set <- function(y) {
                x <<- y
                inv <<- NULL  # matrix changed, so the cached inverse is stale
        }
        get <- function() x
        # Bug fix: the parameter was named `inv` while the body assigned the
        # undefined symbol `inverse`, so setinverse() always errored.
        setinverse <- function(inverse) inv <<- inverse
        getinverse <- function() inv
        list(set = set,
             get = get,
             setinverse = setinverse,
             getinverse = getinverse)
}
## cacheSolve computes the inverse of the special "matrix" returned by the function
## above. Assuming the inverse has already been calculated and the matrix has not changed,
## then this function should be able to retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## A previously cached inverse is reused (with a message); otherwise the
## inverse is computed with solve(), stored in the cache, and returned.
cacheSolve <- function(x, ...) {
        cached <- x$getinverse()
        if (is.null(cached)) {
                # Cache miss: compute, store, and return the fresh inverse.
                fresh <- solve(x$get())
                x$setinverse(fresh)
                return(fresh)
        }
        message("getting cached data")
        cached
}
| /cachematrix.R | no_license | jmaeacielo/ProgrammingAssignment2 | R | false | false | 1,630 | r | ## Matrix inversion is usually a costly computation and there may be some benefit to caching
## the inverse of a matrix rather than computing it repeatedly. Specifically, the
## makeCacheMatrix and cacheSolve functions can be used to cache the inverse of any matrix.
## Underlying assumption: the matrix supplied is always invertible.
## The makeCacheMatrix creates a special "vector" or a list containing a function.
## Specifically, it creates a special "matrix" object that can cache its inverse.
## This function will do the following:
## 1. Set the value of the vector;
## 2. Get the value of the vector;
## 3. Set the value of the inverse of the matrix; and
## 4. Get the value of the inverse of the matrix.
## Create a special "matrix" object that caches its inverse.
##
## Returns a list of four accessor functions closed over `x` (the matrix)
## and `inv` (the cached inverse):
##   set(y)            -- replace the matrix and drop the stale cache
##   get()             -- return the current matrix
##   setinverse(inverse) -- store a computed inverse in the cache
##   getinverse()      -- return the cached inverse, or NULL if unset
makeCacheMatrix <- function(x = matrix()) {
        inv <- NULL
        set <- function(y) {
                x <<- y
                inv <<- NULL  # matrix changed, so the cached inverse is stale
        }
        get <- function() x
        # Bug fix: the parameter was named `inv` while the body assigned the
        # undefined symbol `inverse`, so setinverse() always errored.
        setinverse <- function(inverse) inv <<- inverse
        getinverse <- function() inv
        list(set = set,
             get = get,
             setinverse = setinverse,
             getinverse = getinverse)
}
## cacheSolve computes the inverse of the special "matrix" returned by the function
## above. Assuming the inverse has already been calculated and the matrix has not changed,
## then this function should be able to retrieve the inverse from the cache.
## Compute the inverse of the special "matrix" returned by makeCacheMatrix().
## If the inverse has already been cached (and the matrix is unchanged),
## the cached value is returned without recomputation.
##
## x   : list of accessors produced by makeCacheMatrix()
## ... : accepted but currently not forwarded to solve() -- TODO confirm
##       this is intentional
cacheSolve <- function(x, ...) {
        inv <- x$getinverse()
        # If inverse has already been calculated, return the cached copy
        if (!is.null(inv)) {
                # cache hit
                message("getting cached data")
                return(inv)
        }
        # Cache miss: compute the inverse, store it, and return it
        data <- x$get()
        inv <- solve(data)
        x$setinverse(inv)
        inv
}
|
\documentclass[a4paper, 11pt]{report}
\usepackage[left=1.5cm,right=1.5cm,top=2cm,bottom=4.5cm,a4paper]{geometry}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{booktabs}
\usepackage{kotex}
\usepackage[hangul, nonfrench]{dhucs}
\usepackage{titlesec}
\renewcommand{\arraystretch}{1.2}
\def\thesection{\arabic{section}}
\def\thesubsection{\arabic{section}.\arabic{subsection}}
\def\thesubsubsection{\arabic{section}.\arabic{subsection}.\arabic{subsubsection}}
\titleformat{\paragraph}
{\normalfont\normalsize\bfseries}{\theparagraph}{1em}{}
\titlespacing*{\paragraph}
{0pt}{3.25ex plus 1ex minus .2ex}{1.5ex plus .2ex}
\usepackage{fancyhdr}
\pagestyle{fancy}\setlength\headheight{100pt}
\fancyhead[L]{\includegraphics[width=4cm]{C://Users//user//Desktop//APEX//Logo//APEX_logo.png}}
\fancyhead[R]{\textbf{Center for Advancing Cancer Therapeutics\\}}
\renewcommand{\headrulewidth}{2pt}
\renewcommand{\footrulewidth}{1pt}
\usepackage{longtable}
\usepackage{caption}
\usepackage{indentfirst}
%\parindent=1em
\usepackage{subcaption}
\setcounter{secnumdepth}{4}
\setcounter{tocdepth}{4}
\renewcommand{\contentsname}{TABLE OF CONTENTS}
%\newenvironment{knitrout}{}{}
\renewcommand{\contentsname}{목록}
\usepackage{alltt}
\usepackage{booktabs}
\usepackage{multirow}
\usepackage{makecell}
\usepackage{setspace}
\usepackage{type1cm}
\usepackage{array}
\usepackage{pdflscape, lipsum}
\usepackage{enumerate}
\usepackage{graphicx}
\DeclareGraphicsExtensions{.pdf,.png,.jpg}
\usepackage[table,xcdraw]{xcolor}
\usepackage{floatrow}
\linespread{1.5}
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\begin{document}
\addtocounter{section}{5}
\section{결과 및 토의}
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F >>=
# Round half away from zero, unlike base round() which rounds half to
# even: Round(2.5) == 3 and Round(-2.5) == -3. Vectorized over `x`.
#   x : numeric vector to round
#   n : number of decimal places (default 0)
Round <- function(x, n = 0) {
  scale <- 10^n
  magnitude <- trunc(abs(x) * scale + 0.5) / scale
  sign(x) * magnitude
}
# Tumor growth inhibition (TGI, %) of `drugName` versus the control group
# at day `time`: 100 * (1 - delta(drug) / delta(control)), where delta is
# the change in mean tumor volume from day 0. Result rounded to 2 d.p.
# NOTE(review): relies on the globals `totalData` and `timeDay` being
# defined in the calling environment.
getTGIPercent = function(drugName,time=timeDay,controlName="vehicle"){
  deltaControl = mean(totalData$tumor_Volume[totalData$group==controlName & totalData$Time_Day==time]) - mean(totalData$tumor_Volume[totalData$group==controlName & totalData$Time_Day==0])
  deltaDrug = mean(totalData$tumor_Volume[totalData$group==drugName & totalData$Time_Day==time]) - mean(totalData$tumor_Volume[totalData$group== drugName & totalData$Time_Day== 0 ])
  return(Round((1-deltaDrug/deltaControl)*100,2))
}
# Percent change in a group's mean body weight from day 0 to day `time`,
# rounded to 2 d.p. NOTE(review): relies on the globals `totalData` and
# `timeDay` being defined in the calling environment.
getWeightChangePercent = function(drugName,time=timeDay){
  deltaDrug = (mean(totalData$weight[totalData$group==drugName & totalData$Time_Day==time]) - mean(totalData$weight[totalData$group== drugName & totalData$Time_Day== 0 ]))/ mean(totalData$weight[totalData$group== drugName & totalData$Time_Day== 0 ])
  return(Round(deltaDrug*100,2))
}
# Ratio (%) of a drug group's mean weight change to the control group's
# mean weight change over day 0 -> `time`, rounded to 2 d.p.
# NOTE(review): relies on the globals `totalData` and `timeDay`.
getWeightChangePercentByControl = function(drugName,time=timeDay,controlName="vehicle"){
  deltaControl = mean(totalData$weight[totalData$group==controlName & totalData$Time_Day==time]) - mean(totalData$weight[totalData$group==controlName & totalData$Time_Day==0])
  deltaDrug = mean(totalData$weight[totalData$group==drugName & totalData$Time_Day==time]) - mean(totalData$weight[totalData$group== drugName & totalData$Time_Day== 0 ])
  return(Round((deltaDrug/deltaControl)*100,2))
}
# Parse a lab workbook (repeated per-group measurement panels) into a tidy
# long-format data frame.
#
#   fileName  : path to the Excel workbook
#   sheetName : sheet to read
#   target    : "tumor" -> columns ID/Time_Day/Long_mm/Short_mm/Treatment
#               "weight" -> columns ID/Time_Day/Weight/Treatment
#
# Strategy: scan the sheet for the cell containing "roup" (group header)
# and the row containing "treatment" (time header), derive each group's
# row range, then slice each repeated panel into a per-group data frame
# and rbind them together.
prepCode<-function(fileName,sheetName,target="tumor"){
  library(stringr)
  library(readxl)
  library(dplyr)
  library(stringi)
  library(latex2exp)
  testcase=read_excel(fileName,sheetName)
  range=vector()
  newDataframe=data.frame()
  roof_break=0
  temp=0
  time=0
  repeated=0
  # get start index of the "group" header cell
  # NOTE(review): j iterates 1:ncol but is used as the ROW index (and i as
  # the column index) -- looks transposed; confirm against the sheet layout.
  for( j in 1:ncol(testcase)){
    for ( i in 1:nrow(testcase)){
      if (grepl("roup",testcase[j,i])) {
        roof_break=1
        start_row_index=j
        start_col_index=i
        break
      }
    }
    if (roof_break==1){
      break
    }
  }
  # Tumor panels repeat every 8 columns starting at the group column
  repeated=seq(start_col_index,ncol(testcase),8)
  # get the row holding the "treatment" (time) header
  for( j in 1:ncol(testcase)){
    for ( i in 1:nrow(testcase)){
      if (grepl("treatment",testcase[j,i])) {
        roof_break=2
        time=j
        break
      }
    }
    if (roof_break==2){
      break
    }
  }
  ## get group range: record the starting row of every group block; stop
  ## when the data column (start_col_index+2) runs out
  while(TRUE){
    if(!is.na(testcase[start_row_index,start_col_index+1])){
      temp=temp+1
      range[temp]=start_row_index
    }
    if(is.na(testcase[start_row_index,start_col_index+2])){
      temp=temp+1
      range[temp]=start_row_index
      break
    }
    start_row_index=start_row_index+1
  }
  ## (debug output of the detected indices was here; original comment was
  ## mojibake-garbled Korean)
  # NOTE(review): edit() opens an interactive spreadsheet editor and the
  # confirmation prompt is hard-coded to "yes" -- the readline() check is
  # commented out below.
  edit(testcase)
  ##ans <-readline('insert plseas : ')
  ans <-"yes"
  if(ans == "yes"){
    if(target == "tumor"){
      # One pass per repeated panel, one slice per group block
      for( j in 1:length(repeated)){
        for(i in 1:(length(range)-1)){
          tempDf=data.frame(testcase[range[i]:(range[i+1]-1),repeated[j]+2],testcase[time,repeated[j]],testcase[range[i]:(range[i+1]-1),repeated[j]+3],testcase[range[i]:(range[i+1]-1),repeated[j]+4],testcase[range[i],repeated[j]+1]) ## per-group slice (ID, time, long, short, treatment)
          colnames(tempDf)=c("ID","Time_Day","Long_mm","Short_mm","Treatment")
          newDataframe=rbind(newDataframe,tempDf)
        }
        colnames(newDataframe)=c("ID","Time_Day","Long_mm","Short_mm","Treatment")
      }
      head(na.omit(newDataframe))
      sample_n(newDataframe,20)
      return(newDataframe)
    }
    else if(target=="weight"){
      # Weight panels repeat every 6 columns instead of 8
      repeated=seq(start_col_index,ncol(testcase),6)
      for( j in 1:length(repeated)){
        for(i in 1:(length(range)-1)){
          tempDf=data.frame(testcase[range[i]:(range[i+1]-1),repeated[j]+2],testcase[time,repeated[j]],testcase[range[i]:(range[i+1]-1),repeated[j]+3],testcase[range[i],repeated[j]+1]) ## per-group slice (ID, time, weight, treatment)
          colnames(tempDf)=c("ID","Time_Day","Weight","Treatment")
          newDataframe=rbind(newDataframe,tempDf)
        }
        colnames(newDataframe)=c("ID","Time_Day","Weight","Treatment")
      }
      head(na.omit(newDataframe))
      return(newDataframe)
    }
    else{
      # Korean: "is the target tumor data or weight data?"
      stop("읽고자 하는 대상이 tumor data입니까 또는 weight 데이터 입니까");
    }
  }
  else{
    # Korean: "please re-check the original data"
    stop("원본 데이터 재확인해주세요. ")
  }
}
# Load and assemble the study data.
# NOTE(review): hard-coded working directory and redacted file names --
# this chunk only runs on the original analysis machine.
setwd("/Users/user/Desktop/RR_pilot/")
tumorSize=prepCode("(CACT) --------------","tumor")
# NOTE(review): sheetName is not supplied here (only fileName and target);
# prepCode() would fail on the missing argument -- confirm.
weight= prepCode("(CACT) ------------",target = "weight")
groupSeparation = read_excel("GroupSeparation.xlsx")
# Tumor volume estimate from calipers: long * (short/2)^2 * 2
tumorSize$TumorVolume=as.numeric(tumorSize$Long_mm)*((as.numeric(tumorSize$Short_mm)/2)**2)*2
# Combine tumor and weight measurements into one table
totalData=data.frame(tumorSize[,1],as.numeric(tumorSize$Time_Day),tumorSize[,3:4],tumorSize$TumorVolume,as.numeric(weight$Weight),weight$Treatment)
colnames(totalData)=c("ID","Time_Day","Long_mm^3","Short_mm^3","tumor_Volume","weight","group")
# Normalise group labels: strip embedded line breaks and trim whitespace
totalData$group = str_replace_all(totalData$group,"[\r\n]"," ")
totalData$group = stri_trim(totalData$group)
# Append per-group summary rows to the group-separation table: one row
# with ID "Mean" and one with ID "SD" per group, stacked on top of the
# individual animal rows and sorted by ID.
getAvgAndSd= function(groupSeparation){
  temp = groupSeparation %>% group_by(Group) %>% summarise(Average=mean(tumorVolume)) %>% select(Group,tumorVolume=Average)
  temp$ID="Mean"
  temp2 = groupSeparation %>% group_by(Group) %>% summarise(SD=sd(tumorVolume)) %>% select(Group,tumorVolume=SD)
  temp2$ID="SD"
  # Stack mean/SD rows with the raw rows in a common column order
  tempDf = rbind(temp,temp2) %>% select(ID,tumorVolume,Group) %>% rbind(groupSeparation) %>% arrange(ID)
  return(tempDf)
}
library(dplyr)
library(ggplot2)
library(knitr)
library(kableExtra)
library(xtable)
@
\subsection{유방암 ---- 모델}
\subsubsection{종양 성장 확인 및 군 분리}
\noindent -MDA-MB-231 세포 이식 후 33일째, 측정된 종양 부피의 평균이 \Sexpr{Round(mean(groupSeparation$tumorVolume),2)} $mm^3$ 오차 $\pm$ \Sexpr{Round(sd(groupSeparation$tumorVolume),2)} 일 때 군 분리를 하였음. 실험군의 개체별, 군별 종양부피 값은 다음 Table 1에 명시함
\newline
<<echo=FALSE, warning=FALSE, error=FALSE, message=F >>=
# Table 1: per-animal tumour volumes at group separation, with per-group
# Mean/SD rows added by getAvgAndSd(), laid out side by side per group.
groupSeparation = getAvgAndSd(groupSeparation = groupSeparation)
groupSeparation$tumorVolume = Round(groupSeparation$tumorVolume,2)
tempList=list()
# Each group contributes two table columns (ID, tumorVolume).
groupingColume=2
group=unique(groupSeparation$Group)
for(i in seq_along(group)){
tempList[[i]] = groupSeparation%>% filter(Group == group[i]) %>% select(ID,tumorVolume)
}
tempList=do.call(cbind, tempList)
kable(tempList,format="latex", booktabs=T, align="c" , caption="군 분리 시 종양 부피")%>% kable_styling(font_size = 12,latex_options =c("hold_position")) %>% add_header_above(c(
setNames(groupingColume,group[1]),setNames(groupingColume,group[2]),setNames(groupingColume,group[3]),setNames(groupingColume,group[4])
### Add one spanning header per experimental group (assumes exactly 4 groups).
),bold = T, italic = T)
@
\subsubsection{종양 체적 측정 및 결과 분석}
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F>>=
# Table 2: raw tumour dimensions and computed volume per measurement,
# rounded to 2 decimal places (columns 3-5).
tumorTable=totalData %>% select(ID,Time_Day,`Long_mm^3`,`Short_mm^3`,tumor_Volume,group)
for(i in 3:5){
tumorTable[,i]=Round(as.numeric(tumorTable[,i]),2)
}
colnames(tumorTable)=c("ID","Time","Long(mm)","Short(mm)","Size(mm^3)","Group")
kable(
x=tumorTable
,format="latex", longtable=T, booktabs=T, align="c", caption="종양부피의 평균 및 표준편차") %>% kable_styling(font_size = 12,latex_options =c("hold_position","repeat_header"))
@
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F ,fig.width=6.5,fig.height=4.5>>=
# Figure 1a: mean tumour volume per group over time, with +/- SD error bars.
groupColor=c("purple","blue","red","black")
tumorGrowthGph =totalData %>% select(Time_Day,tumor_Volume,group) %>% group_by(Time_Day,group) %>% summarise( tumorVolAvg = mean(tumor_Volume),tumorVolSd = sd(tumor_Volume))
ggplot(tumorGrowthGph,aes(x=Time_Day,y=tumorVolAvg,group=group,color=group)) +
geom_point(size=2.5)+
geom_line(size=0.7)+
geom_errorbar(aes(ymin=tumorVolAvg-tumorVolSd,ymax=tumorVolAvg+tumorVolSd),width=0.3, alpha=1, size=0.4)+
labs(x=TeX("Days after treatment"),y=TeX("Tumor Volume $mm^3$ "))+
theme_bw(base_size = 10)+
scale_color_manual(values = groupColor)+
theme(legend.position = c(0.28,0.84),legend.text = element_text(size = 9),legend.title =element_text(size=8,face=4))
@
\centerline{Figure 1a. 그룹 별 종양 성장 곡선}
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F ,fig.width=3, fig.height=2,fig.show='hold',fig.align='center'>>=
# Figure 1b: one small panel per group, one growth curve per animal.
expGroup= unique(tumorGrowthGph$group)
tumorGrowthIndvidualGph=totalData %>% select(ID,Time_Day,tumor_Volume,group)%>% group_by(Time_Day,group) %>% arrange(group)
for(i in seq_along(expGroup)){
data=tumorGrowthIndvidualGph %>% filter(group==expGroup[i])
# print() is required for ggplot objects drawn inside a loop in knitr.
print(ggplot(data=data,aes(x=Time_Day,y=tumor_Volume,group=ID))+
geom_line(size=0.7,color=groupColor[i])+
theme_bw(base_size = 10)+
labs(x=TeX("Days after treatment"),y=TeX("Tumor Volume $mm^3$ "))+
theme(legend.position = c(0.28,0.84),legend.text = element_text(size = 9))+
ggtitle(as.character(data$group))+
theme(plot.title = element_text(size=9)))
}
@
\centerline{Figure 1b. 개체 별 종양 성장 곡선}
\newpage
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F ,fig.align = "center",fig.show='hold' >>=
# Tumour growth inhibition (TGI, %) at day 35 vs. the vehicle control group.
tgiTable= totalData %>% filter(Time_Day==35) %>% group_by(group) %>% summarise(TGI = getTGIPercent(drugName = group,time = Time_Day)) %>% filter(group != "vehicle")
colnames(tgiTable)=c("Group","TGI(%)")
kable(
x=tgiTable
,format="latex", longtable=F, booktabs=T, align="c", caption=" TGI") %>% kable_styling(font_size = 12,latex_options =c("hold_position","striped"),position = "center")
@
\paragraph{체중 측정 및 결과 분석}
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F ,fig.align = "center",fig.show='hold' >>=
# Per-animal body weights by day and group.
weightTable = totalData %>% select(ID,Time_Day,weight,group)
kable(
x=weightTable
,format="latex", longtable=T, booktabs=T, align="c", caption="체중의 평균 및 표준편차") %>% kable_styling(font_size = 12,latex_options =c("hold_position","repeat_header")) %>% column_spec(1, width = "3cm") %>% column_spec(2, width = "3cm") %>% column_spec(3, width = "3cm")
@
\noindent
\newline
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F ,fig.width=8, fig.height=5,fig.show='hold',fig.align='center'>>=
# Figure 2: mean body weight per group over time, with +/- SD error bars.
weightGph = totalData %>% select(ID,Time_Day,weight,group) %>% group_by(Time_Day,group) %>% summarise(AVG=mean(weight),SD=sd(weight))
ggplot(weightGph,aes(x=Time_Day,y=AVG,group=group,color=group)) +
geom_point(size=2.5)+
geom_line(size=0.7)+
geom_errorbar(aes(ymin=AVG-SD,ymax=AVG+SD),width=0.3, alpha=1, size=0.4)+
labs(x=TeX("Days after treatment"),y=TeX("Body Weight(g)"))+
theme_bw(base_size = 10)+
scale_color_manual(values = groupColor)+
theme(legend.position = c(0.26,0.85),legend.text = element_text(size = 9),legend.title =element_text(size=8,face=4))+
ylim(c(6,30))
@
\centerline{Figure 2. 그룹별 체중 변화 곡선}
\newpage
\end{document}
| /prepCode.R | no_license | csbiy/AsanMedicalCenter | R | false | false | 13,244 | r |
\documentclass[a4paper, 11pt]{report}
\usepackage[left=1.5cm,right=1.5cm,top=2cm,bottom=4.5cm,a4paper]{geometry}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{booktabs}
\usepackage{kotex}
\usepackage[hangul, nonfrench]{dhucs}
\usepackage{titlesec}
\renewcommand{\arraystretch}{1.2}
\def\thesection{\arabic{section}}
\def\thesubsection{\arabic{section}.\arabic{subsection}}
\def\thesubsubsection{\arabic{section}.\arabic{subsection}.\arabic{subsubsection}}
\titleformat{\paragraph}
{\normalfont\normalsize\bfseries}{\theparagraph}{1em}{}
\titlespacing*{\paragraph}
{0pt}{3.25ex plus 1ex minus .2ex}{1.5ex plus .2ex}
\usepackage{fancyhdr}
\pagestyle{fancy}\setlength\headheight{100pt}
\fancyhead[L]{\includegraphics[width=4cm]{C://Users//user//Desktop//APEX//Logo//APEX_logo.png}}
\fancyhead[R]{\textbf{Center for Advancing Cancer Therapeutics\\}}
\renewcommand{\headrulewidth}{2pt}
\renewcommand{\footrulewidth}{1pt}
\usepackage{longtable}
\usepackage{caption}
\usepackage{indentfirst}
%\parindent=1em
\usepackage{subcaption}
\setcounter{secnumdepth}{4}
\setcounter{tocdepth}{4}
\renewcommand{\contentsname}{TABLE OF CONTENTS}
%\newenvironment{knitrout}{}{}
\renewcommand{\contentsname}{목록}
\usepackage{alltt}
\usepackage{booktabs}
\usepackage{multirow}
\usepackage{makecell}
\usepackage{setspace}
\usepackage{type1cm}
\usepackage{array}
\usepackage{pdflscape, lipsum}
\usepackage{enumerate}
\usepackage{graphicx}
\DeclareGraphicsExtensions{.pdf,.png,.jpg}
\usepackage[table,xcdraw]{xcolor}
\usepackage{floatrow}
\linespread{1.5}
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\begin{document}
\addtocounter{section}{5}
\section{결과 및 토의 }
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F >>=
# Round half away from zero (unlike base round(), which rounds half to even).
# x: numeric vector; n: number of decimal places (default 0).
# Returns a numeric vector of the same length as x.
Round = function(x, n=0)
{
scaled <- trunc(abs(x) * 10^n + 0.5)
sign(x) * scaled / 10^n
}
# Tumour growth inhibition (TGI, %) of a treatment group relative to control,
# rounded to 2 decimal places. Reads the global `totalData` data frame
# (columns group, Time_Day, tumor_Volume). `time` defaults to the global
# `timeDay` -- NOTE(review): `timeDay` is not defined in this file; callers
# always pass `time` explicitly.
getTGIPercent = function(drugName,time=timeDay,controlName="vehicle"){
# Mean tumour volume for one group on one day.
meanVolume = function(grp, day) {
mean(totalData$tumor_Volume[totalData$group == grp & totalData$Time_Day == day])
}
deltaControl = meanVolume(controlName, time) - meanVolume(controlName, 0)
deltaDrug = meanVolume(drugName, time) - meanVolume(drugName, 0)
return(Round((1 - deltaDrug/deltaControl)*100, 2))
}
# Percent body-weight change of a treatment group between day 0 and `time`,
# relative to its day-0 mean weight; rounded to 2 decimal places.
# Reads the global `totalData` (columns group, Time_Day, weight).
getWeightChangePercent = function(drugName,time=timeDay){
# Mean weight of the treatment group on one day.
meanWeight = function(day) {
mean(totalData$weight[totalData$group == drugName & totalData$Time_Day == day])
}
baseline = meanWeight(0)
return(Round((meanWeight(time) - baseline) / baseline * 100, 2))
}
# Weight change of a treatment group expressed as a percentage of the control
# group's weight change over the same period; rounded to 2 decimal places.
# Reads the global `totalData` (columns group, Time_Day, weight).
getWeightChangePercentByControl = function(drugName,time=timeDay,controlName="vehicle"){
# Change in mean weight of one group between day 0 and `time`.
weightDelta = function(grp) {
mean(totalData$weight[totalData$group == grp & totalData$Time_Day == time]) -
mean(totalData$weight[totalData$group == grp & totalData$Time_Day == 0])
}
return(Round((weightDelta(drugName)/weightDelta(controlName))*100, 2))
}
# Reshape a wide, block-structured Excel measurement sheet into a long data
# frame. `fileName`/`sheetName` are passed to readxl::read_excel();
# `target` selects the layout: "tumor" (long/short diameters) or "weight"
# (body weight). Returns the long-format data frame, or stop()s on an
# unrecognised `target`.
# NOTE(review): relies on the sheet layout (measurement blocks every 8 columns
# for tumor / 6 for weight, group labels containing "roup", a "treatment"
# marker row) -- verify against the actual workbook before reuse.
prepCode<-function(fileName,sheetName,target="tumor"){
library(stringr)
library(readxl)
library(dplyr)
library(stringi)
library(latex2exp)
testcase=read_excel(fileName,sheetName)
range=vector()            # start row of each group block (plus a final sentinel)
newDataframe=data.frame() # accumulated long-format output
roof_break=0              # flag used to break out of the nested search loops
temp=0
time=0                    # index of the "treatment" (day) header row
repeated=0                # column indices where each repeated block starts
# Locate the first cell whose text contains "roup" (matches "Group"/"group");
# its position anchors the data region.
# NOTE(review): j iterates 1:ncol but is used as the ROW index (and i as the
# column index) -- this only works when the sheet region is large enough in
# both directions; confirm against the workbook.
for( j in 1:ncol(testcase)){
for ( i in 1:nrow(testcase)){
if (grepl("roup",testcase[j,i])) {
roof_break=1
start_row_index=j
start_col_index=i
break
}
}
if (roof_break==1){
break
}
}
# Tumor sheets repeat their measurement block every 8 columns.
repeated=seq(start_col_index,ncol(testcase),8)
# Locate the row containing the "treatment" marker (holds the day values).
for( j in 1:ncol(testcase)){
for ( i in 1:nrow(testcase)){
if (grepl("treatment",testcase[j,i])) {
roof_break=2
time=j
break
}
}
if (roof_break==2){
break
}
}
# Collect the starting row of every group block; a blank cell two columns to
# the right of the anchor marks the end of the region (sentinel row appended).
while(TRUE){
if(!is.na(testcase[start_row_index,start_col_index+1])){
temp=temp+1
range[temp]=start_row_index
}
if(is.na(testcase[start_row_index,start_col_index+2])){
temp=temp+1
range[temp]=start_row_index
break
}
start_row_index=start_row_index+1
}
# (garbled debug cat() of the detected indices removed; confirmation below is disabled)
edit(testcase)
##ans <-readline('insert plseas : ')
ans <-"yes"  # interactive confirmation disabled; always proceed
if(ans == "yes"){
if(target == "tumor"){
# For every repeated block and every group, slice out the ID / day / long /
# short / treatment columns and stack them into long format.
for( j in 1:length(repeated)){
for(i in 1:(length(range)-1)){
tempDf=data.frame(testcase[range[i]:(range[i+1]-1),repeated[j]+2],testcase[time,repeated[j]],testcase[range[i]:(range[i+1]-1),repeated[j]+3],testcase[range[i]:(range[i+1]-1),repeated[j]+4],testcase[range[i],repeated[j]+1]) # one group block in long format
colnames(tempDf)=c("ID","Time_Day","Long_mm","Short_mm","Treatment")
newDataframe=rbind(newDataframe,tempDf)
}
colnames(newDataframe)=c("ID","Time_Day","Long_mm","Short_mm","Treatment")
}
head(na.omit(newDataframe))   # NOTE(review): result unused inside a function
sample_n(newDataframe,20)     # NOTE(review): result unused inside a function
return(newDataframe)
}
else if(target=="weight"){
# Weight sheets repeat every 6 columns instead of 8 (no short/long pair).
repeated=seq(start_col_index,ncol(testcase),6)
for( j in 1:length(repeated)){
for(i in 1:(length(range)-1)){
tempDf=data.frame(testcase[range[i]:(range[i+1]-1),repeated[j]+2],testcase[time,repeated[j]],testcase[range[i]:(range[i+1]-1),repeated[j]+3],testcase[range[i],repeated[j]+1]) # one group block in long format
colnames(tempDf)=c("ID","Time_Day","Weight","Treatment")
newDataframe=rbind(newDataframe,tempDf)
}
colnames(newDataframe)=c("ID","Time_Day","Weight","Treatment")
}
head(na.omit(newDataframe))   # NOTE(review): result unused inside a function
return(newDataframe)
}
else{
stop("읽고자 하는 대상이 tumor data입니까 또는 weight 데이터 입니까");
}
}
else{
stop("원본 데이터 재확인해주세요. ")
}
}
# NOTE(review): hard-coded absolute path; works only on the author's machine.
setwd("/Users/user/Desktop/RR_pilot/")
# Parse the tumour-size and body-weight sheets from the (redacted) Excel workbook.
tumorSize=prepCode("(CACT) --------------","tumor")
weight= prepCode("(CACT) ------------",target = "weight")
groupSeparation = read_excel("GroupSeparation.xlsx")
# Tumour volume estimate: long * (short/2)^2 * 2, in mm^3.
tumorSize$TumorVolume=as.numeric(tumorSize$Long_mm)*((as.numeric(tumorSize$Short_mm)/2)**2)*2
# Combine size and weight measurements column-wise; assumes both data frames
# have rows in the same animal/day order -- TODO confirm.
totalData=data.frame(tumorSize[,1],as.numeric(tumorSize$Time_Day),tumorSize[,3:4],tumorSize$TumorVolume,as.numeric(weight$Weight),weight$Treatment)
colnames(totalData)=c("ID","Time_Day","Long_mm^3","Short_mm^3","tumor_Volume","weight","group")
# Normalise group labels: replace embedded line breaks, then trim whitespace.
totalData$group = str_replace_all(totalData$group,"[\r\n]"," ")
totalData$group = stri_trim(totalData$group)
# Append per-group "Mean" and "SD" summary rows (of tumorVolume) to the
# individual-animal rows of `groupSeparation`, sorted by ID.
# Expects columns ID, tumorVolume, Group; requires dplyr.
getAvgAndSd= function(groupSeparation){
# One summary row per group: apply `fun` to tumorVolume and label it.
summaryRows = function(fun, label) {
out = groupSeparation %>% group_by(Group) %>% summarise(tumorVolume = fun(tumorVolume))
out$ID = label
out
}
rbind(summaryRows(mean, "Mean"), summaryRows(sd, "SD")) %>%
select(ID, tumorVolume, Group) %>%
rbind(groupSeparation) %>%
arrange(ID)
}
library(dplyr)
library(ggplot2)
library(knitr)
library(kableExtra)
library(xtable)
@
\subsection{유방암 ---- 모델}
\subsubsection{종양 성장 확인 및 군 분리}
\noindent -MDA-MB-231 세포 이식 후 33일째, 측정된 종양 부피의 평균이 \Sexpr{Round(mean(groupSeparation$tumorVolume),2)} $mm^3$ 오차 $\pm$ \Sexpr{Round(sd(groupSeparation$tumorVolume),2)} 일 때 군 분리를 하였음. 실험군의 개체별, 군별 종양부피 값은 다음 Table 1에 명시함
\newline
<<echo=FALSE, warning=FALSE, error=FALSE, message=F >>=
# Table 1: per-animal tumour volumes at group separation, with per-group
# Mean/SD rows added by getAvgAndSd(), laid out side by side per group.
groupSeparation = getAvgAndSd(groupSeparation = groupSeparation)
groupSeparation$tumorVolume = Round(groupSeparation$tumorVolume,2)
tempList=list()
# Each group contributes two table columns (ID, tumorVolume).
groupingColume=2
group=unique(groupSeparation$Group)
for(i in seq_along(group)){
tempList[[i]] = groupSeparation%>% filter(Group == group[i]) %>% select(ID,tumorVolume)
}
tempList=do.call(cbind, tempList)
kable(tempList,format="latex", booktabs=T, align="c" , caption="군 분리 시 종양 부피")%>% kable_styling(font_size = 12,latex_options =c("hold_position")) %>% add_header_above(c(
setNames(groupingColume,group[1]),setNames(groupingColume,group[2]),setNames(groupingColume,group[3]),setNames(groupingColume,group[4])
### Add one spanning header per experimental group (assumes exactly 4 groups).
),bold = T, italic = T)
@
\subsubsection{종양 체적 측정 및 결과 분석}
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F>>=
# Table 2: raw tumour dimensions and computed volume per measurement,
# rounded to 2 decimal places (columns 3-5).
tumorTable=totalData %>% select(ID,Time_Day,`Long_mm^3`,`Short_mm^3`,tumor_Volume,group)
for(i in 3:5){
tumorTable[,i]=Round(as.numeric(tumorTable[,i]),2)
}
colnames(tumorTable)=c("ID","Time","Long(mm)","Short(mm)","Size(mm^3)","Group")
kable(
x=tumorTable
,format="latex", longtable=T, booktabs=T, align="c", caption="종양부피의 평균 및 표준편차") %>% kable_styling(font_size = 12,latex_options =c("hold_position","repeat_header"))
@
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F ,fig.width=6.5,fig.height=4.5>>=
# Figure 1a: mean tumour volume per group over time, with +/- SD error bars.
groupColor=c("purple","blue","red","black")
tumorGrowthGph =totalData %>% select(Time_Day,tumor_Volume,group) %>% group_by(Time_Day,group) %>% summarise( tumorVolAvg = mean(tumor_Volume),tumorVolSd = sd(tumor_Volume))
ggplot(tumorGrowthGph,aes(x=Time_Day,y=tumorVolAvg,group=group,color=group)) +
geom_point(size=2.5)+
geom_line(size=0.7)+
geom_errorbar(aes(ymin=tumorVolAvg-tumorVolSd,ymax=tumorVolAvg+tumorVolSd),width=0.3, alpha=1, size=0.4)+
labs(x=TeX("Days after treatment"),y=TeX("Tumor Volume $mm^3$ "))+
theme_bw(base_size = 10)+
scale_color_manual(values = groupColor)+
theme(legend.position = c(0.28,0.84),legend.text = element_text(size = 9),legend.title =element_text(size=8,face=4))
@
\centerline{Figure 1a. 그룹 별 종양 성장 곡선}
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F ,fig.width=3, fig.height=2,fig.show='hold',fig.align='center'>>=
# Figure 1b: one small panel per group, one growth curve per animal.
expGroup= unique(tumorGrowthGph$group)
tumorGrowthIndvidualGph=totalData %>% select(ID,Time_Day,tumor_Volume,group)%>% group_by(Time_Day,group) %>% arrange(group)
for(i in seq_along(expGroup)){
data=tumorGrowthIndvidualGph %>% filter(group==expGroup[i])
# print() is required for ggplot objects drawn inside a loop in knitr.
print(ggplot(data=data,aes(x=Time_Day,y=tumor_Volume,group=ID))+
geom_line(size=0.7,color=groupColor[i])+
theme_bw(base_size = 10)+
labs(x=TeX("Days after treatment"),y=TeX("Tumor Volume $mm^3$ "))+
theme(legend.position = c(0.28,0.84),legend.text = element_text(size = 9))+
ggtitle(as.character(data$group))+
theme(plot.title = element_text(size=9)))
}
@
\centerline{Figure 1b. 개체 별 종양 성장 곡선}
\newpage
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F ,fig.align = "center",fig.show='hold' >>=
# Tumour growth inhibition (TGI, %) at day 35 vs. the vehicle control group.
tgiTable= totalData %>% filter(Time_Day==35) %>% group_by(group) %>% summarise(TGI = getTGIPercent(drugName = group,time = Time_Day)) %>% filter(group != "vehicle")
colnames(tgiTable)=c("Group","TGI(%)")
kable(
x=tgiTable
,format="latex", longtable=F, booktabs=T, align="c", caption=" TGI") %>% kable_styling(font_size = 12,latex_options =c("hold_position","striped"),position = "center")
@
\paragraph{체중 측정 및 결과 분석}
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F ,fig.align = "center",fig.show='hold' >>=
# Per-animal body weights by day and group.
weightTable = totalData %>% select(ID,Time_Day,weight,group)
kable(
x=weightTable
,format="latex", longtable=T, booktabs=T, align="c", caption="체중의 평균 및 표준편차") %>% kable_styling(font_size = 12,latex_options =c("hold_position","repeat_header")) %>% column_spec(1, width = "3cm") %>% column_spec(2, width = "3cm") %>% column_spec(3, width = "3cm")
@
\noindent
\newline
<<echo=FALSE, warning=FALSE, error=FALSE, results='asis', message=F ,fig.width=8, fig.height=5,fig.show='hold',fig.align='center'>>=
# Figure 2: mean body weight per group over time, with +/- SD error bars.
weightGph = totalData %>% select(ID,Time_Day,weight,group) %>% group_by(Time_Day,group) %>% summarise(AVG=mean(weight),SD=sd(weight))
ggplot(weightGph,aes(x=Time_Day,y=AVG,group=group,color=group)) +
geom_point(size=2.5)+
geom_line(size=0.7)+
geom_errorbar(aes(ymin=AVG-SD,ymax=AVG+SD),width=0.3, alpha=1, size=0.4)+
labs(x=TeX("Days after treatment"),y=TeX("Body Weight(g)"))+
theme_bw(base_size = 10)+
scale_color_manual(values = groupColor)+
theme(legend.position = c(0.26,0.85),legend.text = element_text(size = 9),legend.title =element_text(size=8,face=4))+
ylim(c(6,30))
@
\centerline{Figure 2. 그룹별 체중 변화 곡선}
\newpage
\end{document}
|
# Rank hospitals for a given outcome within every state.
#
# outcome: one of "heart attack", "heart failure", "pneumonia".
# num: "best", "worst", or a numeric rank within each state's ordering.
# Returns a data frame with columns `hospital` and `state`, one row per
# state, ordered by state; `hospital` is NA when the requested rank is not
# available for that state. Reads "outcome-of-care-measures.csv" from the
# working directory.
rankall <- function (outcome, num = "best") {
    # Column holding the 30-day mortality rate for each supported outcome.
    # (Replaces the previous hack of abusing table() as a named vector.)
    outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
    if (!(outcome %in% names(outcome_cols))) {
        stop("invalid outcome")
    }
    outcomeData <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
    col <- outcome_cols[[outcome]]
    # Non-numeric entries (e.g. "Not Available") become NA; warnings suppressed.
    outcomeData[, col] <- suppressWarnings(as.numeric(outcomeData[, col]))
    states <- unique(outcomeData$State)
    # Build one row per state in a preallocated list (avoids O(n^2) rbind growth
    # and the placeholder-NA-row/filter dance of the previous version).
    rows <- vector("list", length(states))
    for (i in seq_along(states)) {
        state <- states[i]
        byState <- outcomeData[outcomeData$State == state, ]
        byState <- byState[complete.cases(byState[, col]), ]
        # Ties in the outcome are broken alphabetically by hospital name.
        sorted <- byState[order(byState[, col], byState$Hospital.Name), ]
        rank <- if (identical(num, "best")) {
            1L
        } else if (identical(num, "worst")) {
            nrow(sorted)
        } else if (is.numeric(num)) {
            num
        } else {
            NA_integer_  # unsupported `num`: no usable rank for any state
        }
        # Guarding rank >= 1 also fixes a bug where an empty state with
        # num = "worst" indexed row 0 and recycled the state name into the
        # hospital column.
        hospital <- if (!is.na(rank) && rank >= 1 && rank <= nrow(sorted)) {
            sorted[rank, "Hospital.Name"]
        } else {
            NA_character_
        }
        rows[[i]] <- data.frame(hospital = hospital, state = state,
                                stringsAsFactors = FALSE)
    }
    result <- do.call(rbind, rows)
    result[order(result$state), ]
}
| /assign3/rankall.R | no_license | KalyanAkella/computing_data_analysis | R | false | false | 1,385 | r | rankall <- function (outcome, num = "best") {
outcomeData <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
outcomes <- table(row.names = c("heart attack", "heart failure", "pneumonia"))
outcomes[1] <- 11; outcomes[2] <- 17; outcomes[3] <- 23
for (i in 1:3) {
outcomeData[, outcomes[i]] <- as.numeric(outcomeData[, outcomes[i]])
}
if (!(outcome %in% row.names(outcomes))) {
stop("invalid outcome")
}
result <- data.frame(hospital = NA, state = NA)
for (state in unique(factor(outcomeData$State))) {
outcomeByState <- outcomeData[outcomeData$State == state, ]
outcomeByState <- outcomeByState[complete.cases(outcomeByState[, outcomes[outcome]]), ]
sortedOutcomeByState <- outcomeByState[order(outcomeByState[, outcomes[outcome]], outcomeByState$Hospital.Name), ]
if (num == "best")
result <- rbind(result, c(sortedOutcomeByState[1, "Hospital.Name"], state))
else if (num == "worst")
result <- rbind(result, c(sortedOutcomeByState[nrow(sortedOutcomeByState), "Hospital.Name"], state))
else if (is.numeric(num)) {
if (num <= nrow(sortedOutcomeByState))
result <- rbind(result, c(sortedOutcomeByState[num, "Hospital.Name"], state))
else
result <- rbind(result, c(NA, state))
}
}
result <- result[complete.cases(result$state), ]
result[order(result$state), ]
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/abbrev.R
\name{abbrev}
\alias{abbrev}
\title{Create vector of Initials}
\usage{
abbrev(x, exceptions = NULL)
}
\arguments{
\item{x}{vector of strings to be abbreviated.}
\item{exceptions}{list with two named vectors: \code{old}, a vector of abbreviations to
be replaced, and \code{new}, a vector of replacement values.}
}
\value{
a character vector of \code{length(x)}.
}
\description{
\code{abbrev} returns a character vector of each words first capital letters for each element of \code{x}.
}
\details{
This function abbreviates the character vector \code{x}, returning a character
vector of the same length. Each abbreviation consists of the first capital
letter of each word in the corresponding element of \code{x}. Users may
additionally pass \code{abbrev} an optional list of exceptions that overrides
the default abbreviations. The list of exceptions requires a vector of "old"
values to be replaced by "new" values.
\examples{
x<-c("KIPP Ascend Middle School", "KIPP Ascend Primary School",
"KIPP Create College Prep",
"KIPP Bloom College Prep" ,
"KIPP One Academy")
abbrev(x)
altnames<-list(old=c("KAPS", "KBCP", "KOA"),
new=c("KAP", "Bloom", "One"))
abbrev(x, exceptions=altnames)
}
| /man/abbrev.Rd | no_license | almartin82/MAP-visuals | R | false | false | 1,292 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/abbrev.R
\name{abbrev}
\alias{abbrev}
\title{Create vector of Initials}
\usage{
abbrev(x, exceptions = NULL)
}
\arguments{
\item{x}{vector of strings to be abbreviated.}
\item{exceptions}{list with two names vectors: \code{old}, a vector abbreviations to
be replaced and \code{new}, a vector of replacment values.}
}
\value{
a character vector of \code{length(x)}.
}
\description{
\code{abbrev} returns a character vector of each words first capital letters for each element of \code{x}.
}
\details{
This function returns a same-length character vector
that abbrevs an initial character vector, \code{x}. Abbreviation returns the first
capital letter of any words in each element of \code{x}. Users may additionally pass
\code{abbrev} and option list of exceptions that overides the default abbreviations.
The list of exceptions requires a vector of "old" values to be replaced by "new" values
}
\examples{
x<-c("KIPP Ascend Middle School", "KIPP Ascend Primary School",
"KIPP Create College Prep",
"KIPP Bloom College Prep" ,
"KIPP One Academy")
abbrev(x)
altnames<-list(old=c("KAPS", "KBCP", "KOA"),
new=c("KAP", "Bloom", "One"))
abbrev(x, excpetions=altnames)
}
|
## Coursera EDA, Course Project 2
## Plot 6: compare motor-vehicle PM2.5 emissions in Baltimore City (fips
## 24510) and Los Angeles County (fips 06037) across the measured years.
## Data reading parts are common to all plot*.R files
## BEFORE USING: setwd('/directory/containing/dataset')
## Expecting the files summarySCC_PM25.rds and Source_Classification_Code.rds
# Read main dataset
NEI <- readRDS ("summarySCC_PM25.rds")
# Turn factorable things into factors
NEI <- transform (NEI,
fips=factor(fips),
SCC=factor(SCC),
Pollutant=as.factor(Pollutant),
type=as.factor(type),
year=as.factor(year))
# Read Source Classification Codes
SCC <- readRDS("Source_Classification_Code.rds")
## END of common part
## Statistical summary of data
# Vehicle-related sources are identified by "veh" in the EI.Sector label.
vehTypes <- SCC[grepl('veh',SCC$EI.Sector,ignore.case=TRUE),]
# Keep only Baltimore (24510) and LA County (06037) records from those sources.
baltiLosAngVeh <- NEI[(NEI$fips=='24510'|NEI$fips=='06037')&is.element(NEI$SCC,vehTypes$SCC),]
library('reshape2')
# Melt to long form, then sum Emissions for each fips/year combination.
baltiLaMelt <- melt (baltiLosAngVeh, id.vars = c("type", "year", "fips", "SCC", "Pollutant"), value.name = "Emissions")
fipsYear <- dcast (baltiLaMelt, fips+year ~ variable, fun.aggregate = sum)
## Make the plot
library('ggplot2')
g<-ggplot(fipsYear,aes(year,Emissions))
g +
geom_bar(stat='identity') +
facet_grid( . ~ fips ) +
# Log scale: LA emissions dwarf Baltimore's, so a linear axis hides the trend.
scale_y_log10(breaks = c(10,100,1000)) + ylab('Emissions -- Log Scale') +
ggtitle("LA & Baltimore Vehicle Emissions")
ggsave (filename = 'plot6.png' )
| /plot6.R | no_license | daviesbj/airpol | R | false | false | 1,351 | r | ## Coursera EDA, Course Project 2
## Data reading parts are common to all plot*.R files
## BEFORE USING: setwd('/directory/containing/dataset')
## Expecting the files summarySCC_PM25.rds and Source_Classification_Code.rds
# Read main dataset
NEI <- readRDS ("summarySCC_PM25.rds")
# Turn factorable things into factors
NEI <- transform (NEI,
fips=factor(fips),
SCC=factor(SCC),
Pollutant=as.factor(Pollutant),
type=as.factor(type),
year=as.factor(year))
# Read Source Classification Codes
SCC <- readRDS("Source_Classification_Code.rds")
## END of common part
## Statistical summary of data
vehTypes <- SCC[grepl('veh',SCC$EI.Sector,ignore.case=TRUE),]
baltiLosAngVeh <- NEI[(NEI$fips=='24510'|NEI$fips=='06037')&is.element(NEI$SCC,vehTypes$SCC),]
library('reshape2')
baltiLaMelt <- melt (baltiLosAngVeh, id.vars = c("type", "year", "fips", "SCC", "Pollutant"), value.name = "Emissions")
fipsYear <- dcast (baltiLaMelt, fips+year ~ variable, fun.aggregate = sum)
## Make the plot
library('ggplot2')
g<-ggplot(fipsYear,aes(year,Emissions))
g +
geom_bar(stat='identity') +
facet_grid( . ~ fips ) +
scale_y_log10(breaks = c(10,100,1000)) + ylab('Emissions -- Log Scale') +
ggtitle("LA & Baltimore Vehicle Emissions")
ggsave (filename = 'plot6.png' )
|
\name{textile}
\alias{textile}
\docType{data}
\title{
Textile Data
}
\description{
Number of Cycles to Failure of Worsted Yarn
}
\usage{data(textile)}
\format{
A data frame with 27 observations on the following variable.
\describe{
\item{\code{textile}}{a numeric vector for the number of cycles}
}
}
%\details{
%% ~~ If necessary, more details than the __description__ above ~~
%}
%\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
%}
\references{
Box, G. E. P., Cox, D. R. (1964). An Analysis of Transformations (with discussion). \emph{Journal of the Royal Statistical Society, Series B (Methodological)}, \bold{26}, 211--252.
}
\examples{
library(AID)
data(textile)
hist(textile[,1])
out <- boxcoxnc(textile[,1])
confInt(out)
}
\keyword{datasets}
| /man/textile.Rd | no_license | cran/AID | R | false | false | 836 | rd | \name{textile}
\alias{textile}
\docType{data}
\title{
Textile Data
}
\description{
Number of Cycles to Failure of Worsted Yarn
}
\usage{data(textile)}
\format{
A data frame with 27 observations on the following variable.
\describe{
\item{\code{textile}}{a numeric vector for the number of cycles}
}
}
%\details{
%% ~~ If necessary, more details than the __description__ above ~~
%}
%\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
%}
\references{
Box, G. E. P., Cox, D. R. (1964). An Analysis of Transformations (with discussion). \emph{Journal of the Royal Statistical Society, Series B (Methodological)}, \bold{26}, 211--252.
}
\examples{
library(AID)
data(textile)
hist(textile[,1])
out <- boxcoxnc(textile[,1])
confInt(out)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/centrality.R
\name{hub_score}
\alias{hub.score}
\alias{hub_score}
\title{Kleinberg's hub centrality scores.}
\usage{
hub_score(graph, scale = TRUE, weights = NULL, options = arpack_defaults)
}
\arguments{
\item{graph}{The input graph.}
\item{scale}{Logical scalar, whether to scale the result to have a maximum
score of one. If no scaling is used then the result vector has unit length
in the Euclidean norm.}
\item{weights}{Optional positive weight vector for calculating weighted
scores. If the graph has a \code{weight} edge attribute, then this is used
by default.}
\item{options}{A named list, to override some ARPACK options. See
\code{\link{arpack}} for details.}
}
\value{
A named list with members:
\item{vector}{The authority/hub scores of the vertices.}
\item{value}{The corresponding eigenvalue of the calculated
principal eigenvector.}
\item{options}{Some information about the ARPACK computation, it has
the same members as the \code{options} member returned
by \code{\link{arpack}}, see that for documentation.}
}
\description{
The hub scores of the vertices are defined as the principal eigenvector
of \eqn{A A^T}{A*t(A)}, where \eqn{A} is the adjacency matrix of the
graph.
}
\details{
For undirected matrices the adjacency matrix is symmetric and the hub
scores are the same as authority scores, see
\code{\link{authority_score}}.
}
\examples{
## An in-star
g <- make_star(10)
hub_score(g)$vector
## A ring
g2 <- make_ring(10)
hub_score(g2)$vector
}
\references{
J. Kleinberg. Authoritative sources in a hyperlinked
environment. \emph{Proc. 9th ACM-SIAM Symposium on Discrete Algorithms},
1998. Extended version in \emph{Journal of the ACM} 46(1999). Also appears
as IBM Research Report RJ 10076, May 1997.
}
\seealso{
\code{\link{authority_score}},
\code{\link{eigen_centrality}} for eigenvector centrality,
\code{\link{page_rank}} for the Page Rank scores. \code{\link{arpack}} for
the underlining machinery of the computation.
}
| /man/hub_score.Rd | no_license | Ruchika8/Dgraph | R | false | true | 2,052 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/centrality.R
\name{hub_score}
\alias{hub.score}
\alias{hub_score}
\title{Kleinberg's hub centrality scores.}
\usage{
hub_score(graph, scale = TRUE, weights = NULL, options = arpack_defaults)
}
\arguments{
\item{graph}{The input graph.}
\item{scale}{Logical scalar, whether to scale the result to have a maximum
score of one. If no scaling is used then the result vector has unit length
in the Euclidean norm.}
\item{weights}{Optional positive weight vector for calculating weighted
scores. If the graph has a \code{weight} edge attribute, then this is used
by default.}
\item{options}{A named list, to override some ARPACK options. See
\code{\link{arpack}} for details.}
}
\value{
A named list with members:
\item{vector}{The authority/hub scores of the vertices.}
\item{value}{The corresponding eigenvalue of the calculated
principal eigenvector.}
\item{options}{Some information about the ARPACK computation, it has
the same members as the \code{options} member returned
by \code{\link{arpack}}, see that for documentation.}
}
\description{
The hub scores of the vertices are defined as the principal eigenvector
of \eqn{A A^T}{A*t(A)}, where \eqn{A} is the adjacency matrix of the
graph.
}
\details{
For undirected matrices the adjacency matrix is symmetric and the hub
scores are the same as authority scores, see
\code{\link{authority_score}}.
}
\examples{
## An in-star
g <- make_star(10)
hub_score(g)$vector
## A ring
g2 <- make_ring(10)
hub_score(g2)$vector
}
\references{
J. Kleinberg. Authoritative sources in a hyperlinked
environment. \emph{Proc. 9th ACM-SIAM Symposium on Discrete Algorithms},
1998. Extended version in \emph{Journal of the ACM} 46(1999). Also appears
as IBM Research Report RJ 10076, May 1997.
}
\seealso{
\code{\link{authority_score}},
\code{\link{eigen_centrality}} for eigenvector centrality,
\code{\link{page_rank}} for the Page Rank scores. \code{\link{arpack}} for
the underlining machinery of the computation.
}
|
library(shiny)
library(plotly)
# UI for the climbing-stats app: a title plus a single main panel stacking
# four plotly charts and a summary table.
shinyUI(fluidPage(
# Application title
titlePanel("Climbing Stats"),
# Main panel: route/boulder counts per grade, pitches over time, and the
# first-boulder-grade table. Output IDs must match the server definitions.
mainPanel(
plotlyOutput("routesPlot"),
plotlyOutput("bouldersPlot"),
plotlyOutput("pitchesMonthYearPlot"),
plotlyOutput("pitchesYearPlot"),
fluidRow(column(4, tableOutput('firstBoulderGradeTable')))
)
)
)
| /old/ui.R | no_license | stefaneng/climbingstats | R | false | false | 387 | r | library(shiny)
library(plotly)
shinyUI(fluidPage(
# Application title
titlePanel("Climbing Stats"),
# Show Routes per Grade
mainPanel(
plotlyOutput("routesPlot"),
plotlyOutput("bouldersPlot"),
plotlyOutput("pitchesMonthYearPlot"),
plotlyOutput("pitchesYearPlot"),
fluidRow(column(4, tableOutput('firstBoulderGradeTable')))
)
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/separate_drgs.R
\name{separate_drgs}
\alias{separate_drgs}
\title{Convert MSDRGs into a "base DRG" and complication level}
\usage{
separate_drgs(drgs, remove_age = FALSE)
}
\arguments{
\item{drgs}{character vector of MSDRG descriptions, e.g. MSDRGDSC}
\item{remove_age}{logical; if TRUE will remove age descriptions}
}
\value{
a tibble with three columns: msdrg: the input vector, base_msdrg, and
msdrg_complication
}
\description{
Convert MSDRGs into a "base DRG" and complication level
}
\details{
This function is not robust to different codings of complication in
DRG descriptions. If you have a coding other than "W CC" / "W MCC" / "W
CC/MCC" / "W/O CC" / "W/O MCC", please file an issue on Github and we'll
try to add support for your coding.
}
\examples{
MSDRGs <- c("ACUTE LEUKEMIA W/O MAJOR O.R. PROCEDURE W CC",
"ACUTE LEUKEMIA W/O MAJOR O.R. PROCEDURE W MCC",
"ACUTE LEUKEMIA W/O MAJOR O.R. PROCEDURE W/O CC/MCC",
"SIMPLE PNEUMONIA & PLEURISY",
"SIMPLE PNEUMONIA & PLEURISY AGE 0-17")
separate_drgs(MSDRGs, remove_age = TRUE)
}
| /man/separate_drgs.Rd | permissive | g3rley/healthcareai-r | R | false | true | 1,174 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/separate_drgs.R
\name{separate_drgs}
\alias{separate_drgs}
\title{Convert MSDRGs into a "base DRG" and complication level}
\usage{
separate_drgs(drgs, remove_age = FALSE)
}
\arguments{
\item{drgs}{character vector of MSDRG descriptions, e.g. MSDRGDSC}
\item{remove_age}{logical; if TRUE will remove age descriptions}
}
\value{
a tibble with three columns: msdrg: the input vector, base_msdrg, and
msdrg_complication
}
\description{
Convert MSDRGs into a "base DRG" and complication level
}
\details{
This function is not robust to different codings of complication in
DRG descriptions. If you have a coding other than "W CC" / "W MCC" / "W
CC/MCC" / "W/O CC" / "W/O MCC", please file an issue on Github and we'll
try to add support for your coding.
}
\examples{
MSDRGs <- c("ACUTE LEUKEMIA W/O MAJOR O.R. PROCEDURE W CC",
"ACUTE LEUKEMIA W/O MAJOR O.R. PROCEDURE W MCC",
"ACUTE LEUKEMIA W/O MAJOR O.R. PROCEDURE W/O CC/MCC",
"SIMPLE PNEUMONIA & PLEURISY",
"SIMPLE PNEUMONIA & PLEURISY AGE 0-17")
separate_drgs(MSDRGs, remove_age = TRUE)
}
|
#################
#### Plot 3 ###
#################
## Plot 3: the three energy sub-metering series over 1-2 Feb 2007, saved as PNG.
# Check if project directory exists, create one in current working directory if not
if(!file.exists("Project 1")){
dir.create("./Project 1")
}
# Store file URL for data set
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# Check if file has been downloaded into project directory, download file if not
if(!file.exists("./Project 1/proj1.zip")){
download.file(fileURL, "./Project 1/proj1.zip")
unzip("./Project 1/proj1.zip", exdir = "./Project 1")
}
#Read all data into power.data data frame ("?" marks missing values in this dataset)
power.data <- read.table("./Project 1/household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
# convert Date variable to date format for subsetting
power.data$Date <- as.Date(power.data$Date, format = "%d/%m/%Y")
# subset data for feb 1st and 2nd into feb.data data frame
feb.data <- power.data[power.data$Date == "2007-02-01" | power.data$Date == "2007-02-02",]
# remove power.data data frame from memory
rm(power.data)
# Create datetime variable by combining "Date" and "Time" columns
# and converting resulting column into a Time/Date format
feb.data$datetime <- as.POSIXct(paste(feb.data$Date, feb.data$Time))
############################ Plot 3 ##########################################
# Open png device for plot 3 that saves in Project 1 directory
png("./Project 1/plot3.png", width = 480, height = 480)
# Plot all three lines with proper colors and label for the y-axis
with(feb.data, plot(datetime, Sub_metering_1, type ="l",
ylab = "Energy sub metering", xlab =""))
with(feb.data, lines(datetime, Sub_metering_2, type = "l", col = "red"))
with(feb.data, lines(datetime, Sub_metering_3, type = "l", col = "blue"))
# Create a legend in the top right for each line
legend("topright", col = c("black", "red", "blue"),
legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), lty = 1 )
# close plot3.png
dev.off()
| /plot3.R | no_license | Dmunslow/ExData_Plotting1 | R | false | false | 2,045 | r | #################
#################
#### Plot 3 ###
#################

# Reproduce plot 3: the three energy sub-metering series for the two days
# 2007-02-01 and 2007-02-02, written to a 480x480 PNG.

# Make sure the project directory exists in the current working directory.
if (!file.exists("Project 1")) {
  dir.create("./Project 1")
}

# Download and unpack the raw data set on first use only.
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if (!file.exists("./Project 1/proj1.zip")) {
  download.file(fileURL, "./Project 1/proj1.zip")
  unzip("./Project 1/proj1.zip", exdir = "./Project 1")
}

# Read the full data set; "?" marks missing values in the raw file.
power.data <- read.table("./Project 1/household_power_consumption.txt",
                         sep = ";", header = TRUE, na.strings = "?")

# Convert Date to Date class, then keep only Feb 1st and 2nd of 2007.
power.data$Date <- as.Date(power.data$Date, format = "%d/%m/%Y")
feb.data <- power.data[power.data$Date == "2007-02-01" | power.data$Date == "2007-02-02", ]
rm(power.data)  # drop the large full table from memory

# Combine the Date and Time columns into a single POSIXct timestamp.
feb.data$datetime <- as.POSIXct(paste(feb.data$Date, feb.data$Time))

############################ Plot 3 ##########################################
# Open the PNG device inside the project directory.
png("./Project 1/plot3.png", width = 480, height = 480)

# Draw the three series with the required colours and y-axis label.
plot(feb.data$datetime, feb.data$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(feb.data$datetime, feb.data$Sub_metering_2, type = "l", col = "red")
lines(feb.data$datetime, feb.data$Sub_metering_3, type = "l", col = "blue")

# Legend in the top right, one entry per line.
legend("topright", col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1)

# Close the PNG device.
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_pvalue.R
\name{calc_pvalue}
\alias{calc_pvalue}
\title{Calculate a p-value from a permutation test}
\usage{
calc_pvalue(observed_lod, maxlods)
}
\arguments{
\item{observed_lod}{the test statistic, on the lod scale}
\item{maxlods}{a vector of max lods from a collection of permutations}
}
\value{
a permutation test p-value
}
\description{
Calculate a p-value from a permutation test
}
\details{
If the observed_lod is greater than all values in the vector maxlods, then a p-value of zero is returned. In practice, it means that you need more permutations.
}
| /man/calc_pvalue.Rd | permissive | fboehm/qtl2effects | R | false | true | 642 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_pvalue.R
\name{calc_pvalue}
\alias{calc_pvalue}
\title{Calculate a p-value from a permutation test}
\usage{
calc_pvalue(observed_lod, maxlods)
}
\arguments{
\item{observed_lod}{the test statistic, on the lod scale}
\item{maxlods}{a vector of max lods from a collection of permutations}
}
\value{
a permutation test p-value
}
\description{
Calculate a p-value from a permutation test
}
\details{
If the observed_lod is greater than all values in the vector maxlods, then a p-value of zero is returned. In practice, it means that you need more permutations.
}
|
context("aesthetics")

test_that("we stop when data does not contain interactive variables", {
  ## In interactive testing, foo will be found and copied to
  ## ggplot_build(gg)$data, but on R CMD check, animint2dir only has
  ## access to its environment, so it will not have access to foo
  ## defined in the global environment, and so calling ggplot_build on
  ## this plot from inside animint2dir would result in a "foo not found"
  ## error. However, animint should check for the validity of its
  ## interactive variables BEFORE calling ggplot_build, so below we
  ## should get an animint error, not a ggplot_build error.
  foo <- 1
  gg <- ggplot() +
    geom_point(aes(Sepal.Length, Petal.Length, showSelected = foo),
               data = iris)
  viz <- list(scatter = gg)
  ## expect_error() replaces the deprecated
  ## expect_that(..., throws_error(...)) idiom; the message is still
  ## matched as a regular expression.
  expect_error(
    animint2dir(viz, open.browser = FALSE),
    "data does not have interactive variables"
  )
})
| /tests/testthat/test-aes.R | no_license | tokareff/animint | R | false | false | 911 | r | context("aesthetics")
context("aesthetics")

test_that("we stop when data does not contain interactive variables", {
  ## In interactive testing, foo will be found and copied to
  ## ggplot_build(gg)$data, but on R CMD check, animint2dir only has
  ## access to its environment, so it will not have access to foo
  ## defined in the global environment, and so calling ggplot_build on
  ## this plot from inside animint2dir would result in a "foo not found"
  ## error. However, animint should check for the validity of its
  ## interactive variables BEFORE calling ggplot_build, so below we
  ## should get an animint error, not a ggplot_build error.
  foo <- 1
  gg <- ggplot() +
    geom_point(aes(Sepal.Length, Petal.Length, showSelected = foo),
               data = iris)
  viz <- list(scatter = gg)
  ## expect_error() replaces the deprecated
  ## expect_that(..., throws_error(...)) idiom; the message is still
  ## matched as a regular expression.
  expect_error(
    animint2dir(viz, open.browser = FALSE),
    "data does not have interactive variables"
  )
})
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_bin.R
\name{checkAvailablePackages}
\alias{checkAvailablePackages}
\title{compare available packages in two directories}
\usage{
checkAvailablePackages(src, bin)
}
\arguments{
\item{src}{character path to source directory}
\item{bin}{character path to binary directory}
}
\description{
compare available packages in two directories
}
| /man/checkAvailablePackages.Rd | permissive | jiwalker-usgs/grantools | R | false | true | 420 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_bin.R
\name{checkAvailablePackages}
\alias{checkAvailablePackages}
\title{compare available packages in two directories}
\usage{
checkAvailablePackages(src, bin)
}
\arguments{
\item{src}{character path to source directory}
\item{bin}{character path to binary directory}
}
\description{
compare available packages in two directories
}
|
\name{cccma}
\alias{cccma}
\title{
Sample CanESM2 and CanRCM4 data
}
\description{
Sample CanESM2 (T63 grid) and CanRCM4 (0.22-deg grid) data
(122.5 deg W, 50 deg N).
\preformatted{pr: precipitation (mm day-1)
tas: average surface temperature (deg. C)
dtr: diurnal temperature range (deg. C)
sfcWind: surface wind speed (m s-1)
ps: surface pressure (ps)
huss: surface specific humidity (kg kg-1)
rsds: surface downwelling shortwave radiation (W m-2)
rlds: surface downwelling longwave radiation (W m-2)}
}
\value{
a list of with elements consisting of:
\item{gcm.c}{matrix of CanESM2 variables for the calibration period.}
\item{gcm.p}{matrix of CanESM2 variables for the validation period.}
\item{rcm.c}{matrix of CanRCM4 variables for the calibration period.}
\item{rcm.p}{matrix of CanRCM4 variables for the validation period.}
\item{ratio.seq}{vector of logical values indicating if samples are of a ratio quantity.}
\item{trace}{numeric values indicating trace thresholds for each ratio quantity.}
}
| /man/cccma.Rd | no_license | cran/MBC | R | false | false | 1,048 | rd | \name{cccma}
\alias{cccma}
\title{
Sample CanESM2 and CanRCM4 data
}
\description{
Sample CanESM2 (T63 grid) and CanRCM4 (0.22-deg grid) data
(122.5 deg W, 50 deg N).
\preformatted{pr: precipitation (mm day-1)
tas: average surface temperature (deg. C)
dtr: diurnal temperature range (deg. C)
sfcWind: surface wind speed (m s-1)
ps: surface pressure (ps)
huss: surface specific humidity (kg kg-1)
rsds: surface downwelling shortwave radiation (W m-2)
rlds: surface downwelling longwave radiation (W m-2)}
}
\value{
a list of with elements consisting of:
\item{gcm.c}{matrix of CanESM2 variables for the calibration period.}
\item{gcm.p}{matrix of CanESM2 variables for the validation period.}
\item{rcm.c}{matrix of CanRCM4 variables for the calibration period.}
\item{rcm.p}{matrix of CanRCM4 variables for the validation period.}
\item{ratio.seq}{vector of logical values indicating if samples are of a ratio quantity.}
\item{trace}{numeric values indicating trace thresholds for each ratio quantity.}
}
|
\name{SubLasso}
\alias{SubLasso}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Logistic model via Lasso penalty with a subset of features
}
\description{
Fit a logistic model via Lasso penalty. A subset of features can be fixed in the model.
}
\usage{
SubLasso(X, y, subset, nfold)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{gene expression matrix, column is sample, row is gene(probe sets).
}
\item{y}{category vector, 1 (positive, illness) or 0 (negative, normal).
}
\item{subset}{gene (probe sets) names must be included in the model; Default is null set.
}
\item{nfold}{number of cross-validation; Default is 5.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
some details
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
\item{selname}{features selected by the model.}
\item{w}{the coefficient (weight) of feature in the model}
\item{valid}{sensitivity (Sn), specificity (Sp), Accuracy (Acc), and Matthews correlation coefficient(Mcc)}
\item{description}{the description statistics of selected features by group.}
\item{correlation}{the correlations between all selected features.}
}
\references{
%% ~put references to the literature/web site here ~
Friedman, J., Hastie, T. and Tibshirani, R. (2008) Regularization Paths for Generalized Linear Models via Coordinate Descent, http://www.stanford.edu/~hastie/Papers/glmnet.pdf Journal of Statistical Software, Vol. 33(1), 1-22 Feb 2010. http://www.jstatsoft.org/v33/i01/
}
\author{
%% ~~who you are~~
Youxi Luo
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
glmnet
} \examples{
#screen device is not support in examples but SubLasso function use it to visualize results.
#when you want to test examples, please uncomment following code.
##### Example 1
#data(Golub_Merge)
#X <- Golub_Merge$X
#y <- Golub_Merge$y
#f1=SubLasso(X,y,nfold=10)
## predict.sublasso(f1,X[1:10,]) ##error predicted x
#predy=predict.sublasso(f1,X)
#predy=predict.sublasso(f1,X,type="class")
#predy=predict.sublasso(f1,X,type="link")
#predy=predict.sublasso(f1,X,type="response")
#predy=predict.sublasso(f1,X,type="response",s=0.05)
#subset=f1$selname
#f2=SubLasso(X,y,subset,nfold=10)
#subset=row.names(X)[1:10]
#f3=SubLasso(X,y,subset,nfold=10)
#predy=predict.sublasso(f3,X)
#predy=predict.sublasso(f3,X,type="class")
#predy=predict.sublasso(f3,X,type="link")
#predy=predict.sublasso(f3,X,type="response")
#predy=predict.sublasso(f3,X,type="response",s=0.05)
###Example 2
#data(Colon)
#X<-t(Colon$X)
#y_tmp<-Colon$Y
#y<-ifelse(y_tmp==1,1,0)
#f1=SubLasso(X,y,nfold=10)
#subset=f1$selname
#f2=SubLasso(X,y,subset,nfold=10)
#subset=row.names(X)[30:40]
#f3=SubLasso(X,y,subset,nfold=10)
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
}
| /man/SubLasso.Rd | no_license | cran/SubLasso | R | false | false | 3,010 | rd | \name{SubLasso}
\alias{SubLasso}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Logistic model via Lasso penalty with a subset of features
}
\description{
Fit a logistic model via Lasso penalty. A subset of features can be fixed in the model.
}
\usage{
SubLasso(X, y, subset, nfold)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{gene expression matrix, column is sample, row is gene(probe sets).
}
\item{y}{category vector, 1 (positive, illness) or 0 (negative, normal).
}
\item{subset}{gene (probe sets) names must be included in the model; Default is null set.
}
\item{nfold}{number of cross-validation; Default is 5.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
some details
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
\item{selname}{features selected by the model.}
\item{w}{the coefficient (weight) of feature in the model}
\item{valid}{sensitivity (Sn), specificity (Sp), Accuracy (Acc), and Matthews correlation coefficient(Mcc)}
\item{description}{the description statistics of selected features by group.}
\item{correlation}{the correlations between all selected features.}
}
\references{
%% ~put references to the literature/web site here ~
Friedman, J., Hastie, T. and Tibshirani, R. (2008) Regularization Paths for Generalized Linear Models via Coordinate Descent, http://www.stanford.edu/~hastie/Papers/glmnet.pdf Journal of Statistical Software, Vol. 33(1), 1-22 Feb 2010. http://www.jstatsoft.org/v33/i01/
}
\author{
%% ~~who you are~~
Youxi Luo
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
glmnet
} \examples{
#screen device is not support in examples but SubLasso function use it to visualize results.
#when you want to test examples, please uncomment following code.
##### Example 1
#data(Golub_Merge)
#X <- Golub_Merge$X
#y <- Golub_Merge$y
#f1=SubLasso(X,y,nfold=10)
## predict.sublasso(f1,X[1:10,]) ##error predicted x
#predy=predict.sublasso(f1,X)
#predy=predict.sublasso(f1,X,type="class")
#predy=predict.sublasso(f1,X,type="link")
#predy=predict.sublasso(f1,X,type="response")
#predy=predict.sublasso(f1,X,type="response",s=0.05)
#subset=f1$selname
#f2=SubLasso(X,y,subset,nfold=10)
#subset=row.names(X)[1:10]
#f3=SubLasso(X,y,subset,nfold=10)
#predy=predict.sublasso(f3,X)
#predy=predict.sublasso(f3,X,type="class")
#predy=predict.sublasso(f3,X,type="link")
#predy=predict.sublasso(f3,X,type="response")
#predy=predict.sublasso(f3,X,type="response",s=0.05)
###Example 2
#data(Colon)
#X<-t(Colon$X)
#y_tmp<-Colon$Y
#y<-ifelse(y_tmp==1,1,0)
#f1=SubLasso(X,y,nfold=10)
#subset=f1$selname
#f2=SubLasso(X,y,subset,nfold=10)
#subset=row.names(X)[30:40]
#f3=SubLasso(X,y,subset,nfold=10)
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
}
|
######################################
###########test Leucht################
######################################
# Regression-test script: compare the old livenma() implementation against
# the new sequentialnma() implementation on the Leucht and Dong data sets.
# NOTE(review): the absolute Windows paths below only work on the author's
# machine; this script is meant for interactive testing.

rm(list = ls())

# Load the 7 new functions with source() because the old package also
# exports a function named sequentialnma().
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/sequentialnma.R')
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/fordelta.R')
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/alpha.R')
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/formatdata.R')
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/main.R')
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/rci.R')
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/plot.sequentialnma.R')

# Helper: one column of the old livenma() prospective-results array, for a
# given comparison row, as a plain numeric vector (steps 1 .. last year - 1).
extract_prosp <- function(fit, row, col) {
  as.vector(fit$Prosp[row, col, 1:(max(fit$D$idyear) - 1)], mode = "numeric")
}

# Helper: one output field for a given comparison across all sequential
# steps of a sequentialnma() fit.
extract_seq <- function(fit, comparison, field) {
  vapply(fit$result, function(step) step$output[comparison, field], numeric(1))
}

library(readr)
LeuchtID <- read_delim("C:/Users/nikolakopoulou/Desktop/LeuchtID.csv",
                       ";", escape_double = FALSE, trim_ws = TRUE)
#View(LeuchtID)

leuchtseq1 <- sequentialnma(data = LeuchtID, perarm = FALSE, type = "continuous",
                            sm = "SMD", tau.preset = 0.2213594,
                            comb.fixed = FALSE, comb.random = TRUE,
                            studlab = "id", sortvar = "year",
                            TE = "effect", seTE = "se",
                            t1 = "treat1", t2 = "treat2")

# NOTE(review): install_github() is not available unless devtools (or
# remotes) is attached; this line errors in a clean session -- confirm.
install_github("esm-ispm-unibe-ch/sequentialnma")
library(sequentialnma)

y <- livenma(data = LeuchtID, level = "study", type = "continuous",
             effsize = "SMD", tau.sq = 0.049, delta = NA)

# Check the last step of Leucht: the two implementations should agree.
y$output[y$output$ComparisonNetw == "HAL vs OLA", ]
leuchtseq1$laststep$output["HAL:OLA", ]

# This is figure 1 of the main manuscript.
plot(seqnmaobject = leuchtseq1, comparison = "HAL:OLA", evidence = "both",
     small.values = NA)

# Results with the old (suffix 1) and new (suffix 2) functions; row 68 is
# the "HAL vs OLA" comparison in the old Prosp array.
DirectZscore1     <- extract_prosp(y, 68, 8)
NetworkZscore1    <- extract_prosp(y, 68, 10)
DirectT1          <- extract_prosp(y, 68, 12)
NetworkT1         <- extract_prosp(y, 68, 13)
DirectEfficacyB1  <- extract_prosp(y, 68, 15)
NetworkEfficacyB1 <- extract_prosp(y, 68, 17)
DirectTE1         <- extract_prosp(y, 68, 2)
NetworkTE1        <- extract_prosp(y, 68, 5)
DirectseTE1       <- extract_prosp(y, 68, 3)
NetworkseTE1      <- extract_prosp(y, 68, 6)

DirectZscore2     <- extract_seq(leuchtseq1, "HAL:OLA", "DirectZscore")
NetworkZscore2    <- extract_seq(leuchtseq1, "HAL:OLA", "NetworkZscore")
DirectT2          <- extract_seq(leuchtseq1, "HAL:OLA", "DirectTaccum")
NetworkT2         <- extract_seq(leuchtseq1, "HAL:OLA", "NetworkTaccum")
DirectEfficacyB2  <- extract_seq(leuchtseq1, "HAL:OLA", "DirectBoundary")
NetworkEfficacyB2 <- extract_seq(leuchtseq1, "HAL:OLA", "NetworkBoundary")
DirectTE2         <- extract_seq(leuchtseq1, "HAL:OLA", "DirectTE")
NetworkTE2        <- extract_seq(leuchtseq1, "HAL:OLA", "NetworkTE")
DirectseTE2       <- extract_seq(leuchtseq1, "HAL:OLA", "DirectSE")
NetworkseTE2      <- extract_seq(leuchtseq1, "HAL:OLA", "NetworkSE")

######################################
###########test Dong################
######################################
#data(Dong)
Dong05 <- read_delim("C:/Users/nikolakopoulou/Desktop/Dong05.csv",
                     ";", escape_double = FALSE, trim_ws = TRUE)

Dongseq <- sequentialnma(data = Dong05, perarm = TRUE, type = "binary",
                         sm = "OR", tau.preset = 0.1183216,
                         comb.fixed = FALSE, comb.random = TRUE,
                         studlab = "id", sortvar = "year")

Dong05 <- as.data.frame(Dong05)
x <- livenma(data = Dong05, level = "arm", type = "binary", effsize = "OR",
             tau.sq = 0.014, delta = NA)

# Check the last step of Dong.
x$output[x$output$ComparisonNetw == "ICS vs LABA-ICS", ]
Dongseq$laststep$output["ICS:LABA-ICS", ]

# Results with the old and new functions; row 2 is "ICS vs LABA-ICS".
# BUG FIX: the original script indexed y$Prosp / y$D (the Leucht fit) for
# the TE/seTE vectors of this section; the Dong results live in x.
DirectZscore1     <- extract_prosp(x, 2, 8)
NetworkZscore1    <- extract_prosp(x, 2, 10)
DirectT1          <- extract_prosp(x, 2, 12)
NetworkT1         <- extract_prosp(x, 2, 13)
DirectEfficacyB1  <- extract_prosp(x, 2, 15)
NetworkEfficacyB1 <- extract_prosp(x, 2, 17)
DirectTE1         <- extract_prosp(x, 2, 2)
NetworkTE1        <- extract_prosp(x, 2, 5)
DirectseTE1       <- extract_prosp(x, 2, 3)
NetworkseTE1      <- extract_prosp(x, 2, 6)

DirectZscore2     <- extract_seq(Dongseq, "ICS:LABA-ICS", "DirectZscore")
NetworkZscore2    <- extract_seq(Dongseq, "ICS:LABA-ICS", "NetworkZscore")
DirectT2          <- extract_seq(Dongseq, "ICS:LABA-ICS", "DirectTaccum")
NetworkT2         <- extract_seq(Dongseq, "ICS:LABA-ICS", "NetworkTaccum")
DirectEfficacyB2  <- extract_seq(Dongseq, "ICS:LABA-ICS", "DirectBoundary")
NetworkEfficacyB2 <- extract_seq(Dongseq, "ICS:LABA-ICS", "NetworkBoundary")
DirectTE2         <- extract_seq(Dongseq, "ICS:LABA-ICS", "DirectTE")
NetworkTE2        <- extract_seq(Dongseq, "ICS:LABA-ICS", "NetworkTE")
DirectseTE2       <- extract_seq(Dongseq, "ICS:LABA-ICS", "DirectSE")
NetworkseTE2      <- extract_seq(Dongseq, "ICS:LABA-ICS", "NetworkSE")

# Sequential framework and repeated forest plot with both evidence types;
# compare with the appendix figures.
plot(seqnmaobject = Dongseq, comparison = "ICS:LABA-ICS", evidence = "both",
     small.values = NA)
repeatedCI(seqnmaobject = Dongseq, comparison = "ICS:LABA-ICS",
           evidence = "both.separate", small.values = NA)

###############
data(Dong)
| /test/testreal.R | no_license | tosku/sequentialnma2 | R | false | false | 7,706 | r | ######################################
######################################
###########test Leucht################
######################################
# Regression-test script: compare the old livenma() implementation against
# the new sequentialnma() implementation on the Leucht and Dong data sets.
# NOTE(review): the absolute Windows paths below only work on the author's
# machine; this script is meant for interactive testing.

rm(list = ls())

# Load the 7 new functions with source() because the old package also
# exports a function named sequentialnma().
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/sequentialnma.R')
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/fordelta.R')
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/alpha.R')
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/formatdata.R')
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/main.R')
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/rci.R')
source('C:/Users/nikolakopoulou/Desktop/sequentialnma2/R/plot.sequentialnma.R')

# Helper: one column of the old livenma() prospective-results array, for a
# given comparison row, as a plain numeric vector (steps 1 .. last year - 1).
extract_prosp <- function(fit, row, col) {
  as.vector(fit$Prosp[row, col, 1:(max(fit$D$idyear) - 1)], mode = "numeric")
}

# Helper: one output field for a given comparison across all sequential
# steps of a sequentialnma() fit.
extract_seq <- function(fit, comparison, field) {
  vapply(fit$result, function(step) step$output[comparison, field], numeric(1))
}

library(readr)
LeuchtID <- read_delim("C:/Users/nikolakopoulou/Desktop/LeuchtID.csv",
                       ";", escape_double = FALSE, trim_ws = TRUE)
#View(LeuchtID)

leuchtseq1 <- sequentialnma(data = LeuchtID, perarm = FALSE, type = "continuous",
                            sm = "SMD", tau.preset = 0.2213594,
                            comb.fixed = FALSE, comb.random = TRUE,
                            studlab = "id", sortvar = "year",
                            TE = "effect", seTE = "se",
                            t1 = "treat1", t2 = "treat2")

# NOTE(review): install_github() is not available unless devtools (or
# remotes) is attached; this line errors in a clean session -- confirm.
install_github("esm-ispm-unibe-ch/sequentialnma")
library(sequentialnma)

y <- livenma(data = LeuchtID, level = "study", type = "continuous",
             effsize = "SMD", tau.sq = 0.049, delta = NA)

# Check the last step of Leucht: the two implementations should agree.
y$output[y$output$ComparisonNetw == "HAL vs OLA", ]
leuchtseq1$laststep$output["HAL:OLA", ]

# This is figure 1 of the main manuscript.
plot(seqnmaobject = leuchtseq1, comparison = "HAL:OLA", evidence = "both",
     small.values = NA)

# Results with the old (suffix 1) and new (suffix 2) functions; row 68 is
# the "HAL vs OLA" comparison in the old Prosp array.
DirectZscore1     <- extract_prosp(y, 68, 8)
NetworkZscore1    <- extract_prosp(y, 68, 10)
DirectT1          <- extract_prosp(y, 68, 12)
NetworkT1         <- extract_prosp(y, 68, 13)
DirectEfficacyB1  <- extract_prosp(y, 68, 15)
NetworkEfficacyB1 <- extract_prosp(y, 68, 17)
DirectTE1         <- extract_prosp(y, 68, 2)
NetworkTE1        <- extract_prosp(y, 68, 5)
DirectseTE1       <- extract_prosp(y, 68, 3)
NetworkseTE1      <- extract_prosp(y, 68, 6)

DirectZscore2     <- extract_seq(leuchtseq1, "HAL:OLA", "DirectZscore")
NetworkZscore2    <- extract_seq(leuchtseq1, "HAL:OLA", "NetworkZscore")
DirectT2          <- extract_seq(leuchtseq1, "HAL:OLA", "DirectTaccum")
NetworkT2         <- extract_seq(leuchtseq1, "HAL:OLA", "NetworkTaccum")
DirectEfficacyB2  <- extract_seq(leuchtseq1, "HAL:OLA", "DirectBoundary")
NetworkEfficacyB2 <- extract_seq(leuchtseq1, "HAL:OLA", "NetworkBoundary")
DirectTE2         <- extract_seq(leuchtseq1, "HAL:OLA", "DirectTE")
NetworkTE2        <- extract_seq(leuchtseq1, "HAL:OLA", "NetworkTE")
DirectseTE2       <- extract_seq(leuchtseq1, "HAL:OLA", "DirectSE")
NetworkseTE2      <- extract_seq(leuchtseq1, "HAL:OLA", "NetworkSE")

######################################
###########test Dong################
######################################
#data(Dong)
Dong05 <- read_delim("C:/Users/nikolakopoulou/Desktop/Dong05.csv",
                     ";", escape_double = FALSE, trim_ws = TRUE)

Dongseq <- sequentialnma(data = Dong05, perarm = TRUE, type = "binary",
                         sm = "OR", tau.preset = 0.1183216,
                         comb.fixed = FALSE, comb.random = TRUE,
                         studlab = "id", sortvar = "year")

Dong05 <- as.data.frame(Dong05)
x <- livenma(data = Dong05, level = "arm", type = "binary", effsize = "OR",
             tau.sq = 0.014, delta = NA)

# Check the last step of Dong.
x$output[x$output$ComparisonNetw == "ICS vs LABA-ICS", ]
Dongseq$laststep$output["ICS:LABA-ICS", ]

# Results with the old and new functions; row 2 is "ICS vs LABA-ICS".
# BUG FIX: the original script indexed y$Prosp / y$D (the Leucht fit) for
# the TE/seTE vectors of this section; the Dong results live in x.
DirectZscore1     <- extract_prosp(x, 2, 8)
NetworkZscore1    <- extract_prosp(x, 2, 10)
DirectT1          <- extract_prosp(x, 2, 12)
NetworkT1         <- extract_prosp(x, 2, 13)
DirectEfficacyB1  <- extract_prosp(x, 2, 15)
NetworkEfficacyB1 <- extract_prosp(x, 2, 17)
DirectTE1         <- extract_prosp(x, 2, 2)
NetworkTE1        <- extract_prosp(x, 2, 5)
DirectseTE1       <- extract_prosp(x, 2, 3)
NetworkseTE1      <- extract_prosp(x, 2, 6)

DirectZscore2     <- extract_seq(Dongseq, "ICS:LABA-ICS", "DirectZscore")
NetworkZscore2    <- extract_seq(Dongseq, "ICS:LABA-ICS", "NetworkZscore")
DirectT2          <- extract_seq(Dongseq, "ICS:LABA-ICS", "DirectTaccum")
NetworkT2         <- extract_seq(Dongseq, "ICS:LABA-ICS", "NetworkTaccum")
DirectEfficacyB2  <- extract_seq(Dongseq, "ICS:LABA-ICS", "DirectBoundary")
NetworkEfficacyB2 <- extract_seq(Dongseq, "ICS:LABA-ICS", "NetworkBoundary")
DirectTE2         <- extract_seq(Dongseq, "ICS:LABA-ICS", "DirectTE")
NetworkTE2        <- extract_seq(Dongseq, "ICS:LABA-ICS", "NetworkTE")
DirectseTE2       <- extract_seq(Dongseq, "ICS:LABA-ICS", "DirectSE")
NetworkseTE2      <- extract_seq(Dongseq, "ICS:LABA-ICS", "NetworkSE")

# Sequential framework and repeated forest plot with both evidence types;
# compare with the appendix figures.
plot(seqnmaobject = Dongseq, comparison = "ICS:LABA-ICS", evidence = "both",
     small.values = NA)
repeatedCI(seqnmaobject = Dongseq, comparison = "ICS:LABA-ICS",
           evidence = "both.separate", small.values = NA)

###############
data(Dong)
|
## Extract the two kinds of splice-site positions from a set of spliced
## records (reads/transcripts).
##
## tx: a GRanges with a 'blocks' metadata column (exon blocks per record,
##     e.g. from importing a BED12 file with rtracklayer; blocks() is
##     assumed to yield genome coordinates -- confirm) and a 'name' column.
##
## Returns a list of two width-1 GRanges, one entry per intron of each
## record (length == sum(nexons - 1) for both):
##   qei: last base of every exon except each record's final exon
##        (exon -> intron boundaries)
##   qie: first base of every exon except each record's first exon
##        (intron -> exon boundaries)
## Both carry the originating record name in metadata column 'qname'.
extract_ss_from_gr = function(tx){
## tx is granges object with blocks
myblocks = blocks(tx)
nexons = lengths(myblocks)      # number of exon blocks per record
idx = cumsum(nexons)            # position of each record's LAST exon in the unlisted vectors
allstarts = unlist(start(myblocks))
allends = unlist(end(myblocks))
# one seqname per intron (each record contributes nexons - 1 introns)
myseqnames = rep(as.character(seqnames(tx)), nexons-1)
# exon ends, dropping each record's last exon end
qei = GRanges( myseqnames, IRanges(allends[-idx], width=1) )
# exon starts, dropping the very first start and the first start of every
# subsequent record (i.e. each record's first exon start)
qie = GRanges( myseqnames, IRanges(allstarts[-c(1,(idx[-length(idx)]+1))],width=1) )
qei$qname = qie$qname = rep(tx$name, nexons-1)
list( qei=qei, qie=qie )
}
## Assign each multi-exon record in `x` to a reference splice cluster.
##
## Reads the globals `rei`, `rie` (reference exon->intron / intron->exon
## splice-site windows, each with a 'name' column) and `myignoreStrand`.
##
## A record counts as clustered when the number of its exon->intron
## boundary hits within `rei` equals its intron count, and likewise for
## its intron->exon boundaries within `rie`; its cluster name is then the
## ';'-separated list of "ei-ie" reference-name pairs, one per intron.
## Everything else gets the label "unclustered".
##
## Returns an unnamed character vector parallel to `x`.
get_cluster_optimised = function(x){
ssSites = extract_ss_from_gr(x)
nintrons = lengths(blocks(x))-1         # introns per record, keyed by name
names(nintrons) = x$name
# "within" overlaps of the query splice sites against the reference windows
ovlpsEI = as.matrix(findOverlaps(ssSites$qei, rei,type="within",ignore.strand=myignoreStrand))
ovlpsIE = as.matrix(findOverlaps(ssSites$qie, rie,type="within",ignore.strand=myignoreStrand))
stopifnot(all(ssSites$qei$qname == ssSites$qie$qname))
# group the matched reference indices by query name; the explicit factor
# levels keep empty groups and preserve the original record order
ovlpsEIans = list(qname = ssSites$qei$qname[ovlpsEI[,1]], refidx = ovlpsEI[,2])
ovlpsEIansSplt = split(ovlpsEIans$refidx, factor(ovlpsEIans$qname, unique(ssSites$qei$qname)))
ovlpsIEans = list(qname = ssSites$qie$qname[ovlpsIE[,1]], refidx = ovlpsIE[,2])
ovlpsIEansSplt = split(ovlpsIEans$refidx, factor(ovlpsIEans$qname, unique(ssSites$qie$qname)))
# clustered := hit count on each side equals the intron count.
# NOTE(review): this assumes at most one reference hit per site; two hits
# at one site plus a missed site would also satisfy the count -- confirm
# the reference windows are non-overlapping.
isClustered = which((lengths(ovlpsEIansSplt) == nintrons[names(ovlpsEIansSplt)]) &
(lengths(ovlpsIEansSplt) == nintrons[names(ovlpsIEansSplt)]))
ans = rep("unclustered", length(x))
names(ans) = x$name
if(length(isClustered)){
## intron interval FIXME try to use the block class?
# pair up the ei and ie reference names for every intron of every
# clustered record ...
prenames = paste(
rei$name[ unlist(ovlpsEIansSplt[isClustered]) ],
rie$name[ unlist(ovlpsIEansSplt[isClustered]) ],
sep="-")
# ... then collapse the per-record pairs into one ';'-joined cluster name
myqnames = rep(names(ovlpsEIansSplt[isClustered]),lengths(ovlpsEIansSplt[isClustered]))
myclnames= tapply(prenames, factor(myqnames,unique(myqnames)),
paste, collapse=";")
ans[names(myclnames)] = myclnames
}
names(ans)=NULL
ans
}
library(tidyverse)
library(rtracklayer)
library(parallel)  # FIX: mclapply() is used below but 'parallel' was never
                   # attached explicitly in this file (it may previously have
                   # been loaded only as a side effect of the Bioconductor
                   # stack -- confirm)

myignoreStrand = TRUE  # read by get_cluster_optimised() via global scope
ncpu = 30              # number of worker processes for mclapply()

args = commandArgs(trailingOnly = TRUE)
#
# ### two inputs both as bed files, ei and ie splice sites
infile = args[1] ## bed file with the (spliced) reads
eiFile = args[2] ## ref ei file
ieFile = args[3] ## ref ie file

rei = import(eiFile)
rie = import(ieFile)
# prefix the reference names with their chromosome so they are unique
# genome-wide
rei$name = paste(seqnames(rei), rei$name, sep = "_")
rie$name = paste(seqnames(rie), rie$name, sep = "_")

# derive output paths from the input file name (everything before the
# first dot)
fileBasename = file.path(dirname(infile), strsplit(basename(infile), "\\.")[[1]][1])
outfile = paste0(fileBasename, ".clustered.rda")
outfile_clustered = paste0(fileBasename, ".clustered.txt.gz")
outfile_singleton = paste0(fileBasename, ".singleton.txt.gz")
outfile_unclustered = paste0(fileBasename, ".unclustered.txt.gz")

#if(!file.exists(outfile)) {
message("Reading ", basename(fileBasename), "......\n")
myreads = import(infile)
if (anyDuplicated(myreads$name)) {
  # read names double as keys in the clustering, so they must be unique
  stop("Reads cannot map to multiple loci. This confuses the clustering")
}

myreads$rcluster = "unclustered"
### check singleton: single-exon reads have no splice sites to cluster on
isSingleton = which(elementNROWS(myreads$blocks) == 1)
myreads$rcluster[isSingleton] = "singleton"
testIdx = which(elementNROWS(myreads$blocks) > 1)

message("Clustering ", basename(fileBasename), "......\n")
# split the multi-exon reads into ncpu chunks; unlist() restores the
# original order because split()/cut() keep the chunks sorted
myreads$rcluster[testIdx] = unlist(mclapply(
  split(testIdx, cut(seq_along(testIdx), ncpu)),
  function(i) {
    x = myreads[i]
    get_cluster_optimised(x)
  }, mc.cores = ncpu))

save(myreads, file = outfile)

## export clustered reads as a table (three files: clustered, unclustered,
## singleton)
readToClusterMapping = tibble(qname = myreads$name, clname = myreads$rcluster,
                              chr = as.character(seqnames(myreads)),
                              rstart = as.integer(start(myreads)),
                              rend = as.integer(end(myreads)),
                              nexon = lengths(blocks(myreads)))
isUnclusterd = readToClusterMapping$clname == "unclustered"
isSingleton = readToClusterMapping$clname == "singleton"
write_tsv(readToClusterMapping[(!isUnclusterd) & (!isSingleton), ], outfile_clustered)
write_tsv(readToClusterMapping[isUnclusterd, ], outfile_unclustered)
write_tsv(readToClusterMapping[isSingleton, ], outfile_singleton)
#}
# get_cluster = function(thisReads){
# unlist(mclapply(1:length(thisReads), function(i){
# x = thisReads[i]
# mystarts = start(x$blocks[[1]])
# myends = end(x$blocks[[1]])
# qei = GRanges(seqnames(x),IRanges(start(x) + myends[-length(myends)] , width=1))
# qie = GRanges(seqnames(x),IRanges(start(x) + mystarts[-1]-1, width=1))
#
# ovlpsEI = as.list(findOverlaps(qei, rei,type="within",ignore.strand=myignoreStrand))
# ovlpsIE = as.list(findOverlaps(qie, rie,type="within",ignore.strand=myignoreStrand))
# isCluster = all(elementNROWS(ovlpsEI) == 1) & all(elementNROWS(ovlpsIE) == 1)
# if(isCluster){
# ## intron interval FIXME try to use the block class?
# paste(paste(rei$name[unlist(ovlpsEI)],rie$name[unlist(ovlpsIE)],sep="-"),collapse=";")
# } else {
# "unclustered"
# }
# },mc.cores=ncpu))
# }
| /sw/clustering_walking.R | no_license | czhu/FulQuant | R | false | false | 5,024 | r | extract_ss_from_gr = function(tx){
## tx is granges object with blocks
myblocks = blocks(tx)
nexons = lengths(myblocks)
idx = cumsum(nexons)
allstarts = unlist(start(myblocks))
allends = unlist(end(myblocks))
myseqnames = rep(as.character(seqnames(tx)), nexons-1)
qei = GRanges( myseqnames, IRanges(allends[-idx], width=1) )
qie = GRanges( myseqnames, IRanges(allstarts[-c(1,(idx[-length(idx)]+1))],width=1) )
qei$qname = qie$qname = rep(tx$name, nexons-1)
list( qei=qei, qie=qie )
}
## Assign each multi-exon read to a splice-site cluster.
##
## x: GRanges with a `blocks` column (BED12-style exon blocks) and a `name`
##    column, one range per read.
## Reads globals: rei / rie (reference exon-intron / intron-exon splice
## sites, GRanges with a `name` column) and myignoreStrand (logical passed
## to findOverlaps).
##
## Returns an unnamed character vector, one entry per read: either a
## ";"-separated string of "<ei>-<ie>" intron identifiers (a read is
## clustered only when EVERY one of its splice sites matches the reference),
## or "unclustered".
get_cluster_optimised = function(x){
ssSites = extract_ss_from_gr(x)
## One intron per internal exon boundary.
nintrons = lengths(blocks(x))-1
names(nintrons) = x$name
## Match query splice sites (width-1 ranges, type="within") against the
## reference sites; result columns are (query index, reference index).
ovlpsEI = as.matrix(findOverlaps(ssSites$qei, rei,type="within",ignore.strand=myignoreStrand))
ovlpsIE = as.matrix(findOverlaps(ssSites$qie, rie,type="within",ignore.strand=myignoreStrand))
## Both site lists must be in the same per-read order for the pairing below.
stopifnot(all(ssSites$qei$qname == ssSites$qie$qname))
## Group matched reference indices by read name; the explicit factor levels
## (unique(...)) preserve read order and keep reads with zero hits as
## empty groups.
ovlpsEIans = list(qname = ssSites$qei$qname[ovlpsEI[,1]], refidx = ovlpsEI[,2])
ovlpsEIansSplt = split(ovlpsEIans$refidx, factor(ovlpsEIans$qname, unique(ssSites$qei$qname)))
ovlpsIEans = list(qname = ssSites$qie$qname[ovlpsIE[,1]], refidx = ovlpsIE[,2])
ovlpsIEansSplt = split(ovlpsIEans$refidx, factor(ovlpsIEans$qname, unique(ssSites$qie$qname)))
## A read is clustered iff every intron matched on both the EI and IE side.
isClustered = which((lengths(ovlpsEIansSplt) == nintrons[names(ovlpsEIansSplt)]) &
(lengths(ovlpsIEansSplt) == nintrons[names(ovlpsIEansSplt)]))
ans = rep("unclustered", length(x))
names(ans) = x$name
if(length(isClustered)){
## intron interval FIXME try to use the block class?
## Pair each matched EI site with its IE partner into "<ei>-<ie>" labels.
prenames = paste(
rei$name[ unlist(ovlpsEIansSplt[isClustered]) ],
rie$name[ unlist(ovlpsIEansSplt[isClustered]) ],
sep="-")
## Repeat each read name once per intron, then join that read's intron
## labels with ";" (factor levels keep the original read order).
myqnames = rep(names(ovlpsEIansSplt[isClustered]),lengths(ovlpsEIansSplt[isClustered]))
myclnames= tapply(prenames, factor(myqnames,unique(myqnames)),
paste, collapse=";")
ans[names(myclnames)] = myclnames
}
names(ans)=NULL
ans
}
## ---- Read-to-splice-cluster pipeline (command-line driver) ----
## Usage: Rscript <script> <reads.bed> <ref_ei.bed> <ref_ie.bed>
## Labels every read as "singleton", "unclustered", or a ";"-joined intron
## signature, then writes the three groups to separate gzipped TSV files.
library(tidyverse)
library(rtracklayer)
## NOTE(review): mclapply() below comes from the `parallel` package, which is
## only on the search path here via packages attached transitively by
## rtracklayer -- consider an explicit library(parallel). TODO confirm.
myignoreStrand = TRUE
ncpu=30
args = commandArgs(trailingOnly = TRUE)
#
# ### two inputs both as bed files, ei and ie splice sites
infile = args[1] ## bed file
eiFile = args[2] ## ref ei file
ieFile = args[3] ## ref ie file
rei = import(eiFile)
rie = import(ieFile)
## Prefix reference splice-site names with their chromosome so names are
## unique genome-wide.
rei$name = paste(seqnames(rei), rei$name,sep="_")
rie$name = paste(seqnames(rie), rie$name,sep="_")
## All output paths share the input's basename with the extension stripped.
fileBasename= file.path(dirname(infile), strsplit(basename(infile),"\\.")[[1]][1])
outfile = paste0(fileBasename,".clustered.rda")
outfile_clustered = paste0(fileBasename,".clustered.txt.gz")
outfile_singleton = paste0(fileBasename,".singleton.txt.gz")
outfile_unclustered = paste0(fileBasename,".unclustered.txt.gz")
#if(!file.exists(outfile)) {
message("Reading ",basename(fileBasename),"......\n")
myreads = import(infile)
## Read names must be unique: the clustering indexes results by read name.
if(anyDuplicated(myreads$name)){
stop("Reads cannot map to multiple loci. This confuses the clustering")
}
myreads$rcluster="unclustered"
###check singleton
## Single-block (single-exon) reads have no splice sites to cluster on.
isSingleton = which(elementNROWS(myreads$blocks)==1)
myreads$rcluster[isSingleton] = "singleton"
testIdx = which(elementNROWS(myreads$blocks)>1)
message("Clustering ",basename(fileBasename),"......\n")
## Split the multi-exon read indices into ncpu contiguous chunks (cut keeps
## them in order, so unlist() restores the original ordering) and cluster
## each chunk in parallel.
myreads$rcluster[testIdx] = unlist(mclapply(split(testIdx, cut(seq_len(length(testIdx)), ncpu)), function(i) {
x = myreads[i]
get_cluster_optimised(x)
}, mc.cores=ncpu))
save(myreads,file=outfile)
## export clustered reads as a table
readToClusterMapping = tibble(qname=myreads$name, clname=myreads$rcluster,
chr=as.character(seqnames(myreads)),
rstart=as.integer(start(myreads)), rend= as.integer(end(myreads)),
nexon=lengths(blocks(myreads)) )
isUnclusterd = readToClusterMapping$clname == "unclustered"
isSingleton = readToClusterMapping$clname == "singleton"
write_tsv(readToClusterMapping[(!isUnclusterd) & (!isSingleton), ], outfile_clustered)
write_tsv(readToClusterMapping[isUnclusterd, ], outfile_unclustered)
write_tsv(readToClusterMapping[isSingleton, ], outfile_singleton)
#}
# get_cluster = function(thisReads){
# unlist(mclapply(1:length(thisReads), function(i){
# x = thisReads[i]
# mystarts = start(x$blocks[[1]])
# myends = end(x$blocks[[1]])
# qei = GRanges(seqnames(x),IRanges(start(x) + myends[-length(myends)] , width=1))
# qie = GRanges(seqnames(x),IRanges(start(x) + mystarts[-1]-1, width=1))
#
# ovlpsEI = as.list(findOverlaps(qei, rei,type="within",ignore.strand=myignoreStrand))
# ovlpsIE = as.list(findOverlaps(qie, rie,type="within",ignore.strand=myignoreStrand))
# isCluster = all(elementNROWS(ovlpsEI) == 1) & all(elementNROWS(ovlpsIE) == 1)
# if(isCluster){
# ## intron interval FIXME try to use the block class?
# paste(paste(rei$name[unlist(ovlpsEI)],rie$name[unlist(ovlpsIE)],sep="-"),collapse=";")
# } else {
# "unclustered"
# }
# },mc.cores=ncpu))
# }
|
# Practical Machine Learning assignment: predict the `classe` activity label
# from accelerometer measurements and compare several caret models.
# Initialize libraries.
library(caret); library(randomForest)
# Getting the data.
# Some fields contain "#DIV/0!" or are blank (""); both are read in as NA.
train.path <- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
test.path <- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"
# `header = TRUE` spelled out (the original relied on partial matching via `head=`).
train.set <- read.csv(train.path, header = TRUE, sep = ',', na.strings = c("NA", "#DIV/0!", ""))
test.set <- read.csv(test.path, header = TRUE, sep = ',', na.strings = c("NA", "#DIV/0!", ""))
dim(train.set); dim(test.set)
head(train.set, 3); head(test.set, 3)
# The first 7 columns (X, user_name, timestamps, window info) are identifiers,
# not sensor measurements, and are not relevant to the prediction task.
train.set <- train.set[ , -c(1:7)]
test.set <- test.set[ , -c(1:7)]
dim(train.set); dim(test.set)
# Drop every column that contains any NA.
train.set <- train.set[, colSums(is.na(train.set)) == 0]
test.set <- test.set[, colSums(is.na(test.set)) == 0]
# Sanity check: no NA left in either set.
sum(is.na(train.set))
sum(is.na(test.set))
dim(train.set); dim(test.set)
# Cross-validation: split the training set 75/25 into sub-training/sub-test
# via random sampling without replacement.
set.seed(1738)  # make the partition reproducible
sub.set <- createDataPartition(y = train.set$classe, p = 0.75, list = FALSE)
sub.train.set <- train.set[ sub.set, ]
sub.test.set <- train.set[ -sub.set, ]
dim(sub.train.set); dim(sub.test.set)
# Fixed: the second call previously re-printed sub.train.set.
head(sub.train.set, 3); head(sub.test.set, 3)
# Distribution of the outcome levels in each split.
table(train.set$classe)
table(sub.train.set$classe)
table(sub.test.set$classe)
# ---- Prediction models ----
# Random Forest - randomForest package
model.rfp <- randomForest(classe ~ . , data = sub.train.set, importance = TRUE)
pred.rfp <- predict(model.rfp, sub.test.set)
# Test results on the sub-testing data set:
confusionMatrix(pred.rfp, sub.test.set$classe)$table
confusionMatrix(pred.rfp, sub.test.set$classe)$overall[1]
# Random Forest - caret package
model.rf <- train(classe ~ . , data = sub.train.set, method = 'rf')
pred.rf <- predict(model.rf, sub.test.set)
confusionMatrix(pred.rf, sub.test.set$classe)$table
confusionMatrix(pred.rf, sub.test.set$classe)$overall[1]
# Boosting with trees (GBM) - caret package
model.gbm <- train(classe ~ . , data = sub.train.set, method = 'gbm')
pred.gbm <- predict(model.gbm, sub.test.set)
confusionMatrix(pred.gbm, sub.test.set$classe)$table
confusionMatrix(pred.gbm, sub.test.set$classe)$overall[1]
# Linear Discriminant Analysis - caret package
model.lda <- train(classe ~ . , data = sub.train.set, method = 'lda')
pred.lda <- predict(model.lda, sub.test.set)
confusionMatrix(pred.lda, sub.test.set$classe)$table
confusionMatrix(pred.lda, sub.test.set$classe)$overall[1]
# Stacked model: combine the predictions of the individual models.
data.comb <- data.frame(pred.rfp, pred.rf, pred.gbm, pred.lda, classe = sub.test.set$classe)
model.comb <- train(classe ~ ., data = data.comb, method = "rf")
# NOTE(review): model.comb was trained on base-model predictions, so predicting
# on sub.test.set (raw features) looks wrong -- predict(model.comb, data.comb)
# is presumably intended. Left as-is pending confirmation.
pred.comb <- predict(model.comb, sub.test.set)
confusionMatrix(pred.comb, sub.test.set$classe)$table
confusionMatrix(pred.comb, sub.test.set$classe)$overall[1]
| /8.Practical Machine Learning/End Assignment - v1.R | no_license | pieterov/DataScienceSpecialisation | R | false | false | 3,832 | r |
# Practical Machine Learning assignment: predict the `classe` activity label
# from accelerometer measurements and compare several caret models.
# Initialize libraries.
library(caret); library(randomForest)
# Getting the data.
# Some fields contain "#DIV/0!" or are blank (""); both are read in as NA.
train.path <- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
test.path <- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"
# `header = TRUE` spelled out (the original relied on partial matching via `head=`).
train.set <- read.csv(train.path, header = TRUE, sep = ',', na.strings = c("NA", "#DIV/0!", ""))
test.set <- read.csv(test.path, header = TRUE, sep = ',', na.strings = c("NA", "#DIV/0!", ""))
dim(train.set); dim(test.set)
head(train.set, 3); head(test.set, 3)
# The first 7 columns (X, user_name, timestamps, window info) are identifiers,
# not sensor measurements, and are not relevant to the prediction task.
train.set <- train.set[ , -c(1:7)]
test.set <- test.set[ , -c(1:7)]
dim(train.set); dim(test.set)
# Drop every column that contains any NA.
train.set <- train.set[, colSums(is.na(train.set)) == 0]
test.set <- test.set[, colSums(is.na(test.set)) == 0]
# Sanity check: no NA left in either set.
sum(is.na(train.set))
sum(is.na(test.set))
dim(train.set); dim(test.set)
# Cross-validation: split the training set 75/25 into sub-training/sub-test
# via random sampling without replacement.
set.seed(1738)  # make the partition reproducible
sub.set <- createDataPartition(y = train.set$classe, p = 0.75, list = FALSE)
sub.train.set <- train.set[ sub.set, ]
sub.test.set <- train.set[ -sub.set, ]
dim(sub.train.set); dim(sub.test.set)
# Fixed: the second call previously re-printed sub.train.set.
head(sub.train.set, 3); head(sub.test.set, 3)
# Distribution of the outcome levels in each split.
table(train.set$classe)
table(sub.train.set$classe)
table(sub.test.set$classe)
# ---- Prediction models ----
# Random Forest - randomForest package
model.rfp <- randomForest(classe ~ . , data = sub.train.set, importance = TRUE)
pred.rfp <- predict(model.rfp, sub.test.set)
# Test results on the sub-testing data set:
confusionMatrix(pred.rfp, sub.test.set$classe)$table
confusionMatrix(pred.rfp, sub.test.set$classe)$overall[1]
# Random Forest - caret package
model.rf <- train(classe ~ . , data = sub.train.set, method = 'rf')
pred.rf <- predict(model.rf, sub.test.set)
confusionMatrix(pred.rf, sub.test.set$classe)$table
confusionMatrix(pred.rf, sub.test.set$classe)$overall[1]
# Boosting with trees (GBM) - caret package
model.gbm <- train(classe ~ . , data = sub.train.set, method = 'gbm')
pred.gbm <- predict(model.gbm, sub.test.set)
confusionMatrix(pred.gbm, sub.test.set$classe)$table
confusionMatrix(pred.gbm, sub.test.set$classe)$overall[1]
# Linear Discriminant Analysis - caret package
model.lda <- train(classe ~ . , data = sub.train.set, method = 'lda')
pred.lda <- predict(model.lda, sub.test.set)
confusionMatrix(pred.lda, sub.test.set$classe)$table
confusionMatrix(pred.lda, sub.test.set$classe)$overall[1]
# Stacked model: combine the predictions of the individual models.
data.comb <- data.frame(pred.rfp, pred.rf, pred.gbm, pred.lda, classe = sub.test.set$classe)
model.comb <- train(classe ~ ., data = data.comb, method = "rf")
# NOTE(review): model.comb was trained on base-model predictions, so predicting
# on sub.test.set (raw features) looks wrong -- predict(model.comb, data.comb)
# is presumably intended. Left as-is pending confirmation.
pred.comb <- predict(model.comb, sub.test.set)
confusionMatrix(pred.comb, sub.test.set$classe)$table
confusionMatrix(pred.comb, sub.test.set$classe)$overall[1]
|
# Explore BLS labor-force participation by age (cpsaat03 spreadsheet) and fit
# a simple linear model of employed vs. total civilian labor force.
# Install packages only when missing (the original installed unconditionally,
# and installed XLConnect although readxl is what is actually used below).
if (!requireNamespace("readxl", quietly = TRUE)) install.packages("readxl")     # Excel reader
if (!requireNamespace("tidyverse", quietly = TRUE)) install.packages("tidyverse") # ggplot2 etc.
library(readxl)
library(tidyverse)
# Load the CPS table (sheet "ByAge") into the session.
age_labor_data <- read_excel("C:/Users/mazhi/Documents/R/cpsaat03.xlsx",
                             sheet = "ByAge") #puts data in Global Environment#
View(age_labor_data)
# summary statistics #
summary(age_labor_data)
# rename variables #
NLF <- age_labor_data$`Civilian labor force Not in Labor force`
NIP <- age_labor_data$`Civilian NI pop`
CLF <- age_labor_data$`Civilian labor force Total`
# create percentages #
NLFPer <- NLF / NIP * 100   # % of population not in the labor force
CLFPer <- CLF / NIP * 100   # % of population in the labor force
# Bar chart of civilian labor force total, labelled by age.
barplot(age_labor_data$`Civilian labor force Total`, names.arg = age_labor_data$Age, xlab = "Age")
# Scatter plots of age vs. labor-force participation (percent of population).
ggplot(data = age_labor_data) + geom_point(mapping = aes(x = Age, y = CLFPer), size = 3)
ggplot(data = age_labor_data) + geom_point(mapping = aes(x = Age, y = CLFPer, size = 3), color = "green")
ggplot(data = age_labor_data) + geom_point(mapping = aes(x = Age, y = `Civilian labor force, percent of population`))
# Scatter plot: participation rate by age, point size = number employed.
CLFPercent <- age_labor_data$`Civilian labor force, percent of population`
CLFEmployed <- age_labor_data$`Civilian labor force employed total`
ggplot(data = age_labor_data) + geom_point(mapping = aes(x = Age, y = CLFPercent, size = CLFEmployed))
# Non-participation by age; ages with non-participation over ~50% drawn in red.
age_labor_data_color <- cut(NLFPer, breaks = c(-Inf, 49.99, Inf), labels = c("black", "red"))
ggplot(data = age_labor_data) + geom_point(mapping = aes(x = Age, y = NLFPer), color = age_labor_data_color)
# NOTE(review): attach() is fragile (name masking); prefer with()/data= args.
attach(age_labor_data)
names(age_labor_data)
class(Age)
cor(`Civilian labor force employed total`, `Civilian labor force Total`)
# Fixed: the original mixed backticks with single quotes, which is a parse
# error for plot() and made lm() regress one string literal on another.
plot(`Civilian labor force employed total`, `Civilian labor force Total`)
linear_model <- lm(`Civilian labor force employed total` ~ `Civilian labor force Total`,
                   data = age_labor_data)
summary(linear_model)
| /DataExplore_lm.R | no_license | zhijunm/Applied_Regression | R | false | false | 2,206 | r | install.packages("XLConnect") #installs package to read Excel spreadsheets
install.packages("tidyverse") #installs very powerful visualization package
library(readxl)
library(tidyverse)
age_labor_data <- read_excel("C:/Users/mazhi/Documents/R/cpsaat03.xlsx",
sheet = "ByAge") #puts data in Global Environment#
View(age_labor_data)
# summary statistics #
summary(age_labor_data)
# rename variables #
NLF <-(age_labor_data$`Civilian labor force Not in Labor force`)
NIP <-(age_labor_data$`Civilian NI pop`)
CLF<-(age_labor_data$`Civilian labor force Total`)
# create percentages #
NLFPer<-NLF/NIP
NLFPer<-NLFPer*100
NLFPer<-NLFPer
CLFPer <-CLF/NIP
CLFPer<-CLFPer*100
# Bar Chart of Civilian Labor Force Total with labels by Age #
barplot(age_labor_data$`Civilian labor force Total`, names.arg = age_labor_data$Age, xlab="Age")
# Scatter Plot of Age and Civilian Labor Force by Percent of Population #
ggplot(data=age_labor_data)+geom_point(mapping = aes(x=Age, y=CLFPer), size=3)
ggplot(data=age_labor_data)+geom_point(mapping = aes(x=Age, y=CLFPer, size=3), color="green")
ggplot(data=age_labor_data)+geom_point(mapping = aes(x=Age, y=`Civilian labor force, percent of population`))
# Scatter Plot of Civilian Labor Force (Percent), Age, and Civilian Labor Force (Employed) #
CLFPercent <- age_labor_data$`Civilian labor force, percent of population`
CLFEmployed <- age_labor_data$`Civilian labor force employed total`
ggplot(data=age_labor_data)+geom_point(mapping = aes(x=Age, y=CLFPercent, size=CLFEmployed))
# Scatter Plot of Percent of Population Not in Labor Force by Age
# with different color dots for Ages with labor force non-participation over 50%
age_labor_data_color <- cut(NLFPer, breaks = c(-Inf,49.99,Inf), labels = c("black","red"))
ggplot(data = age_labor_data)+geom_point(mapping = aes(x=Age, y=NLFPer), color=age_labor_data_color)
attach(age_labor_data)
names(age_labor_data)
class(Age)
cor(`Civilian labor force employed total`, `Civilian labor force Total`)
plot(`Civilian labor force employed total`,`Civilian labor force Total')
linear_model<-lm('Civilian labor force employed total'~'Civilian labor force Total')
summary(linear_model)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/4-model-selection.R
\name{stages_kmeans}
\alias{stages_kmeans}
\title{Learn a staged tree with k-means clustering}
\usage{
stages_kmeans(
object,
k = length(object$tree[[1]]),
algorithm = "Hartigan-Wong",
transform = sqrt,
ignore = object$name_unobserved,
limit = length(object$tree),
scope = NULL,
nstart = 1
)
}
\arguments{
\item{object}{an object of class \code{sevt} with fitted probabilities and
data, as returned by \code{full} or \code{sevt_fit}.}
\item{k}{integer or (named) vector: number of clusters, that is stages per variable.
Values will be recycled if needed.}
\item{algorithm}{character: as in \code{\link{kmeans}}.}
\item{transform}{function applied to the probabilities before clustering.}
\item{ignore}{vector of stages which will be ignored and left untouched,
by default the name of the unobserved stages stored in
\code{object$name_unobserved}.}
\item{limit}{the maximum number of variables to consider.}
\item{scope}{names of the variables to consider.}
\item{nstart}{as in \code{\link{kmeans}}}
}
\value{
A staged event tree.
}
\description{
Build a staged event tree with \code{k} stages for each variable
by clustering (transformed) probabilities with k-means.
}
\details{
\code{stages_kmeans} performs k-means clustering
to aggregate the stage probabilities of the initial
staged tree \code{object}.
Different values for k can be specified by supplying a
(named) vector to \code{k}.
\code{\link{kmeans}} from the \code{stats} package is used
internally and arguments \code{algorithm} and \code{nstart}
refer to the same arguments as \code{\link{kmeans}}.
}
\examples{
data("Titanic")
model <- stages_kmeans(full(Titanic, join_unobserved = TRUE, lambda = 1), k = 2)
summary(model)
}
| /man/stages_kmeans.Rd | permissive | FedericoCarli/stagedtrees | R | false | true | 1,807 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/4-model-selection.R
\name{stages_kmeans}
\alias{stages_kmeans}
\title{Learn a staged tree with k-means clustering}
\usage{
stages_kmeans(
object,
k = length(object$tree[[1]]),
algorithm = "Hartigan-Wong",
transform = sqrt,
ignore = object$name_unobserved,
limit = length(object$tree),
scope = NULL,
nstart = 1
)
}
\arguments{
\item{object}{an object of class \code{sevt} with fitted probabilities and
data, as returned by \code{full} or \code{sevt_fit}.}
\item{k}{integer or (named) vector: number of clusters, that is stages per variable.
Values will be recycled if needed.}
\item{algorithm}{character: as in \code{\link{kmeans}}.}
\item{transform}{function applied to the probabilities before clustering.}
\item{ignore}{vector of stages which will be ignored and left untouched,
by default the name of the unobserved stages stored in
\code{object$name_unobserved}.}
\item{limit}{the maximum number of variables to consider.}
\item{scope}{names of the variables to consider.}
\item{nstart}{as in \code{\link{kmeans}}}
}
\value{
A staged event tree.
}
\description{
Build a staged event tree with \code{k} stages for each variable
by clustering (transformed) probabilities with k-means.
}
\details{
\code{stages_kmeans} performs k-means clustering
to aggregate the stage probabilities of the initial
staged tree \code{object}.
Different values for k can be specified by supplying a
(named) vector to \code{k}.
\code{\link{kmeans}} from the \code{stats} package is used
internally and arguments \code{algorithm} and \code{nstart}
refer to the same arguments as \code{\link{kmeans}}.
}
\examples{
data("Titanic")
model <- stages_kmeans(full(Titanic, join_unobserved = TRUE, lambda = 1), k = 2)
summary(model)
}
|
## Simulate a Matern GMRF (INLA SPDE approach) on a mesh built from spatial
## polygon centroids, then plot one sampled field on a regular grid.
## NOTE(review): rm(list=ls()) wipes the user's workspace -- avoid in scripts.
rm(list=ls())
pacman::p_load(INSP, ggplot2, data.table, dplyr, INLA, rgeos, rgdal)
## mx.sp.df (a spatial polygons object) is expected to come from an attached
## package -- presumably INSP; TODO confirm.
latlong <- gCentroid(mx.sp.df, byid=T)
mesh <- latlong %>% inla.mesh.create
spde <- inla.spde2.matern(mesh)
plot(mesh)
points(latlong@coords[,"x"], latlong@coords[,"y"], pch=20, col="red")
sigma0 <- .3 ## Standard deviation
range0 <- 0.2 ## Spatial range
## Convert into tau and kappa:
kappa0 <- sqrt(8)/range0
tau0 <- 1/(sqrt(4*pi)*kappa0*sigma0)
## NOTE(review): this repeats the identical inla.spde2.matern(mesh) call
## made above; one of the two assignments is redundant.
spde <- inla.spde2.matern(mesh)
Q <- inla.spde2.precision(spde, theta=c(log(tau0), log(kappa0)))
## spde2 re-parameterises the model so theta=c(0,0) maps to (tau0, kappa0).
spde2 <- inla.spde2.matern(mesh, B.tau=cbind(log(tau0),1,0),
B.kappa=cbind(log(kappa0),0,1),
theta.prior.mean=c(0,0), theta.prior.prec=c(0.1,1))
## NOTE(review): Q2 is built from `spde`, not `spde2` -- with spde2's basis
## theta=c(0,0) would reproduce Q, so `spde2` is presumably intended. Verify.
Q2 <- inla.spde2.precision(spde, theta=c(0, 0))
## Draw one sample of the latent field from precision matrix Q.
x <- as.vector(inla.qsample(n=1, Q))
## Project the mesh field onto an 800x800 grid over the given extent.
proj <- inla.mesh.projector(mesh, xlim=c(-117, -86), ylim=c(14, 33),
dims=c(800,800))
## NOTE(review): proj$y feeds the x column and proj$x the y column -- looks
## like an axis swap; confirm against the intended plot orientation.
DFproj <- data.table(x=rep(proj$y, times=length(proj$x)),
y=rep(proj$x, each=length(proj$y)),
z=c(inla.mesh.project(proj, field=x[1:mesh$n])))
DFproj <- subset(DFproj, !is.na(z))
ggplot(DFproj, aes(x, y, z=z)) + geom_tile(aes(fill = z)) + theme_bw() +
scale_fill_gradientn(colours=topo.colors(10))
| /simulation/sim_spde.R | no_license | wangdafacai/MXU5MR | R | false | false | 1,285 | r | rm(list=ls())
pacman::p_load(INSP, ggplot2, data.table, dplyr, INLA, rgeos, rgdal)
latlong <- gCentroid(mx.sp.df, byid=T)
mesh <- latlong %>% inla.mesh.create
spde <- inla.spde2.matern(mesh)
plot(mesh)
points(latlong@coords[,"x"], latlong@coords[,"y"], pch=20, col="red")
sigma0 <- .3 ## Standard deviation
range0 <- 0.2 ## Spatial range
## Convert into tau and kappa:
kappa0 <- sqrt(8)/range0
tau0 <- 1/(sqrt(4*pi)*kappa0*sigma0)
spde <- inla.spde2.matern(mesh)
Q <- inla.spde2.precision(spde, theta=c(log(tau0), log(kappa0)))
spde2 <- inla.spde2.matern(mesh, B.tau=cbind(log(tau0),1,0),
B.kappa=cbind(log(kappa0),0,1),
theta.prior.mean=c(0,0), theta.prior.prec=c(0.1,1))
Q2 <- inla.spde2.precision(spde, theta=c(0, 0))
x <- as.vector(inla.qsample(n=1, Q))
proj <- inla.mesh.projector(mesh, xlim=c(-117, -86), ylim=c(14, 33),
dims=c(800,800))
DFproj <- data.table(x=rep(proj$y, times=length(proj$x)),
y=rep(proj$x, each=length(proj$y)),
z=c(inla.mesh.project(proj, field=x[1:mesh$n])))
DFproj <- subset(DFproj, !is.na(z))
ggplot(DFproj, aes(x, y, z=z)) + geom_tile(aes(fill = z)) + theme_bw() +
scale_fill_gradientn(colours=topo.colors(10))
|
# Google Places collection bootstrap: working directory, API key, search
# centre/radius, place type, and output/database locations used downstream.
# Set working directory
# NOTE(review): hard-coded absolute setwd() ties this script to one machine.
setwd('/Users/maggiesaavedra/GitHub/gplace_inR/')
# Load required packages
library(googleway)
library(data.table)
library(mongolite)
# Google Places API key (alternatives kept, commented out).
# WARNING(security): real API keys are committed in source here -- they should
# be revoked and supplied via an environment variable or untracked config.
# key <- c('AIzaSyD6iU9O03sGdnSARGSlrLcLCncmRn3Ejes') # maggie
# key <- c('AIzaSyCaKsmC0yi880Xbxp6-Tc6fd9Df8u2fGaw') # gay marie
key <- c('AIzaSyBAGtd1QtvJXMLrIRHWCZJPcqWS8R_BGSc')
# Default search centre (latitude, longitude); alternative centres kept below.
# ll <- c(14.620448, 121.053393) # cubao
# ll <- c(14.556595, 121.024139) # makati
# ll <- c(14.576569, 121.052659) # ortigas
# ll <- c(14.577128, 121.033677) # mandaluyong
# ll <- c(14.606519, 120.984254) # manila
# ll <- c(14.536578, 120.991551) # pasay
# ll <- c(14.573249, 121.082198) # pasig
# ll <- c(14.650645, 121.049363) # quezon memorial cirle
# ll <- c(14.602690, 121.033207) # san juan city (restaurant next)
# ll <- c(14.650988, 121.115031) # marikina
# ll <- c(14.552853, 121.051530) # BGC
# ll <- c(14.545514, 121.068274) # pateros
# ll <- c(14.517990, 121.049635) # taguig
ll <- c(14.605458, 121.079994) # eastwood
# Set default radius in meters
rad <- 10000
# Relevant Place Type
p.types <- c('night_club')
# Output path for the radar-search results
radar_dir <- paste0('../../Moonlight/google-places/g_radar_', 'night_club.csv')
# Connect to the MongoDB collection used to store place records
mnew <- mongo(collection = 'g_place', db = 'googleplaces')
# Output path for the place-details results
details_dir <- paste0('../../Moonlight/google-places/g_places_', 'night_club.csv')
| /gplace_inR/init.R | no_license | ggsaavedra/google-places-in-R | R | false | false | 1,444 | r | # Set working directory
setwd('/Users/maggiesaavedra/GitHub/gplace_inR/')
# Load require packages
library(googleway)
library(data.table)
library(mongolite)
# Set googple places api keys
# key <- c('AIzaSyD6iU9O03sGdnSARGSlrLcLCncmRn3Ejes') # maggie
# key <- c('AIzaSyCaKsmC0yi880Xbxp6-Tc6fd9Df8u2fGaw') # gay marie
key <- c('AIzaSyBAGtd1QtvJXMLrIRHWCZJPcqWS8R_BGSc')
# Randomize long lat for default long lat entry
# ll <- c(14.620448, 121.053393) # cubao
# ll <- c(14.556595, 121.024139) # makati
# ll <- c(14.576569, 121.052659) # ortigas
# ll <- c(14.577128, 121.033677) # mandaluyong
# ll <- c(14.606519, 120.984254) # manila
# ll <- c(14.536578, 120.991551) # pasay
# ll <- c(14.573249, 121.082198) # pasig
# ll <- c(14.650645, 121.049363) # quezon memorial cirle
# ll <- c(14.602690, 121.033207) # san juan city (restaurant next)
# ll <- c(14.650988, 121.115031) # marikina
# ll <- c(14.552853, 121.051530) # BGC
# ll <- c(14.545514, 121.068274) # pateros
# ll <- c(14.517990, 121.049635) # taguig
ll <- c(14.605458, 121.079994) # eastwood
# Set default radius in meters
rad <- 10000
# Relevant Place Type
p.types <- c('night_club')
# Dir for the radar data
radar_dir <- paste0('../../Moonlight/google-places/g_radar_', 'night_club.csv')
# connect to the db
mnew <- mongo(collection = 'g_place', db = 'googleplaces')
# Dir for the details data
details_dir <- paste0('../../Moonlight/google-places/g_places_', 'night_club.csv')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/servicecatalog_operations.R
\name{servicecatalog_list_resources_for_tag_option}
\alias{servicecatalog_list_resources_for_tag_option}
\title{Lists the resources associated with the specified TagOption}
\usage{
servicecatalog_list_resources_for_tag_option(
TagOptionId,
ResourceType = NULL,
PageSize = NULL,
PageToken = NULL
)
}
\arguments{
\item{TagOptionId}{[required] The TagOption identifier.}
\item{ResourceType}{The resource type.
\itemize{
\item \code{Portfolio}
\item \code{Product}
}}
\item{PageSize}{The maximum number of items to return with this call.}
\item{PageToken}{The page token for the next set of results. To retrieve the first set of
results, use null.}
}
\description{
Lists the resources associated with the specified TagOption.
See \url{https://www.paws-r-sdk.com/docs/servicecatalog_list_resources_for_tag_option/} for full documentation.
}
\keyword{internal}
| /cran/paws.management/man/servicecatalog_list_resources_for_tag_option.Rd | permissive | paws-r/paws | R | false | true | 973 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/servicecatalog_operations.R
\name{servicecatalog_list_resources_for_tag_option}
\alias{servicecatalog_list_resources_for_tag_option}
\title{Lists the resources associated with the specified TagOption}
\usage{
servicecatalog_list_resources_for_tag_option(
TagOptionId,
ResourceType = NULL,
PageSize = NULL,
PageToken = NULL
)
}
\arguments{
\item{TagOptionId}{[required] The TagOption identifier.}
\item{ResourceType}{The resource type.
\itemize{
\item \code{Portfolio}
\item \code{Product}
}}
\item{PageSize}{The maximum number of items to return with this call.}
\item{PageToken}{The page token for the next set of results. To retrieve the first set of
results, use null.}
}
\description{
Lists the resources associated with the specified TagOption.
See \url{https://www.paws-r-sdk.com/docs/servicecatalog_list_resources_for_tag_option/} for full documentation.
}
\keyword{internal}
|
library(tidyverse)
library(reshape)
install.packages("reshape")
library(ggplot2)
housing = read.csv('C:/Users/andal/OneDrive/Documents/Code/GitHub/github.io/data-science/handson-ml/datasets/housing/housing.csv')
head(housing)
ggplot(data = melt(housing), mapping = aes(x = value)) +
geom_histogram(bins = 30) + facet_wrap(~variable, scales = 'free_x')
housing$total_bedrooms[is.na(housing$total_bedrooms)] = median(housing$total_bedrooms , na.rm = TRUE)
housing$mean_bedrooms = housing$total_bedrooms/housing$households
housing$mean_rooms = housing$total_rooms/housing$households
drops = c('total_bedrooms', 'total_rooms')
housing = housing[ , !(names(housing) %in% drops)]
head(housing)
categories = unique(housing$ocean_proximity)
#split the categories off
cat_housing = data.frame(ocean_proximity = housing$ocean_proximity)
for(cat in categories){
cat_housing[,cat] = rep(0, times= nrow(cat_housing))
}
head(cat_housing) #see the new columns on the right
for(i in 1:length(cat_housing$ocean_proximity)){
cat = as.character(cat_housing$ocean_proximity[i])
cat_housing[,cat][i] = 1
}
head(cat_housing)
cat_columns = names(cat_housing)
keep_columns = cat_columns[cat_columns != 'ocean_proximity']
cat_housing = select(cat_housing,one_of(keep_columns))
tail(cat_housing)
drops = c('ocean_proximity','median_house_value')
housing_num = housing[ , !(names(housing) %in% drops)]
head(housing_num)
scaled_housing_num = scale(housing_num)
head(scaled_housing_num)
cleaned_housing = cbind(cat_housing, scaled_housing_num, median_house_value=housing$median_house_value)
head(cleaned_housing)
set.seed(1738) # Set a random seed so that same sample can be reproduced in future runs
sample = sample.int(n = nrow(cleaned_housing), size = floor(.8*nrow(cleaned_housing)), replace = F)
train = cleaned_housing[sample, ] #just the samples
test = cleaned_housing[-sample, ] #everything but the samples
head(train)
nrow(train) + nrow(test) == nrow(cleaned_housing)
library('boot')
?cv.glm # note the K option for K fold cross validation
glm_house = glm(median_house_value~median_income+mean_rooms+population, data=cleaned_housing)
k_fold_cv_error = cv.glm(cleaned_housing , glm_house, K=5)
k_fold_cv_error$delta
glm_cv_rmse = sqrt(k_fold_cv_error$delta)[1]
glm_cv_rmse #off by about $83,000... it is a start
names(glm_house) #what parts of the model are callable?
glm_house$coefficients
library('randomForest')
install.packages("randomForest")
names(train)
set.seed(1738)
train_y = train[,'median_house_value']
train_x = train[, names(train) !='median_house_value']
head(train_y)
head(train_x)
#some people like weird r format like this... I find it causes headaches
#rf_model = randomForest(median_house_value~. , data = train, ntree =500, importance = TRUE)
rf_model = randomForest(train_x, y = train_y , ntree = 1500, importance = TRUE)
names(rf_model) #these are all the different things you can call from the model.
rf_model$importance
oob_prediction = predict(rf_model) #leaving out a data source forces OOB predictions
train_mse = mean(as.numeric((oob_prediction - train_y)^2))
oob_rmse = sqrt(train_mse)
oob_rmse
test_y = test[,'median_house_value']
test_x = test[, names(test) !='median_house_value']
y_pred = predict(rf_model , test_x)
test_mse = mean(((y_pred - test_y)^2))
test_rmse = sqrt(test_mse)
test_rmse
| /housingDrill.R | no_license | andalexis/data-science | R | false | false | 3,342 | r | library(tidyverse)
# Exploratory modeling of the California housing dataset:
# one-hot encode ocean_proximity, scale numeric predictors, then compare
# a cross-validated GLM against a random forest.
library(reshape)
# NOTE(review): install.packages() does not belong in an analysis script
# (and would have to run *before* library()); install dependencies once,
# interactively, instead.
# install.packages("reshape")
library(ggplot2)

# Hard-coded local path -- adjust to your checkout of the handson-ml repo.
housing <- read.csv('C:/Users/andal/OneDrive/Documents/Code/GitHub/github.io/data-science/handson-ml/datasets/housing/housing.csv')
head(housing)

# Histogram of every variable (melt() stacks the columns into long format).
ggplot(data = melt(housing), mapping = aes(x = value)) +
  geom_histogram(bins = 30) + facet_wrap(~variable, scales = 'free_x')

# Impute missing bedroom counts with the median, derive per-household
# averages, then drop the raw totals they replace.
housing$total_bedrooms[is.na(housing$total_bedrooms)] <- median(housing$total_bedrooms , na.rm = TRUE)
housing$mean_bedrooms <- housing$total_bedrooms/housing$households
housing$mean_rooms <- housing$total_rooms/housing$households
drops <- c('total_bedrooms', 'total_rooms')
housing <- housing[ , !(names(housing) %in% drops)]
head(housing)

# --- One-hot encode ocean_proximity ------------------------------------
categories <- unique(housing$ocean_proximity)
#split the categories off: one 0/1 indicator column per category level
cat_housing <- data.frame(ocean_proximity = housing$ocean_proximity)
for(cat in categories){
  cat_housing[,cat] <- rep(0, times= nrow(cat_housing))
}
head(cat_housing) #see the new columns on the right
# seq_len() is safe even for zero rows (1:length(...) is not).
for(i in seq_len(nrow(cat_housing))){
  cat <- as.character(cat_housing$ocean_proximity[i])
  cat_housing[,cat][i] <- 1
}
head(cat_housing)
cat_columns <- names(cat_housing)
keep_columns <- cat_columns[cat_columns != 'ocean_proximity']
cat_housing <- select(cat_housing,one_of(keep_columns))
tail(cat_housing)

# --- Scale numeric predictors (response left unscaled) -----------------
drops <- c('ocean_proximity','median_house_value')
housing_num <- housing[ , !(names(housing) %in% drops)]
head(housing_num)
scaled_housing_num <- scale(housing_num)
head(scaled_housing_num)
cleaned_housing <- cbind(cat_housing, scaled_housing_num, median_house_value=housing$median_house_value)
head(cleaned_housing)

# --- 80/20 train/test split --------------------------------------------
set.seed(1738) # Set a random seed so that same sample can be reproduced in future runs
# `train_idx` instead of `sample`, so base::sample() is not shadowed.
train_idx <- sample.int(n = nrow(cleaned_housing), size = floor(.8*nrow(cleaned_housing)), replace = FALSE)
train <- cleaned_housing[train_idx, ]  #just the sampled rows
test <- cleaned_housing[-train_idx, ]  #everything but the sampled rows
head(train)
nrow(train) + nrow(test) == nrow(cleaned_housing)  # sanity check: should be TRUE

# --- Baseline: GLM with 5-fold cross-validation ------------------------
library('boot')
?cv.glm # note the K option for K fold cross validation
glm_house <- glm(median_house_value~median_income+mean_rooms+population, data=cleaned_housing)
k_fold_cv_error <- cv.glm(cleaned_housing , glm_house, K=5)
k_fold_cv_error$delta
glm_cv_rmse <- sqrt(k_fold_cv_error$delta)[1]
glm_cv_rmse #off by about $83,000... it is a start
names(glm_house) #what parts of the model are callable?
glm_house$coefficients

# --- Random forest -----------------------------------------------------
library('randomForest')
# install.packages("randomForest")  # install once, outside the script
names(train)
set.seed(1738)
train_y <- train[,'median_house_value']
train_x <- train[, names(train) !='median_house_value']
head(train_y)
head(train_x)
# Formula interface would also work:
# rf_model = randomForest(median_house_value~. , data = train, ntree =500, importance = TRUE)
rf_model <- randomForest(train_x, y = train_y , ntree = 1500, importance = TRUE)
names(rf_model) # everything you can extract from the fitted forest
rf_model$importance
# predict() with no newdata returns out-of-bag predictions.
oob_prediction <- predict(rf_model)
train_mse <- mean(as.numeric((oob_prediction - train_y)^2))
oob_rmse <- sqrt(train_mse)
oob_rmse

# --- Held-out test error -----------------------------------------------
test_y <- test[,'median_house_value']
test_x <- test[, names(test) !='median_house_value']
y_pred <- predict(rf_model , test_x)
test_mse <- mean(((y_pred - test_y)^2))
test_rmse <- sqrt(test_mse)
test_rmse
|
\name{spectra}
\alias{spectra.Speclib}
\alias{spectra}
\alias{spectra<-}
\alias{spectra,Speclib-method}
\alias{spectra<-,Speclib,data.frame-method}
\alias{spectra<-,Speclib,matrix-method}
\alias{spectra<-,Speclib,numeric-method}
\alias{spectra<-,Speclib,RasterBrick-method}
\alias{[,.Spectra,ANY,ANY,ANY-method}
\alias{show,.Spectra-method}
\alias{print,.Spectra-method}
% TODO: the raster methods below should eventually be split out into a separate Rd file
\alias{cellFromCol,Speclib-method}
\alias{cellFromLine,Speclib-method}
\alias{cellFromPolygon,Speclib-method}
\alias{cellFromRow,Speclib-method}
\alias{cellFromRowCol,Speclib-method}
\alias{cellFromRowColCombine,Speclib-method}
\alias{cellFromXY,Speclib-method}
\alias{colFromX,Speclib-method}
\alias{fourCellsFromXY,Speclib-method}
\alias{rowFromY,Speclib-method}
\alias{readAll,Speclib-method}
\title{
Handling spectra
}
\description{
Returning and setting spectra in Speclib
}
\usage{
\S4method{spectra}{Speclib}(object, i, j, ...)
\S4method{spectra}{Speclib,data.frame}(object) <- value
\S4method{spectra}{Speclib,matrix}(object) <- value
\S4method{spectra}{Speclib,numeric}(object) <- value
\S4method{spectra}{Speclib,RasterBrick}(object) <- value
}
\arguments{
\item{object}{
Object of class \code{Speclib}.
}
\item{i}{
Index of spectra to return. If missing all spectra are returned.
}
\item{j}{
Index of bands to return. If missing all bands are returned.
}
\item{...}{
Passed to internal function. Currently only one parameter is accepted: \code{return_names}: Logical indicating, if names of columns and rows should be set to \code{\link{bandnames}} and \code{\link{idSpeclib}}.
}
\item{value}{
Matrix or RasterBrick-object containing spectral values. If value is a matrix, columns are band values and rows are spectra.
}
}
\details{
For \code{spectra<-}, the function does not check if dimensions of spectra match dimensions of Speclib. Additionally, no conversion into \code{matrix} is performed! If spectra are not correctly stored, errors in other functions may arise. Thus check always carefully, if spectra are modified by hand.
}
\value{
For \code{spectra<-}, the updated object. Otherwise a matrix of the spectra in \code{object} is returned.
}
\author{
Lukas Lehnert
}
\seealso{
\code{\linkS4class{Speclib}}
}
\examples{
data(spectral_data)
## Manual plot of the first spectrum
plot(wavelength(spectral_data), spectra(spectral_data)[1,], type="l")
}
\keyword{utilities}
| /man/spectra.Rd | no_license | cran/hsdar | R | false | false | 2,429 | rd | \name{spectra}
\alias{spectra.Speclib}
\alias{spectra}
\alias{spectra<-}
\alias{spectra,Speclib-method}
\alias{spectra<-,Speclib,data.frame-method}
\alias{spectra<-,Speclib,matrix-method}
\alias{spectra<-,Speclib,numeric-method}
\alias{spectra<-,Speclib,RasterBrick-method}
\alias{[,.Spectra,ANY,ANY,ANY-method}
\alias{show,.Spectra-method}
\alias{print,.Spectra-method}
% TODO: the raster methods below should eventually be split out into a separate Rd file
\alias{cellFromCol,Speclib-method}
\alias{cellFromLine,Speclib-method}
\alias{cellFromPolygon,Speclib-method}
\alias{cellFromRow,Speclib-method}
\alias{cellFromRowCol,Speclib-method}
\alias{cellFromRowColCombine,Speclib-method}
\alias{cellFromXY,Speclib-method}
\alias{colFromX,Speclib-method}
\alias{fourCellsFromXY,Speclib-method}
\alias{rowFromY,Speclib-method}
\alias{readAll,Speclib-method}
\title{
Handling spectra
}
\description{
Returning and setting spectra in Speclib
}
\usage{
\S4method{spectra}{Speclib}(object, i, j, ...)
\S4method{spectra}{Speclib,data.frame}(object) <- value
\S4method{spectra}{Speclib,matrix}(object) <- value
\S4method{spectra}{Speclib,numeric}(object) <- value
\S4method{spectra}{Speclib,RasterBrick}(object) <- value
}
\arguments{
\item{object}{
Object of class \code{Speclib}.
}
\item{i}{
Index of spectra to return. If missing all spectra are returned.
}
\item{j}{
Index of bands to return. If missing all bands are returned.
}
\item{...}{
Passed to internal function. Currently only one parameter is accepted: \code{return_names}: Logical indicating, if names of columns and rows should be set to \code{\link{bandnames}} and \code{\link{idSpeclib}}.
}
\item{value}{
Matrix or RasterBrick-object containing spectral values. If value is a matrix, columns are band values and rows are spectra.
}
}
\details{
For \code{spectra<-}, the function does not check if dimensions of spectra match dimensions of Speclib. Additionally, no conversion into \code{matrix} is performed! If spectra are not correctly stored, errors in other functions may arise. Thus check always carefully, if spectra are modified by hand.
}
\value{
For \code{spectra<-}, the updated object. Otherwise a matrix of the spectra in \code{object} is returned.
}
\author{
Lukas Lehnert
}
\seealso{
\code{\linkS4class{Speclib}}
}
\examples{
data(spectral_data)
## Manual plot of the first spectrum
plot(wavelength(spectral_data), spectra(spectral_data)[1,], type="l")
}
\keyword{utilities}
|
#===============================================================================
# Name   : estadisticas_ichthyop
# Author : Jorge Flores
# Date   :
# Version:
# Aim    : fit a linear model and run an ANOVA on Ichthyop simulation outputs
# URL    :
#===============================================================================
# Statistical test for the simulation with monthly and daily winds.
dirpath     <- 'F:/ichthyop_output_analysis/RUN2/csv_files/recruited/'
# out_path    <- 'C:/Users/ASUS/Desktop/ichthyop_output_analysis/'
source_path <- 'D:/ICHTHYOP/scripts/'
winds <- 'clim'
simu  <- 'lobos'
simulacion <- paste0(winds,'_',simu)
# (kept for reference: loop that stacked several wind scenarios)
# dataset = NULL
# for(i in 1:length(winds)){
#   simu = paste0(winds[i],'_',simu)
#   dat = read.csv(paste0(dirpath,simu,".csv"), sep= ",")
#   winds_index = rep(winds[i], times = length(dat[,1]))
#   dat = cbind(dat,winds_index)
#   dataset = rbind(dataset,dat)
# }
dataset <- read.table(paste0(dirpath, simulacion, '.csv'), header = TRUE)

# Linear model: all main effects and two-way interactions of the design
# factors (Year, Day, Depth, Age).
mod <- lm(Recruitprop ~ factor(Year) + factor(Day) + factor(Depth) + factor(Age)
          + factor(Year):factor(Day) + factor(Year):factor(Depth) + factor(Year):factor(Age)
          + factor(Day):factor(Depth) + factor(Day):factor(Age)
          + factor(Depth):factor(Age), data = dataset)
# summary(mod)

# ANOVA table; named 'aov_tab' (not 'aov') so stats::aov() is not shadowed.
aov_tab <- anova(mod)
print(aov_tab)
# Percentage of the total sum of squares explained by each term
# (column 2 of the ANOVA table is "Sum Sq").
print(100 * aov_tab[2] / sum(aov_tab[2]))
aov_sum <- (100 * aov_tab[2] / sum(aov_tab[2])); colnames(aov_sum) <- '%Exp'
aov_tab <- cbind(aov_tab, aov_sum)
rownames(aov_tab) <- c('year','Day','depth','age',
                       'year x Day','year x depth','year x age',
                       'Day x depth','Day x age',
                       'depth x age','residuals')
# print(aov_tab)
# write.csv(as.matrix(aov_tab), file = paste0(out_path,simulacion, '_ANOVA.csv'), na = "")
# write.csv(as.matrix(aov_sum), file = paste0(out_path,simulacion, '_ANOVA_SUM.csv'), na = "")

# Same model fitted via aov(); the percentages must be computed from its
# ANOVA table -- the original `100 * mod[2] / sum(mod[2])` did arithmetic
# on components of the fit object itself and errors at run time.
mod <- aov(Recruitprop ~ factor(Year) + factor(Day) + factor(Depth) + factor(Age)
           + factor(Year):factor(Day) + factor(Year):factor(Depth) + factor(Year):factor(Age)
           + factor(Day):factor(Depth) + factor(Day):factor(Age)
           + factor(Depth):factor(Age), data = dataset)
aov_tab2 <- anova(mod)
print(100 * aov_tab2[2] / sum(aov_tab2[2]))
| /R/special_scripts/estadisticas_ichthyop.R | no_license | jfloresvaliente/ichthyop_analysis | R | false | false | 2,718 | r | #===============================================================================
#===============================================================================
# Name   : estadisticas_ichthyop
# Author : Jorge Flores
# Date   :
# Version:
# Aim    : fit a linear model and run an ANOVA on Ichthyop simulation outputs
# URL    :
#===============================================================================
# Statistical test for the simulation with monthly and daily winds.
dirpath     <- 'F:/ichthyop_output_analysis/RUN2/csv_files/recruited/'
# out_path    <- 'C:/Users/ASUS/Desktop/ichthyop_output_analysis/'
source_path <- 'D:/ICHTHYOP/scripts/'
winds <- 'clim'
simu  <- 'lobos'
simulacion <- paste0(winds,'_',simu)
# (kept for reference: loop that stacked several wind scenarios)
# dataset = NULL
# for(i in 1:length(winds)){
#   simu = paste0(winds[i],'_',simu)
#   dat = read.csv(paste0(dirpath,simu,".csv"), sep= ",")
#   winds_index = rep(winds[i], times = length(dat[,1]))
#   dat = cbind(dat,winds_index)
#   dataset = rbind(dataset,dat)
# }
dataset <- read.table(paste0(dirpath, simulacion, '.csv'), header = TRUE)

# Linear model: all main effects and two-way interactions of the design
# factors (Year, Day, Depth, Age).
mod <- lm(Recruitprop ~ factor(Year) + factor(Day) + factor(Depth) + factor(Age)
          + factor(Year):factor(Day) + factor(Year):factor(Depth) + factor(Year):factor(Age)
          + factor(Day):factor(Depth) + factor(Day):factor(Age)
          + factor(Depth):factor(Age), data = dataset)
# summary(mod)

# ANOVA table; named 'aov_tab' (not 'aov') so stats::aov() is not shadowed.
aov_tab <- anova(mod)
print(aov_tab)
# Percentage of the total sum of squares explained by each term
# (column 2 of the ANOVA table is "Sum Sq").
print(100 * aov_tab[2] / sum(aov_tab[2]))
aov_sum <- (100 * aov_tab[2] / sum(aov_tab[2])); colnames(aov_sum) <- '%Exp'
aov_tab <- cbind(aov_tab, aov_sum)
rownames(aov_tab) <- c('year','Day','depth','age',
                       'year x Day','year x depth','year x age',
                       'Day x depth','Day x age',
                       'depth x age','residuals')
# print(aov_tab)
# write.csv(as.matrix(aov_tab), file = paste0(out_path,simulacion, '_ANOVA.csv'), na = "")
# write.csv(as.matrix(aov_sum), file = paste0(out_path,simulacion, '_ANOVA_SUM.csv'), na = "")

# Same model fitted via aov(); the percentages must be computed from its
# ANOVA table -- the original `100 * mod[2] / sum(mod[2])` did arithmetic
# on components of the fit object itself and errors at run time.
mod <- aov(Recruitprop ~ factor(Year) + factor(Day) + factor(Depth) + factor(Age)
           + factor(Year):factor(Day) + factor(Year):factor(Depth) + factor(Year):factor(Age)
           + factor(Day):factor(Depth) + factor(Day):factor(Age)
           + factor(Depth):factor(Age), data = dataset)
aov_tab2 <- anova(mod)
print(100 * aov_tab2[2] / sum(aov_tab2[2]))
|
##########################################################
# Reads hourly precipitation from KDVH and computes
# annual max 3h precipitation for GEV analysis
# AVD, MET, Jun-2014
##########################################################
# Full station list kept for reference:
#stations <- c(3030,3810,4781,12290,17870,18020,18320,18701,19490,19510,19710,20300,26890,30310,39150,44730,47240,60940,64300)
stations <- c(18020,18320,18701,19490,19510,19710) #Osloarea
pluvio <- TRUE  # TRUE: pluviometer stations (RR_1); FALSE: geonor stations (RA)

# ---- Pluviometer stations (hourly precipitation, element RR_1) ---------
if(pluvio) {
  # Year column; one station column is cbind-ed on per station below.
  AM <- seq(1970,2014)  # annual max 3-h precipitation
  WF <- seq(1970,2014)  # May-Sep wet-event frequency
  WI <- seq(1970,2014)  # May-Sep mean wet-event intensity
  for(n in seq_along(stations)) {  # seq_along() is safe for empty vectors
    station <- stations[n]
    print(station)
    am3h <- list(Year=c(), pr=c(), wf=c(), wi=c()) #3-hr pr, wet event frequency, mean wet event intensity
    sm3h <- list(Year=c(), pr=c())  # summer (May-Aug) max
    fm3h <- list(Year=c(), pr=c())  # autumn (Sep-Nov) max
    for (year in 1970:2014) {
      print(year)
      am3h$Year <- c(am3h$Year, year)
      sm3h$Year <- c(sm3h$Year, year)
      fm3h$Year <- c(fm3h$Year, year)
      # KDVH/metnopub query for one calendar year of hourly RR_1 at this station.
      data <- "http://klapp/metnopub/production/metno?re=17&nmt=0&p=RR_1&h=0&h=1&h=2&h=3&h=4&h=5&h=6&h=7&h=8&h=9&h=10&h=11&h=12&h=13&h=14&h=15&h=16&h=17&h=18&h=19&h=20&h=21&h=22&h=23&ddel=dot&del=;&ct=text/plain&split=1&nod=NA"
      TD <- paste("31.12.", year, sep="")
      FD <- paste("01.01.", year, sep="")
      data <- paste(data, "&fd=", FD, "&td=", TD, "&s=", station, sep="")
      pr1h <- try(read.table(data, header = TRUE, na.strings = "NA"), silent = TRUE)
      # inherits() instead of class(x)=="try-error": robust for objects
      # whose class attribute has more than one element.
      if(inherits(pr1h, "try-error")) {
        # Download failed: record NA for every annual statistic.
        am3h$pr <- c(am3h$pr, NA)
        am3h$wf <- c(am3h$wf, NA)
        am3h$wi <- c(am3h$wi, NA)
        sm3h$pr <- c(sm3h$pr, NA)
        fm3h$pr <- c(fm3h$pr, NA)
        next()
      }
      # RR_1 may be read as a factor; decode through its levels.
      if(is.factor(pr1h$RR_1)) {
        pr <- as.numeric(levels(pr1h$RR_1))[pr1h$RR_1]
      } else {
        pr <- pr1h$RR_1
      }
      # Require at least 720 May-Sep hours (~30 days), else mark the year NA.
      if(length(pr[which(pr1h$Month > 4 & pr1h$Month < 10)]) < 720) {
        am3h$pr <- c(am3h$pr, NA)
        am3h$wf <- c(am3h$wf, NA)
        am3h$wi <- c(am3h$wi, NA)
        sm3h$pr <- c(sm3h$pr, NA)
        fm3h$pr <- c(fm3h$pr, NA)
        next()
      }
      pr[which(is.na(pr))] <- 0
      # Entirely zero/missing year: nothing usable.
      if(length(pr[which(is.na(pr) | pr == 0)]) == length(pr)) {
        am3h$pr <- c(am3h$pr, NA)
        am3h$wf <- c(am3h$wf, NA)
        am3h$wi <- c(am3h$wi, NA)
        sm3h$pr <- c(sm3h$pr, NA)
        fm3h$pr <- c(fm3h$pr, NA)
        next()
      }
      am <- 0   # running annual max of 3-h sums
      sm <- 0
      fm <- 0
      ssp <- 0  # sum of May-Sep wet-event precipitation
      wf <- 0   # number of May-Sep wet events (3-h sum > 0.5 mm)
      # Non-overlapping 3-hour windows over the year.
      for (i in seq(1, length(pr)-2, by=3)) {
        pr3h <- pr[i] + pr[i+1] + pr[i+2]
        am <- max(pr3h, am, na.rm = TRUE)
        if(am < 2) am <- NA  # running maxima below 2 mm count as missing
        # BUGFIX: the original passed na.rm=T twice to max() below, which
        # raises "formal argument matched by multiple actual arguments".
        if(pr1h$Month[i+1] > 4 & pr1h$Month[i+1] < 9) sm <- max(pr3h, sm, na.rm = TRUE)
        if(pr1h$Month[i+1] > 8 & pr1h$Month[i+1] < 12) fm <- max(pr3h, fm, na.rm = TRUE)
        if(pr1h$Month[i] > 4 & pr1h$Month[i+2] < 10) { #May through September
          if(pr3h > 0.5) {
            ssp <- ssp + pr3h
            wf <- wf + 1
          }
        }
      }
      am3h$pr <- c(am3h$pr, am)
      am3h$wf <- c(am3h$wf, wf)
      am3h$wi <- c(am3h$wi, round(ssp/wf, digits=2))
      sm3h$pr <- c(sm3h$pr, sm)
      fm3h$pr <- c(fm3h$pr, fm)
    }
    # Station-specific corrections (external script mutates am3h in place).
    source("~/PhD//Future//R/correct_obs.R")
    AM <- cbind(AM, am3h$pr)
    WF <- cbind(WF, am3h$wf)
    WI <- cbind(WI, am3h$wi)
  }
}

#################################################################################
# ---- Geonor stations (accumulated precipitation, element RA) -----------
# NOTE(review): this branch uses `station` and `am3h`, which are only
# created in the pluvio branch above -- confirm before running with
# pluvio = FALSE.
if(!pluvio) {
  for (year in 1970:2014) {
    print(year)
    data <- "http://klapp/metnopub/production/metno?re=17&p=RA&h=0&h=1&h=2&h=3&h=4&h=5&h=6&h=7&h=8&h=9&h=10&h=11&h=12&h=13&h=14&h=15&h=16&h=17&h=18&h=19&h=20&h=21&h=22&h=23&ddel=dot&del=;&ct=text/plain&split=1&nod=NA"
    TD <- paste("31.12.", year, sep="")
    FD <- paste("01.01.", year, sep="")
    data <- paste(data, "&fd=", FD, "&td=", TD, "&s=", station, sep="")
    pr1h <- read.table(data, header = TRUE, na.strings = "NA")
    pr1hcor <- pr1h
    # Flag suspicious drops in the accumulating gauge (bucket emptied etc.).
    # BUGFIX: the original looped over 1:(length(pr1h$RA-1)) -- the "-1" sat
    # inside length(), so the last iteration indexed one past the end of RA.
    for (j in seq_len(length(pr1h$RA) - 1)) {
      if(is.na(pr1h$RA[j+1] - pr1h$RA[j])) next()
      if((pr1h$RA[j+1] - pr1h$RA[j] <= -1) & (pr1h$RA[j+1] - pr1h$RA[j] > -100)) pr1hcor$RA[j+1] <- NA
    }
    rm(pr1h)
    am <- 0
    # Sliding (overlapping) 3-hour window on the accumulated series.
    for (i in seq(1, length(pr1hcor$RA)-2, by=1)) {
      pr3h <- pr1hcor$RA[i+2] - pr1hcor$RA[i]
      if(!is.na(pr3h) & pr3h > 100) pr3h <- NA              # implausibly large
      if(!is.na(pr3h) & is.na(pr1hcor$RA[i+1])) pr3h <- NA  # flagged middle hour
      am <- max(pr3h, am, na.rm = TRUE)
    }
    am3h$Year <- c(am3h$Year, year)
    am3h$pr <- c(am3h$pr, am)
  }
}
#am3h$pr[which(am3h$Year==2007)] = NA #18950 | /Future/R/create_obs_pr3hr_OLD.R | no_license | anitavd/PhD | R | false | false | 5,156 | r | ##########################################################
# Reads hourly precipitation from KDVH and computes
# annual max 3h precipitation for GEV analysis
# AVD, MET, Jun-2014
##########################################################
# Full station list kept for reference:
#stations <- c(3030,3810,4781,12290,17870,18020,18320,18701,19490,19510,19710,20300,26890,30310,39150,44730,47240,60940,64300)
stations <- c(18020,18320,18701,19490,19510,19710) #Osloarea
pluvio <- TRUE  # TRUE: pluviometer stations (RR_1); FALSE: geonor stations (RA)

# ---- Pluviometer stations (hourly precipitation, element RR_1) ---------
if(pluvio) {
  # Year column; one station column is cbind-ed on per station below.
  AM <- seq(1970,2014)  # annual max 3-h precipitation
  WF <- seq(1970,2014)  # May-Sep wet-event frequency
  WI <- seq(1970,2014)  # May-Sep mean wet-event intensity
  for(n in seq_along(stations)) {  # seq_along() is safe for empty vectors
    station <- stations[n]
    print(station)
    am3h <- list(Year=c(), pr=c(), wf=c(), wi=c()) #3-hr pr, wet event frequency, mean wet event intensity
    sm3h <- list(Year=c(), pr=c())  # summer (May-Aug) max
    fm3h <- list(Year=c(), pr=c())  # autumn (Sep-Nov) max
    for (year in 1970:2014) {
      print(year)
      am3h$Year <- c(am3h$Year, year)
      sm3h$Year <- c(sm3h$Year, year)
      fm3h$Year <- c(fm3h$Year, year)
      # KDVH/metnopub query for one calendar year of hourly RR_1 at this station.
      data <- "http://klapp/metnopub/production/metno?re=17&nmt=0&p=RR_1&h=0&h=1&h=2&h=3&h=4&h=5&h=6&h=7&h=8&h=9&h=10&h=11&h=12&h=13&h=14&h=15&h=16&h=17&h=18&h=19&h=20&h=21&h=22&h=23&ddel=dot&del=;&ct=text/plain&split=1&nod=NA"
      TD <- paste("31.12.", year, sep="")
      FD <- paste("01.01.", year, sep="")
      data <- paste(data, "&fd=", FD, "&td=", TD, "&s=", station, sep="")
      pr1h <- try(read.table(data, header = TRUE, na.strings = "NA"), silent = TRUE)
      # inherits() instead of class(x)=="try-error": robust for objects
      # whose class attribute has more than one element.
      if(inherits(pr1h, "try-error")) {
        # Download failed: record NA for every annual statistic.
        am3h$pr <- c(am3h$pr, NA)
        am3h$wf <- c(am3h$wf, NA)
        am3h$wi <- c(am3h$wi, NA)
        sm3h$pr <- c(sm3h$pr, NA)
        fm3h$pr <- c(fm3h$pr, NA)
        next()
      }
      # RR_1 may be read as a factor; decode through its levels.
      if(is.factor(pr1h$RR_1)) {
        pr <- as.numeric(levels(pr1h$RR_1))[pr1h$RR_1]
      } else {
        pr <- pr1h$RR_1
      }
      # Require at least 720 May-Sep hours (~30 days), else mark the year NA.
      if(length(pr[which(pr1h$Month > 4 & pr1h$Month < 10)]) < 720) {
        am3h$pr <- c(am3h$pr, NA)
        am3h$wf <- c(am3h$wf, NA)
        am3h$wi <- c(am3h$wi, NA)
        sm3h$pr <- c(sm3h$pr, NA)
        fm3h$pr <- c(fm3h$pr, NA)
        next()
      }
      pr[which(is.na(pr))] <- 0
      # Entirely zero/missing year: nothing usable.
      if(length(pr[which(is.na(pr) | pr == 0)]) == length(pr)) {
        am3h$pr <- c(am3h$pr, NA)
        am3h$wf <- c(am3h$wf, NA)
        am3h$wi <- c(am3h$wi, NA)
        sm3h$pr <- c(sm3h$pr, NA)
        fm3h$pr <- c(fm3h$pr, NA)
        next()
      }
      am <- 0   # running annual max of 3-h sums
      sm <- 0
      fm <- 0
      ssp <- 0  # sum of May-Sep wet-event precipitation
      wf <- 0   # number of May-Sep wet events (3-h sum > 0.5 mm)
      # Non-overlapping 3-hour windows over the year.
      for (i in seq(1, length(pr)-2, by=3)) {
        pr3h <- pr[i] + pr[i+1] + pr[i+2]
        am <- max(pr3h, am, na.rm = TRUE)
        if(am < 2) am <- NA  # running maxima below 2 mm count as missing
        # BUGFIX: the original passed na.rm=T twice to max() below, which
        # raises "formal argument matched by multiple actual arguments".
        if(pr1h$Month[i+1] > 4 & pr1h$Month[i+1] < 9) sm <- max(pr3h, sm, na.rm = TRUE)
        if(pr1h$Month[i+1] > 8 & pr1h$Month[i+1] < 12) fm <- max(pr3h, fm, na.rm = TRUE)
        if(pr1h$Month[i] > 4 & pr1h$Month[i+2] < 10) { #May through September
          if(pr3h > 0.5) {
            ssp <- ssp + pr3h
            wf <- wf + 1
          }
        }
      }
      am3h$pr <- c(am3h$pr, am)
      am3h$wf <- c(am3h$wf, wf)
      am3h$wi <- c(am3h$wi, round(ssp/wf, digits=2))
      sm3h$pr <- c(sm3h$pr, sm)
      fm3h$pr <- c(fm3h$pr, fm)
    }
    # Station-specific corrections (external script mutates am3h in place).
    source("~/PhD//Future//R/correct_obs.R")
    AM <- cbind(AM, am3h$pr)
    WF <- cbind(WF, am3h$wf)
    WI <- cbind(WI, am3h$wi)
  }
}

#################################################################################
# ---- Geonor stations (accumulated precipitation, element RA) -----------
# NOTE(review): this branch uses `station` and `am3h`, which are only
# created in the pluvio branch above -- confirm before running with
# pluvio = FALSE.
if(!pluvio) {
  for (year in 1970:2014) {
    print(year)
    data <- "http://klapp/metnopub/production/metno?re=17&p=RA&h=0&h=1&h=2&h=3&h=4&h=5&h=6&h=7&h=8&h=9&h=10&h=11&h=12&h=13&h=14&h=15&h=16&h=17&h=18&h=19&h=20&h=21&h=22&h=23&ddel=dot&del=;&ct=text/plain&split=1&nod=NA"
    TD <- paste("31.12.", year, sep="")
    FD <- paste("01.01.", year, sep="")
    data <- paste(data, "&fd=", FD, "&td=", TD, "&s=", station, sep="")
    pr1h <- read.table(data, header = TRUE, na.strings = "NA")
    pr1hcor <- pr1h
    # Flag suspicious drops in the accumulating gauge (bucket emptied etc.).
    # BUGFIX: the original looped over 1:(length(pr1h$RA-1)) -- the "-1" sat
    # inside length(), so the last iteration indexed one past the end of RA.
    for (j in seq_len(length(pr1h$RA) - 1)) {
      if(is.na(pr1h$RA[j+1] - pr1h$RA[j])) next()
      if((pr1h$RA[j+1] - pr1h$RA[j] <= -1) & (pr1h$RA[j+1] - pr1h$RA[j] > -100)) pr1hcor$RA[j+1] <- NA
    }
    rm(pr1h)
    am <- 0
    # Sliding (overlapping) 3-hour window on the accumulated series.
    for (i in seq(1, length(pr1hcor$RA)-2, by=1)) {
      pr3h <- pr1hcor$RA[i+2] - pr1hcor$RA[i]
      if(!is.na(pr3h) & pr3h > 100) pr3h <- NA              # implausibly large
      if(!is.na(pr3h) & is.na(pr1hcor$RA[i+1])) pr3h <- NA  # flagged middle hour
      am <- max(pr3h, am, na.rm = TRUE)
    }
    am3h$Year <- c(am3h$Year, year)
    am3h$pr <- c(am3h$pr, am)
  }
}
#am3h$pr[which(am3h$Year==2007)] = NA #18950
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/occ_download_list.R
\name{occ_download_list}
\alias{occ_download_list}
\title{Lists the downloads created by a user.}
\usage{
occ_download_list(
user = NULL,
pwd = NULL,
limit = 20,
start = 0,
curlopts = list()
)
}
\arguments{
\item{user}{(character) User name within GBIF's website. Required. See
Details.}
\item{pwd}{(character) User password within GBIF's website. Required. See
Details.}
\item{limit}{(integer/numeric) Number of records to return. Default: 20,
Max: 1000}
\item{start}{(integer/numeric) Record number to start at. Default: 0}
\item{curlopts}{list of named curl options passed on to
\code{\link[crul]{HttpClient}}. see \code{curl::curl_options}
for curl options}
}
\value{
a list with two slots:
\itemize{
\item meta: a single row data.frame with columns: \code{offset}, \code{limit},
\code{endofrecords}, \code{count}
\item results: a tibble with the nested data flattened, with many
columns with the same \code{request.} prefix
}
}
\description{
Lists the downloads created by a user.
}
\note{
see \link{downloads} for an overview of GBIF downloads methods
}
\examples{
\dontrun{
occ_download_list(user="sckott")
occ_download_list(user="sckott", limit = 5)
occ_download_list(user="sckott", start = 21)
}
}
\seealso{
Other downloads:
\code{\link{download_predicate_dsl}},
\code{\link{occ_download_cached}()},
\code{\link{occ_download_cancel}()},
\code{\link{occ_download_dataset_activity}()},
\code{\link{occ_download_datasets}()},
\code{\link{occ_download_get}()},
\code{\link{occ_download_import}()},
\code{\link{occ_download_meta}()},
\code{\link{occ_download_queue}()},
\code{\link{occ_download_wait}()},
\code{\link{occ_download}()}
}
\concept{downloads}
| /man/occ_download_list.Rd | no_license | cran/rgbif | R | false | true | 1,840 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/occ_download_list.R
\name{occ_download_list}
\alias{occ_download_list}
\title{Lists the downloads created by a user.}
\usage{
occ_download_list(
user = NULL,
pwd = NULL,
limit = 20,
start = 0,
curlopts = list()
)
}
\arguments{
\item{user}{(character) User name within GBIF's website. Required. See
Details.}
\item{pwd}{(character) User password within GBIF's website. Required. See
Details.}
\item{limit}{(integer/numeric) Number of records to return. Default: 20,
Max: 1000}
\item{start}{(integer/numeric) Record number to start at. Default: 0}
\item{curlopts}{list of named curl options passed on to
\code{\link[crul]{HttpClient}}. see \code{curl::curl_options}
for curl options}
}
\value{
a list with two slots:
\itemize{
\item meta: a single row data.frame with columns: \code{offset}, \code{limit},
\code{endofrecords}, \code{count}
\item results: a tibble with the nested data flattened, with many
columns with the same \code{request.} prefix
}
}
\description{
Lists the downloads created by a user.
}
\note{
see \link{downloads} for an overview of GBIF downloads methods
}
\examples{
\dontrun{
occ_download_list(user="sckott")
occ_download_list(user="sckott", limit = 5)
occ_download_list(user="sckott", start = 21)
}
}
\seealso{
Other downloads:
\code{\link{download_predicate_dsl}},
\code{\link{occ_download_cached}()},
\code{\link{occ_download_cancel}()},
\code{\link{occ_download_dataset_activity}()},
\code{\link{occ_download_datasets}()},
\code{\link{occ_download_get}()},
\code{\link{occ_download_import}()},
\code{\link{occ_download_meta}()},
\code{\link{occ_download_queue}()},
\code{\link{occ_download_wait}()},
\code{\link{occ_download}()}
}
\concept{downloads}
|
# white clusters ----------------------------------------------------------
# Clustering of the UCI white-wine quality data: hierarchical, k-means,
# spectral (commented out further below) and PAM on daisy() dissimilarities.
library(ISLR)
library(kernlab)
library(gridExtra)
library(ggdendro)
library(magrittr)
library(janitor)
library(skimr)
library(tidyverse)
library(parameters)
library(conflicted)
library(cluster)
library(splus2R)
library(fpc)
# managing conflicts: pin which package wins for each masked name
conflict_prefer("filter", "dplyr")
conflict_prefer("alpha", "kernlab")
conflict_prefer("combine", "gridExtra")
conflict_prefer("extract", "magrittr")
conflict_prefer("map", "purrr")
#load data (semicolon-separated UCI file; clean_names() -> snake_case columns)
white_wine <- read.csv("winequality-white.csv",
                       sep=";", header=TRUE) %>%
  clean_names()
set.seed(100) # setting seed so the 75/25 split is reproducible
white_wine$id <- 1:nrow(white_wine)  # row id used below to anti-join the test set
train <- white_wine %>% dplyr::sample_frac(.75)
test <- dplyr::anti_join(white_wine, train, by = 'id')
# Hierarchical-clustering helper: drops the bookkeeping `id` column and
# clusters the remaining columns on Euclidean distances (dist() default),
# using the requested linkage method.
help_hclust <- function(data, meth) {
  features <- select(data, -id)
  hclust(dist(features), method = meth)
}
#hf to cut the dendogram
# Slice a fitted dendrogram into `ncuts` groups; thin wrapper over
# stats::cutree(), returning one integer label per observation.
cut_hclust <- function(hclust_obj, ncuts) cutree(hclust_obj, ncuts)
#scaling the data- this is gonna help for part c
# Z-score every feature column while carrying the `id` column through
# unscaled; `id` is re-appended as the last column, as before.
scale_data <- function(df) {
  id_col <- select(df, id)
  scaled_features <- as_tibble(scale(select(df, -id)))
  bind_cols(scaled_features, id_col)
}
# Experiment table: unscaled vs scaled training data, each hierarchically
# clustered with complete linkage, dendrogram drawn, and cut into 3 groups.
white_wine_hclust <- tibble(data = list(train,
                                        train %>% scale_data())) %>%
  mutate(hclust = map(data, help_hclust, meth = 'complete'),
         graph = map(hclust, ggdendrogram),
         clusters = map(hclust, cut_hclust, ncuts = 3))
# Cut the dendrogram at a height that results in three distinct clusters.
# (Wording inherited from a USArrests lab; the rows here are wines.)
white_wine_hclust %>%
  pluck('graph', 1)
# Unscaled training data with its 3-group hierarchical labels attached.
unscaled_clusters <- white_wine_hclust %>% pluck('data', 1) %>%
  bind_cols(cluster = white_wine_hclust %>% pluck('clusters', 1))
# K-Means Clustering with 5 clusters
# NOTE(review): `unscaled_clusters` still contains the `id` and `cluster`
# columns, so both take part in the k-means distances -- confirm intended.
fit <- kmeans(unscaled_clusters, 5)
# Cluster Plot against 1st 2 principal components
# vary parameters for most readable graph
library(cluster)
clusplot(unscaled_clusters, fit$cluster, color=TRUE, shade=TRUE,
         labels=2, lines=0)
# Centroid Plot against 1st 2 discriminant functions
library(fpc)
plotcluster(unscaled_clusters, fit$cluster)
# Model-based alternative, kept for reference:
# library(mclust)
# fit <- Mclust(unscaled_clusters)
# plot(fit) # plot results
# summary(fit) # display the best model
# Hierarchical clustering with complete linkage and Euclidean distance,
# after scaling the variables (second row of white_wine_hclust).
scaled_clusters <- white_wine_hclust %>% pluck('data', 2) %>%
  bind_cols(cluster = white_wine_hclust %>% pluck('clusters', 2))
#### Exercise 2 - KMEANS
#cluster SS
# Total within-cluster sum of squares of a fitted kmeans object.
get_within_ss <- function(kmean_obj) {
  kmean_obj[["tot.withinss"]]
}
#cluster labels for the data
# Attach cluster labels to `x` as a `cluster` column.
#
# `clust_obj` may be either a fitted kmeans object (labels are taken from
# its $cluster component) or a bare vector of labels. Returns `x` with one
# extra `cluster` column; row order is unchanged.
lab_cluster <- function(x, clust_obj){
  # inherits() is the safe test here: `class(x) == "kmeans"` breaks for
  # objects whose class attribute has more than one element.
  if (inherits(clust_obj, "kmeans")) {
    clust <- clust_obj$cluster
  } else {
    clust <- clust_obj
  }
  mutate(x, cluster = clust)
}
# One-hot encode via the `onehot` package: the encoder is trained on the
# data minus `quality`, then applied to the full `white_wine` frame.
white_wine_categorical <- white_wine %>%
  select(-quality) %>%
  onehot::onehot() %>%
  predict(white_wine) %>% as_tibble()
# Z-score every numeric column (this includes `quality` and `id`).
white_wine_scaled <- white_wine %>%
  select_if(is.numeric) %>%
  scale() %>% as_tibble()
# Feature table used by the k-means and hierarchical experiments below.
white_wine_one_hot <- white_wine_scaled %>%
  bind_cols(white_wine_categorical)
# Re-attach the row `id` column from the global `white_wine` data frame.
# NOTE: assumes `df` has the same row order/length as `white_wine`.
# (The original file defined this function twice, byte-for-byte identically;
# the redundant second definition has been removed.)
add_id <- function(df){
  id <- white_wine %>% select(id)
  df %>% bind_cols(id)
}
white_wine_clustering <- tibble(data = list(white_wine_one_hot)) %>%
crossing(k = seq(2, 6, 1)) %>%
mutate(k_mean = map2(data, k, kmeans, nstart = 20), # to reintialize the rand start 20 times
within_clust_ss = map_dbl(k_mean, get_within_ss), # extract the ss within each cluster
cluster = map2(data, k_mean, lab_cluster), # to get the label for the cluster
cluster = map(.x = cluster,
.f = function(x) {
cluster_fac <- x %>% mutate(cluster = as.factor(cluster))
return(cluster_fac)
}),
cluster_with_id = map(cluster, add_id))
#PCA on original data to plot 2D tensor
pca_out <- white_wine %>%
select_if(is.numeric) %>%
prcomp(scale = TRUE)
two_principle_components <- pca_out$x[,1] %>% enframe(name = NULL) %>%
rename(PC1 = value) %>%
bind_cols(pca_out$x[,2] %>% enframe(name = NULL) %>%
rename(PC2 = value))
#adding pc to df
# Append the first two principal-component scores (global tibble
# `two_principle_components`) as PC1/PC2 columns of `df`.
add_pc <- function(df) {
  bind_cols(df, two_principle_components)
}
#plotting principle components and clusters
# Scatter the rows in PC1/PC2 space, coloured by their cluster assignment.
plot_cluster <- function(df) {
  ggplot(df, aes(x = PC1, y = PC2, color = cluster)) +
    geom_point(alpha = 0.4)
}
# Attach PC scores to each labelled data set and build one scatter per k.
white_wine_clustering <- white_wine_clustering %>%
  mutate(white_wine_pc = map(cluster_with_id, add_pc),
         cluster_plots = map(white_wine_pc, plot_cluster))
white_wine_clustering %>%
  pluck('cluster_plots') %>%
  grid.arrange(grobs = .)
# Elbow plot of total within-cluster SS; at k = 4 there are 4 separable groups.
white_wine_clustering %>%
  ggplot(aes(x = k, y = within_clust_ss)) +
  geom_line()
# Keep the k = 4 k-means plot for later comparison.
plot_kmeans <- white_wine_clustering %>%
  filter(k == 4) %>%
  select(cluster_plots) %>%
  rename(plots = cluster_plots)
# Hierarchical clustering on Euclidean distances of `x`, using the
# linkage method named by `meth` ("complete", "average", ...).
run_hclust_white_wine <- function(x, meth) {
  distances <- dist(x)
  hclust(distances, method = meth)
}
#hierarchical clustering
hc_data <- tibble(data = list(white_wine_one_hot)) %>%
crossing(k = seq(2, 6, 1)) %>%
mutate(hclust = map(data, run_hclust_white_wine, meth = 'complete'),
cluster = map2(hclust, k, cut_hclust),
cluster = map(cluster, as.factor),
cluster_data = map2(data, cluster, lab_cluster),
cluster_addid = map(cluster_data, add_id),
cluster_pc = map(cluster_addid, add_pc),
plots = map(cluster_pc, plot_cluster)) # plots of the clusters
hc_data %>%
pluck('plots') %>%
grid.arrange(grobs = .)
plot_hierarchical <- hc_data %>%
filter(k == 4) %>%
select(plots)
# spec_clust <- tibble(data = list(white_wine_one_hot)) %>%
# mutate(spec = map(.x = data,
# .f = function(x) specc(as.matrix(x), centers = 4)))
#
#
# spec_clust <- spec_clust %>%
# mutate(cluster_data = map2(data, spec, lab_cluster),
# cluster = map(.x = cluster_data,
# .f = function(x) {
# cluster_fac <- x %>% mutate(cluster = as.factor(cluster))
# return(cluster_fac)
# }),
# cluster_addid = map(cluster, add_id),
# cluster_pc = map(cluster_addid, add_pc),
# plots = map(cluster_pc, plot_cluster))
#
# spec_clust %>%
# pluck('plots')
# plot_spectral <- spec_clust %>%
# select(plots)
#dont use the one-hot encoded data
mixed_clusters <- tibble(data = list(white_wine)) %>%
mutate(dissimilarity = map(data, daisy), # daisy returns a matrix of dissim of the data points
cluster = map(dissimilarity, pam, k = 3))
cluster_data <- mixed_clusters %>% pluck('cluster')
clusters_data <- white_wine %>% bind_cols(cluster_data[[1]]$clustering %>% enframe(name = NULL) %>%
rename(cluster = value)) %>%
mutate(cluster = as_factor(cluster))
mixed_clusters <- tibble(data = list(clusters_data)) %>%
mutate(cluster_pc = map(data, add_pc),
plots = map(cluster_pc, plot_cluster))
mixed_clusters %>%
pluck('plots')
| /white clusters.R | no_license | mariaarias74/wine-final | R | false | false | 7,539 | r |
# white clusters ----------------------------------------------------------
library(ISLR)
library(kernlab)
library(gridExtra)
library(ggdendro)
library(magrittr)
library(janitor)
library(skimr)
library(tidyverse)
library(parameters)
library(conflicted)
library(cluster)
library(splus2R)
library(fpc)
# managing conflicts
conflict_prefer("filter", "dplyr")
conflict_prefer("alpha", "kernlab")
conflict_prefer("combine", "gridExtra")
conflict_prefer("extract", "magrittr")
conflict_prefer("map", "purrr")
#load data
white_wine <- read.csv("winequality-white.csv",
sep=";", header=TRUE) %>%
clean_names()
set.seed(100) # setting seed
white_wine$id <- 1:nrow(white_wine)
train <- white_wine %>% dplyr::sample_frac(.75)
test <- dplyr::anti_join(white_wine, train, by = 'id')
#default dist is Euclidian
help_hclust <- function(data, meth){
data <- data %>% select(-id)
return(hclust(dist(data), method = meth))
}
#hf to cut the dendogram
cut_hclust <- function(hclust_obj, ncuts){
return(cutree(hclust_obj, ncuts))
}
#scaling the data- this is gonna help for part c
scale_data <- function(df) {
id <- df %>% select(id)
df_scaled <- df %>% select(-id) %>% scale() %>% as_tibble()
return(bind_cols(df_scaled, id))
}
white_wine_hclust <- tibble(data = list(train,
train %>% scale_data())) %>%
mutate(hclust = map(data, help_hclust, meth = 'complete'),
graph = map(hclust, ggdendrogram),
clusters = map(hclust, cut_hclust, ncuts = 3))
#Cut the dendrogram at a height that results in three distinct clusters.
#Which states belong to which clusters?
white_wine_hclust %>%
pluck('graph', 1)
unscaled_clusters <- white_wine_hclust %>% pluck('data', 1) %>%
bind_cols(cluster = white_wine_hclust %>% pluck('clusters', 1))
# K-Means Clustering with 5 clusters
fit <- kmeans(unscaled_clusters, 5)
# Cluster Plot against 1st 2 principal components
# vary parameters for most readable graph
library(cluster)
clusplot(unscaled_clusters, fit$cluster, color=TRUE, shade=TRUE,
labels=2, lines=0)
# Centroid Plot against 1st 2 discriminant functions
library(fpc)
plotcluster(unscaled_clusters, fit$cluster)
# library(mclust)
# fit <- Mclust(unscaled_clusters)
# plot(fit) # plot results
# summary(fit) # display the best model
#Hierarchically cluster the states using complete linkage and Euclidean distance,
#after scaling the variables to have standard deviation one.
scaled_clusters <- white_wine_hclust %>% pluck('data', 2) %>%
bind_cols(cluster = white_wine_hclust %>% pluck('clusters', 2))
#plot us map
#### Exercise 2 - KMEANS
#cluster SS
get_within_ss <- function(kmean_obj){
return(kmean_obj$tot.withinss)
}
#cluster labels for the data
lab_cluster <- function(x, clust_obj){
if(class(clust_obj) == "kmeans"){
clust = clust_obj$cluster
} else {
clust = clust_obj
}
out = x %>%
mutate(cluster = clust)
return(out)
}
white_wine_categorical <- white_wine %>%
select(-quality) %>%
onehot::onehot() %>%
predict(white_wine) %>% as_tibble()
white_wine_scaled <- white_wine %>%
select_if(is.numeric) %>%
scale() %>% as_tibble()
white_wine_one_hot <- white_wine_scaled %>%
bind_cols(white_wine_categorical)
add_id <- function(df){
id <- white_wine %>% select(id)
cluster_with_id <- df %>% bind_cols(id)
return(cluster_with_id)
}
add_id <- function(df){
id <- white_wine %>% select(id)
cluster_with_id <- df %>% bind_cols(id)
return(cluster_with_id)
}
white_wine_clustering <- tibble(data = list(white_wine_one_hot)) %>%
crossing(k = seq(2, 6, 1)) %>%
mutate(k_mean = map2(data, k, kmeans, nstart = 20), # to reintialize the rand start 20 times
within_clust_ss = map_dbl(k_mean, get_within_ss), # extract the ss within each cluster
cluster = map2(data, k_mean, lab_cluster), # to get the label for the cluster
cluster = map(.x = cluster,
.f = function(x) {
cluster_fac <- x %>% mutate(cluster = as.factor(cluster))
return(cluster_fac)
}),
cluster_with_id = map(cluster, add_id))
#PCA on original data to plot 2D tensor
pca_out <- white_wine %>%
select_if(is.numeric) %>%
prcomp(scale = TRUE)
two_principle_components <- pca_out$x[,1] %>% enframe(name = NULL) %>%
rename(PC1 = value) %>%
bind_cols(pca_out$x[,2] %>% enframe(name = NULL) %>%
rename(PC2 = value))
#adding pc to df
add_pc <- function(df) {
df_components <- df %>% bind_cols(two_principle_components)
return(df_components)
}
#plotting principle components and clusters
plot_cluster <- function(df) {
plot <- df %>%
ggplot(aes(x = PC1, y = PC2, color = cluster)) +
geom_point(alpha = 0.4)
return(plot)
}
white_wine_clustering <- white_wine_clustering %>%
mutate(white_wine_pc = map(cluster_with_id, add_pc),
cluster_plots = map(white_wine_pc, plot_cluster))
white_wine_clustering %>%
pluck('cluster_plots') %>%
grid.arrange(grobs = .)
#if k = 4, there's 4 seperable groups
white_wine_clustering %>%
ggplot(aes(x = k, y = within_clust_ss)) +
geom_line()
plot_kmeans <- white_wine_clustering %>%
filter(k == 4) %>%
select(cluster_plots) %>%
rename(plots = cluster_plots)
run_hclust_white_wine <- function(x, meth){
return(hclust(dist(x), method = meth))
}
#hierarchical clustering
hc_data <- tibble(data = list(white_wine_one_hot)) %>%
crossing(k = seq(2, 6, 1)) %>%
mutate(hclust = map(data, run_hclust_white_wine, meth = 'complete'),
cluster = map2(hclust, k, cut_hclust),
cluster = map(cluster, as.factor),
cluster_data = map2(data, cluster, lab_cluster),
cluster_addid = map(cluster_data, add_id),
cluster_pc = map(cluster_addid, add_pc),
plots = map(cluster_pc, plot_cluster)) # plots of the clusters
hc_data %>%
pluck('plots') %>%
grid.arrange(grobs = .)
plot_hierarchical <- hc_data %>%
filter(k == 4) %>%
select(plots)
# spec_clust <- tibble(data = list(white_wine_one_hot)) %>%
# mutate(spec = map(.x = data,
# .f = function(x) specc(as.matrix(x), centers = 4)))
#
#
# spec_clust <- spec_clust %>%
# mutate(cluster_data = map2(data, spec, lab_cluster),
# cluster = map(.x = cluster_data,
# .f = function(x) {
# cluster_fac <- x %>% mutate(cluster = as.factor(cluster))
# return(cluster_fac)
# }),
# cluster_addid = map(cluster, add_id),
# cluster_pc = map(cluster_addid, add_pc),
# plots = map(cluster_pc, plot_cluster))
#
# spec_clust %>%
# pluck('plots')
# plot_spectral <- spec_clust %>%
# select(plots)
#dont use the one-hot encoded data
mixed_clusters <- tibble(data = list(white_wine)) %>%
mutate(dissimilarity = map(data, daisy), # daisy returns a matrix of dissim of the data points
cluster = map(dissimilarity, pam, k = 3))
cluster_data <- mixed_clusters %>% pluck('cluster')
clusters_data <- white_wine %>% bind_cols(cluster_data[[1]]$clustering %>% enframe(name = NULL) %>%
rename(cluster = value)) %>%
mutate(cluster = as_factor(cluster))
mixed_clusters <- tibble(data = list(clusters_data)) %>%
mutate(cluster_pc = map(data, add_pc),
plots = map(cluster_pc, plot_cluster))
mixed_clusters %>%
pluck('plots')
|
\name{wavk.test}
\alias{wavk.test}
\title{WAVK trend test}
\description{
Non-parametric test to detect possibly non-monotonic parametric trend in a time series.
}
\usage{
wavk.test(x, factor.length=c("user.defined", "adaptive.selection"),
Window=round(0.1*length(x)), q=3/4, j=c(8:11), B=1000,
H0=c("no trend","linear"), method=c("boot", "asympt"),
ar.order=NULL, BIC=TRUE, robust=TRUE, out=FALSE)
}
\arguments{
\item{x}{univariate time series. Missing values are not allowed.}
\item{factor.length}{method to define the length of local windows (factors). Default option \code{"user.defined"} allows to set only one value of the argument \code{Window}. The option \code{"adaptive.selection"} sets \code{method = "boot"} and employs heuristic \eqn{m}-out-of-\eqn{n} subsampling algorithm (Bickel and Sakov, 2008) to select an optimal window from the set of possible windows \code{length(x)*q^j} whose values are mapped to the largest previous integer and greater than 2.}
\item{Window}{length of the local window (factor), default is \code{round(0.1*length(x))}. This argument is ignored if \code{factor.length = "adaptive.selection"}.}
\item{q}{scalar from 0 to 1 to define the set of possible windows when \code{factor.length =} \code{"adaptive.selection"}. Default is \eqn{3/4}. This argument is ignored if \code{factor.length =} \code{"user.defined"}.}
\item{j}{numeric vector to define the set of possible windows when \code{factor.length =} \code{"adaptive.selection"}. Default is \code{c(8:11)}. This argument is ignored if \code{factor.length =} \code{"user.defined"}.}
\item{B}{number of bootstrap simulations to obtain empirical critical values. Default is 1000.}
\item{H0}{null hypothesis: \code{"no trend"} (default) for testing the absence of trend (in other words, constant trend) vs. any, possibly non-monotonic, trend; \code{"linear"} for testing the presence of parametric linear trend vs. alternative nonlinear trend.}
\item{method}{method of obtaining critical values: from asymptotical (\code{"asympt"}) or bootstrap (\code{"boot"}) distribution. If \code{factor.length =} \code{"adaptive.selection"} the option \code{"boot"} is used.}
\item{ar.order}{order of autoregressive filter when \code{BIC = FALSE}, or the maximal order for BIC-based filtering. Default is \code{floor(10*log10(length(x)))}.}
\item{BIC}{logical value indicates whether the order of autoregressive filter should be selected by Bayesian information criterion (BIC). If \code{TRUE} (default), models of orders 1,...,\code{ar.order} or 1,...,\code{floor(10*log10(length(x)))} are be considered, depending on whether \code{ar.order} is defined or not.}
\item{robust}{logical value indicates whether to use robust estimates of autoregression coefficients using \code{\link{HVK}} function (default), or to use Yule-Walker estimates delivered by \command{ar} function.}
\item{out}{logical value indicates whether full output should be shown. Default is \code{FALSE}.}
}
\value{
A list with class htest containing the following components:
\item{method}{name of the method.}
\item{data.name}{name of the data.}
\item{statistic}{value of the test statistic.}
\item{p.value}{\eqn{p}-value of the test.}
\item{alternative}{alternative hypothesis.}
\item{parameter}{window that was used.}
\item{estimate}{list, containing the estimated coefficients of linear trend (if \code{H0="linear"}); estimated AR coefficients; test results for all considered windows (if \code{factor.length =} \code{"adaptive.selection"}).}
}
\references{
Bickel, P. J. and Sakov, A. (2008) On the choice of \eqn{m} in the \eqn{m} out of \eqn{n} bootstrap and confidence bounds for extrema. \emph{Statistica Sinica} 18, 967--985.
Lyubchich, V., Gel, Y. R. and El-Shaarawi, A. (2013) On detecting non-monotonic trends in environmental time series: a fusion of local regression and bootstrap. \emph{Environmetrics} 24, 209--226.
Wang, L., Akritas, M. G. and Van Keilegom, I. (2008) An ANOVA-type nonparametric diagnostic test for heteroscedastic regression models. \emph{Journal of Nonparametric Statistics} 20(5), 365--382.
Wang, L. and Van Keilegom, I. (2007) Nonparametric test for the form of parametric regression with time series errors. \emph{Statistica Sinica} 17, 369--386.
}
\author{Yulia R. Gel, Vyacheslav Lyubchich}
\seealso{\code{\link{HVK}},
\code{\link{WAVK}},
\code{\link{sync.test}}.
}
\examples{
# Fix seed for reproduceable simulations.
set.seed(123)
# Simulate autoregressive time series of length n with linear trend 1+2*t,
# where t is a regular sequence on the interval (0,1].
n <- 100
t <- c(1:n)/n
U <- 1+2*t + arima.sim(n=n, list(order = c(2,0,0), ar = c(-0.7, -0.1)))
# Test for linear trend with output of all results.
\dontrun{
wavk.test(U, factor.length = "adaptive.selection", H0="linear", out=TRUE, B=1000)
}
# Sample output:
##
## Trend test by Wang, Akritas and Van Keilegom
##
##data: U
##WAVK test statistic = 0.8562, adaptively selected window = 4, p-value = 0.356
##alternative hypothesis: presence of a nonlinear trend
##sample estimates:
##$linear_trend_coefficients
##(Intercept) t
## 0.9917251 2.0224272
##
##$AR_coefficients
## phi_1 phi_2
##-0.6814546 -0.2404422
##
##$all_considered_windows
## Window WAVK-statistic p-value
## 4 0.8561654 0.356
## 5 0.8620023 0.320
## 7 0.8691870 0.288
## 10 0.6837790 0.306
# Test H0 of absence of a trend using asymptotic distribution of statistic.
wavk.test(U, method="asympt")
# Sample output:
##
## Trend test by Wang, Akritas and Van Keilegom
##
##data: U
##WAVK test statistic = 18.4712, user-defined window = 10, p-value < 2.2e-16
##alternative hypothesis: presence of a trend
}
\keyword{ts}
\keyword{htest}
\keyword{trend}
| /funtimes.Rcheck/00_pkg_src/funtimes/man/wavk.test.Rd | no_license | ESchaeffer13/funtimes | R | false | false | 5,859 | rd | \name{wavk.test}
\alias{wavk.test}
\title{WAVK trend test}
\description{
Non-parametric test to detect possibly non-monotonic parametric trend in a time series.
}
\usage{
wavk.test(x, factor.length=c("user.defined", "adaptive.selection"),
Window=round(0.1*length(x)), q=3/4, j=c(8:11), B=1000,
H0=c("no trend","linear"), method=c("boot", "asympt"),
ar.order=NULL, BIC=TRUE, robust=TRUE, out=FALSE)
}
\arguments{
\item{x}{univariate time series. Missing values are not allowed.}
\item{factor.length}{method to define the length of local windows (factors). Default option \code{"user.defined"} allows to set only one value of the argument \code{Window}. The option \code{"adaptive.selection"} sets \code{method = "boot"} and employs heuristic \eqn{m}-out-of-\eqn{n} subsampling algorithm (Bickel and Sakov, 2008) to select an optimal window from the set of possible windows \code{length(x)*q^j} whose values are mapped to the largest previous integer and greater than 2.}
\item{Window}{length of the local window (factor), default is \code{round(0.1*length(x))}. This argument is ignored if \code{factor.length = "adaptive.selection"}.}
\item{q}{scalar from 0 to 1 to define the set of possible windows when \code{factor.length =} \code{"adaptive.selection"}. Default is \eqn{3/4}. This argument is ignored if \code{factor.length =} \code{"user.defined"}.}
\item{j}{numeric vector to define the set of possible windows when \code{factor.length =} \code{"adaptive.selection"}. Default is \code{c(8:11)}. This argument is ignored if \code{factor.length =} \code{"user.defined"}.}
\item{B}{number of bootstrap simulations to obtain empirical critical values. Default is 1000.}
\item{H0}{null hypothesis: \code{"no trend"} (default) for testing the absence of trend (in other words, constant trend) vs. any, possibly non-monotonic, trend; \code{"linear"} for testing the presence of parametric linear trend vs. alternative nonlinear trend.}
\item{method}{method of obtaining critical values: from asymptotical (\code{"asympt"}) or bootstrap (\code{"boot"}) distribution. If \code{factor.length =} \code{"adaptive.selection"} the option \code{"boot"} is used.}
\item{ar.order}{order of autoregressive filter when \code{BIC = FALSE}, or the maximal order for BIC-based filtering. Default is \code{floor(10*log10(length(x)))}.}
\item{BIC}{logical value indicates whether the order of autoregressive filter should be selected by Bayesian information criterion (BIC). If \code{TRUE} (default), models of orders 1,...,\code{ar.order} or 1,...,\code{floor(10*log10(length(x)))} are be considered, depending on whether \code{ar.order} is defined or not.}
\item{robust}{logical value indicates whether to use robust estimates of autoregression coefficients using \code{\link{HVK}} function (default), or to use Yule-Walker estimates delivered by \command{ar} function.}
\item{out}{logical value indicates whether full output should be shown. Default is \code{FALSE}.}
}
\value{
A list with class htest containing the following components:
\item{method}{name of the method.}
\item{data.name}{name of the data.}
\item{statistic}{value of the test statistic.}
\item{p.value}{\eqn{p}-value of the test.}
\item{alternative}{alternative hypothesis.}
\item{parameter}{window that was used.}
\item{estimate}{list, containing the estimated coefficients of linear trend (if \code{H0="linear"}); estimated AR coefficients; test results for all considered windows (if \code{factor.length =} \code{"adaptive.selection"}).}
}
\references{
Bickel, P. J. and Sakov, A. (2008) On the choice of \eqn{m} in the \eqn{m} out of \eqn{n} bootstrap and confidence bounds for extrema. \emph{Statistica Sinica} 18, 967--985.
Lyubchich, V., Gel, Y. R. and El-Shaarawi, A. (2013) On detecting non-monotonic trends in environmental time series: a fusion of local regression and bootstrap. \emph{Environmetrics} 24, 209--226.
Wang, L., Akritas, M. G. and Van Keilegom, I. (2008) An ANOVA-type nonparametric diagnostic test for heteroscedastic regression models. \emph{Journal of Nonparametric Statistics} 20(5), 365--382.
Wang, L. and Van Keilegom, I. (2007) Nonparametric test for the form of parametric regression with time series errors. \emph{Statistica Sinica} 17, 369--386.
}
\author{Yulia R. Gel, Vyacheslav Lyubchich}
\seealso{\code{\link{HVK}},
\code{\link{WAVK}},
\code{\link{sync.test}}.
}
\examples{
# Fix seed for reproduceable simulations.
set.seed(123)
# Simulate autoregressive time series of length n with linear trend 1+2*t,
# where t is a regular sequence on the interval (0,1].
n <- 100
t <- c(1:n)/n
U <- 1+2*t + arima.sim(n=n, list(order = c(2,0,0), ar = c(-0.7, -0.1)))
# Test for linear trend with output of all results.
\dontrun{
wavk.test(U, factor.length = "adaptive.selection", H0="linear", out=TRUE, B=1000)
}
# Sample output:
##
## Trend test by Wang, Akritas and Van Keilegom
##
##data: U
##WAVK test statistic = 0.8562, adaptively selected window = 4, p-value = 0.356
##alternative hypothesis: presence of a nonlinear trend
##sample estimates:
##$linear_trend_coefficients
##(Intercept) t
## 0.9917251 2.0224272
##
##$AR_coefficients
## phi_1 phi_2
##-0.6814546 -0.2404422
##
##$all_considered_windows
## Window WAVK-statistic p-value
## 4 0.8561654 0.356
## 5 0.8620023 0.320
## 7 0.8691870 0.288
## 10 0.6837790 0.306
# Test H0 of absence of a trend using asymptotic distribution of statistic.
wavk.test(U, method="asympt")
# Sample output:
##
## Trend test by Wang, Akritas and Van Keilegom
##
##data: U
##WAVK test statistic = 18.4712, user-defined window = 10, p-value < 2.2e-16
##alternative hypothesis: presence of a trend
}
\keyword{ts}
\keyword{htest}
\keyword{trend}
|
YATAProvider <- R6Class("YATAProvider",
public = list(
name = NULL
,prefix = NULL
,initialize = function(provider) {
self$name = provider
self$prefix = "POL"
}
)
)
| /YATACore/R/R6_YATAProvider.R | no_license | Grandez/YATA | R | false | false | 216 | r | YATAProvider <- R6Class("YATAProvider",
public = list(
name = NULL
,prefix = NULL
,initialize = function(provider) {
self$name = provider
self$prefix = "POL"
}
)
)
|
\name{forgnlit30}
\docType{data}
\alias{forgnlit30}
\title{Foreign-born literacy in 1930}
\description{
This data set contains, on a state level, the proportion of
white residents ten years and older who are foreign born, and
the proportion of those residents who are literate. Data come from
the 1930 census and were first analyzed by Robinson (1950).
}
\usage{data(forgnlit30)}
\format{A data frame containing 5 variables and 48 observations
\tabular{lll}{
X \tab numeric \tab proportion of the white population at least 10
years of age that is foreign born \cr
Y \tab numeric \tab proportion of the white population at least 10
years of age that is illiterate \cr
W1 \tab numeric \tab proportion of the foreign-born white population
at least 10 years of age that is illiterate \cr
W2 \tab numeric \tab proportion of the native-born white population
at least 10 years of age that is illiterate \cr
ICPSR \tab numeric \tab the ICPSR state code
}
}
\references{
Robinson, W.S. (1950). ``Ecological Correlations and the Behavior
of Individuals.'' \emph{American Sociological Review}, vol. 15,
pp.351-357.
}
\keyword{datasets}
| /man/forgnlit30.Rd | no_license | guhjy/eco | R | false | false | 1,187 | rd | \name{forgnlit30}
\docType{data}
\alias{forgnlit30}
\title{Foreign-born literacy in 1930}
\description{
This data set contains, on a state level, the proportion of
white residents ten years and older who are foreign born, and
the proportion of those residents who are literate. Data come from
the 1930 census and were first analyzed by Robinson (1950).
}
\usage{data(forgnlit30)}
\format{A data frame containing 5 variables and 48 observations
\tabular{lll}{
X \tab numeric \tab proportion of the white population at least 10
years of age that is foreign born \cr
Y \tab numeric \tab proportion of the white population at least 10
years of age that is illiterate \cr
W1 \tab numeric \tab proportion of the foreign-born white population
at least 10 years of age that is illiterate \cr
W2 \tab numeric \tab proportion of the native-born white population
at least 10 years of age that is illiterate \cr
ICPSR \tab numeric \tab the ICPSR state code
}
}
\references{
Robinson, W.S. (1950). ``Ecological Correlations and the Behavior
of Individuals.'' \emph{American Sociological Review}, vol. 15,
pp.351-357.
}
\keyword{datasets}
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{distn.stats}
\alias{distn.stats}
\title{Distribution Stats}
\usage{
distn.stats(distn, a, b)
}
\arguments{
\item{distn}{named distribution, one of "beta", "exp", "f", "gamma", "lnorm", "norm", "t",}
\item{a}{numeric; first parameter of \code{distn}}
\item{b}{numeric; second parameter of \code{distn}}
}
\value{
vector with mean and standard deviation
}
\description{
Implementation of standard equations used to calculate mean and sd for a variety of
named distributions different
}
\examples{
distn.stats("norm", 0, 1)
}
\author{
David LeBauer
}
| /utils/man/distn.stats.Rd | permissive | gbromley/pecan | R | false | false | 610 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{distn.stats}
\alias{distn.stats}
\title{Distribution Stats}
\usage{
distn.stats(distn, a, b)
}
\arguments{
\item{distn}{named distribution, one of "beta", "exp", "f", "gamma", "lnorm", "norm", "t",}
\item{a}{numeric; first parameter of \code{distn}}
\item{b}{numeric; second parameter of \code{distn}}
}
\value{
vector with mean and standard deviation
}
\description{
Implementation of standard equations used to calculate mean and sd for a variety of
named distributions different
}
\examples{
distn.stats("norm", 0, 1)
}
\author{
David LeBauer
}
|
PhenotypeCombinedBatch1and2.v2.0 <- read.delim("C:/Users/mwesigwa/OneDrive/VirusAnalysis_R/PhenotypeCombinedBatch1and2.v2.0.txt", row.names=1)
| /Phenotype.R | no_license | savannahmwesigwa/CAFGEN | R | false | false | 144 | r | PhenotypeCombinedBatch1and2.v2.0 <- read.delim("C:/Users/mwesigwa/OneDrive/VirusAnalysis_R/PhenotypeCombinedBatch1and2.v2.0.txt", row.names=1)
|
# NOTE: This code has been modified from AWS Stepfunctions Python:
# https://github.com/aws/aws-step-functions-data-science-sdk-python/blob/main/src/stepfunctions/template/utils.py
# Replace the parameters using $$.Execution.Input.
replace_parameters_with_context_object = function(step){
updated_parameters = list()
for (k in names(step$parameters)){
updated_parameters[sprintf('%s.$',k)] = sprintf("$$.Execution.Input['%s'].%s", step$state_id, k)
}
return(updated_parameters)
}
replace_parameters_with_jsonpath = function(step, params){
replace_values = function(src_params, dest_params){
if (inherits(dest_params, "list")){
for (key in names(dest_params)){
if (endsWith(key, '$')){
original_key = substring(key,1, nchar(key)-2) # Remove .$ in the end
src_params[[original_key]] = NULL
src_params[[key]] = dest_params[[key]]
} else
scr_params = replace_values(src_params[[key]], dest_params[[key]])
}
}
return(src_params)
}
task_parameters = step$parameters
task_parameters = replace_values(task_parameters, params)
return(task_parameters)
}
| /R/template_utils.R | permissive | DyfanJones/aws-step-functions-data-science-sdk-r | R | false | false | 1,156 | r | # NOTE: This code has been modified from AWS Stepfunctions Python:
# https://github.com/aws/aws-step-functions-data-science-sdk-python/blob/main/src/stepfunctions/template/utils.py
# Replace the parameters using $$.Execution.Input.
replace_parameters_with_context_object = function(step){
updated_parameters = list()
for (k in names(step$parameters)){
updated_parameters[sprintf('%s.$',k)] = sprintf("$$.Execution.Input['%s'].%s", step$state_id, k)
}
return(updated_parameters)
}
replace_parameters_with_jsonpath = function(step, params){
replace_values = function(src_params, dest_params){
if (inherits(dest_params, "list")){
for (key in names(dest_params)){
if (endsWith(key, '$')){
original_key = substring(key,1, nchar(key)-2) # Remove .$ in the end
src_params[[original_key]] = NULL
src_params[[key]] = dest_params[[key]]
} else
scr_params = replace_values(src_params[[key]], dest_params[[key]])
}
}
return(src_params)
}
task_parameters = step$parameters
task_parameters = replace_values(task_parameters, params)
return(task_parameters)
}
|
testlist <- list(type = 1L, z = -3.18273654278496e-294)
result <- do.call(esreg::G1_fun,testlist)
str(result) | /esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609890079-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 109 | r | testlist <- list(type = 1L, z = -3.18273654278496e-294)
result <- do.call(esreg::G1_fun,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataDocumentation.R
\docType{data}
\name{Sector_Make_2013_BeforeRedef}
\alias{Sector_Make_2013_BeforeRedef}
\title{Sector 2013 Make Before Redefinition (2012 schema)}
\format{A dataframe with 16 obs. and 18 variables}
\source{
\url{https://apps.bea.gov//industry/iTables\%20Static\%20Files/AllTablesIO.zip}
}
\usage{
Sector_Make_2013_BeforeRedef
}
\description{
TBD
}
\keyword{datasets}
| /man/Sector_Make_2013_BeforeRedef.Rd | permissive | ccourtUF/useeior | R | false | true | 465 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataDocumentation.R
\docType{data}
\name{Sector_Make_2013_BeforeRedef}
\alias{Sector_Make_2013_BeforeRedef}
\title{Sector 2013 Make Before Redefinition (2012 schema)}
\format{A dataframe with 16 obs. and 18 variables}
\source{
\url{https://apps.bea.gov//industry/iTables\%20Static\%20Files/AllTablesIO.zip}
}
\usage{
Sector_Make_2013_BeforeRedef
}
\description{
TBD
}
\keyword{datasets}
|
#!/usr/bin/env Rscript
library(biomaRt)
library(data.table)
library(tidyverse)
library(GagnonMR)
library(gassocplot)
library(ggplotify)
library(cowplot)
library(ggrepel)
setwd("/mnt/sda/gagelo01/Projects/small_MR_exploration/FI_BMI")
gwasvcf::set_bcftools()
gwasvcf::set_plink()
ldref = "/home/couchr02/Mendel_Commun/Christian/LDlocal/EUR_rs"
eQTLcoloc <- fread( "Data/Modified/eQTLcoloc.txt")
eQTLcoloc <- eQTLcoloc[outcome != "Fasting_Insulin_correctedBMI",]
gencode <- fread("/home/couchr02/Mendel_Commun/Christian/GTEx_v8/gencode.v26.GRCh38.genes.txt")
gencode[, gene_id2 := gsub("\\..*", "", gene_id)]
eQTLcoloc <- merge(eQTLcoloc, distinct(gencode[, .(gene_id2, gene_name)]), by.x = "exposure", by.y = "gene_id2", all.x = TRUE)
exposures_gtex <- fread( "Data/Modified/exposures_gtex_hyprcoloc.txt")
df_index<-fread("/mnt/sda/gagelo01/Vcffile/server_gwas_id.txt")
df_index_eqtl <- df_index[pmid == 34644572, ][trait %in% eQTLcoloc[!is.na(gene_name), exposure]]
df_index_eqtl <- df_index_eqtl[,.(id, note)]
df_index_eqtl[, ID_exposure_file := paste0("/mnt/sda/gagelo01/Vcffile/Server_vcf/", id, "/", id, ".vcf.gz")]
df_index_eqtl[,id:=NULL]
setnames(df_index_eqtl, "note", "chrompos")
df_index_eqtl_split <- split(df_index_eqtl, 1:nrow(df_index_eqtl))
inst_islet<- map(df_index_eqtl_split, function(x) {
inst_tsmr <- gwasvcf::query_gwas(vcf = x$ID_exposure_file, chrompos = x$chrompos) %>%
gwasglue::gwasvcf_to_TwoSampleMR(.) %>%
as.data.table(.)
return(inst_tsmr)}) %>% rbindlist(., fill = TRUE)
inst_islet_small <- merge(inst_islet, eQTLcoloc[,.(exposure, posprob_colocH4.SNP, gene_name)],
by.x = c("exposure", "SNP"), by.y = c("exposure", "posprob_colocH4.SNP"))
exposures_gtex_small <- merge(exposures_gtex, eQTLcoloc[,.(posprob_colocH4.SNP, gene_name)],
by.x = c("gene.exposure", "SNP"), by.y = c("gene_name", "posprob_colocH4.SNP"))
nametochange<-colnames(exposures_gtex_small)[grepl("exposure", colnames(exposures_gtex_small))]
setnames(exposures_gtex_small, nametochange, gsub("exposure", "outcome", nametochange))
exposures_gtex_small[,id.outcome := outcome]
harm <- TwoSampleMR::harmonise_data(inst_islet_small[!is.na(gene_name)], exposures_gtex_small, 1)
setDT(harm)
harm<-harm[,.(SNP, effect_allele.exposure, other_allele.exposure, outcome, beta.exposure, beta.outcome)]
# Reshape the harmonised islet/GTEx effects into one row per SNP-gene with one
# beta column per tissue ("beta.<tissue>"), used to inspect tissue specificity.
harm <- separate(harm, col = outcome, into = c("tissue", "gene"), sep = "-")
setnames(harm, "beta.exposure", "beta.pancreatic_islet")
harm[,tissue := paste0("beta.", tissue)]
theres <- dcast(harm, SNP+ effect_allele.exposure + other_allele.exposure+gene+beta.pancreatic_islet ~ tissue, value.var = "beta.outcome")
# The PMS2 rsid is not in GTEx, and apparently there are no proxies
# {ldmat <- exposures_gtex[gene.exposure == "PMS2",c(unique(SNP),"rs7798471") ] %>%
#     ieugwasr::ld_matrix_local(., plink_bin = genetics.binaRies::get_plink_binary(), bfile = ldref)
#   test <- as.data.frame(ldmat)
#   test$rowname <- rownames(test)
#   nom <- colnames(test)[grepl("rs7798471",colnames(test))]
#   test <- test[,c("rowname", nom)]
#   setDT(test)
#   test[(rs7798471_C_T^2)>0.1,] #No good proxies, hence
# }
# Run the MR analysis.
# Interactive LD check of the colocalised SNPs; the diagonal is zeroed so that
# max(ldmat^2) reflects only off-diagonal r^2 between distinct instruments.
ldmat <- ieugwasr::ld_matrix_local(eQTLcoloc[!is.na(hgnc_symbol), posprob_colocH4.SNP], plink_bin = genetics.binaRies::get_plink_binary(),
                                   bfile = "/home/couchr02/Mendel_Commun/Christian/LDlocal/EUR_rs")
ldmat
diag(ldmat)<- 0
max(ldmat^2) #yes
ldmat
theres # rs1167827 is not specific, therefore remove?
# Extract fasting-insulin (trait-2-2) effects at the colocalised SNPs;
# PMS2P3 is excluded (its rsid has no usable proxy, see note above).
dat_vcf <- gwasvcf::query_gwas("/mnt/sda/gagelo01/Vcffile/Server_vcf/trait-2-2/trait-2-2.vcf.gz",
                               rsid = eQTLcoloc[!is.na(gene_name) & gene_name != "PMS2P3", unique(posprob_colocH4.SNP)],
                               proxies = "no")
inst_map <- dat_vcf %>% gwasglue::gwasvcf_to_TwoSampleMR(. , "exposure") %>% data.table::as.data.table(.)
# Outcomes: trait-1-1 plus the UKB ukb-b-9405 GWAS.
id_out<-c("trait-1-1")
path_out<-c(paste0("/mnt/sda/gagelo01/Vcffile/Server_vcf/", id_out, "/", id_out, ".vcf.gz"),
            "/mnt/sda/gagelo01/Vcffile/MRBase_vcf/ukb-b-9405/ukb-b-9405.vcf.gz")
out <- map(path_out, function(x) GagnonMR::extract_outcome_variant(snps = inst_map$SNP, outcomes = x)) %>%
  rbindlist(., fill = TRUE)
# action = 1: alleles assumed on the forward strand (no palindromic pruning)
# -- NOTE(review): confirm this is intended for these instruments.
resharm <- TwoSampleMR::harmonise_data(exposure_dat = inst_map, outcome_dat = out, action = 1) %>%
  as.data.table(.)
resharm <- TwoSampleMR::add_rsq(resharm)
resharm <- TwoSampleMR::steiger_filtering(resharm)
resmap <- resharm %>%
  TwoSampleMR::mr(., method = "mr_ivw")
## Run hyprcoloc
# Import region-wide fasting-insulin summary statistics for every eQTL window.
inst_fi<- map(as.list(df_index_eqtl$chrompos), function(x) {
  inst_tsmr <- gwasvcf::query_gwas(vcf = "/mnt/sda/gagelo01/Vcffile/Server_vcf/trait-2-2/trait-2-2.vcf.gz",
                                   chrompos = x) %>%
    gwasglue::gwasvcf_to_TwoSampleMR(.) %>%
    as.data.table(.)
  return(inst_tsmr)}) %>% rbindlist(., fill = TRUE)
inst_fi[,id.exposure:=exposure]
inst_fi <- distinct(inst_fi)
# Same extraction for the trait-1-1 (BMI) summary statistics.
inst_bmi<- map(as.list(df_index_eqtl$chrompos), function(x) {
  inst_tsmr <- gwasvcf::query_gwas(vcf = "/mnt/sda/gagelo01/Vcffile/Server_vcf/trait-1-1/trait-1-1.vcf.gz",
                                   chrompos = x) %>%
    gwasglue::gwasvcf_to_TwoSampleMR(.) %>%
    as.data.table(.)
  return(inst_tsmr)}) %>% rbindlist(., fill = TRUE)
inst_bmi[,id.exposure := exposure]
inst_bmi <- distinct(inst_bmi)
# Sanity check: number of distinct SNPs per exposure at the colocalised loci.
exposures_gtex[SNP %in% eQTLcoloc[!is.na(gene_name), posprob_colocH4.SNP], ][,unique(SNP) %>% length,by = "id.exposure"]
inst_bmi[SNP %in% eQTLcoloc[!is.na(gene_name), posprob_colocH4.SNP], ][,unique(SNP) %>% length,by = "id.exposure"]
# hyprcoloc
stack_assoc_plot_wrapper <- function(df_aligned, res_hypr, annotate_snp=NULL) {
  # Build a stacked regional association plot (gassocplot) for all exposures
  # in `df_aligned` (long format, one row per SNP x exposure).
  #
  # Args:
  #   df_aligned: data.table with SNP, chr.exposure, pos.exposure,
  #     beta.exposure, se.exposure and a factor `exposure` column whose level
  #     order fixes the panel stacking order (enforced by the stopifnot).
  #   res_hypr: hyprcoloc result table; the first colocalised cluster's
  #     candidate_snp is highlighted as the top marker.
  #   annotate_snp: fallback SNP to highlight when hyprcoloc found no cluster.
  #
  # NOTE(review): relies on the global `ldref` for the PLINK LD reference and
  # mutates `df_aligned` by reference (adds a `z` column) -- callers see that
  # side effect.
  stopifnot(is.factor(df_aligned$exposure))
  df_reshaped <- reshape(df_aligned, idvar = c("SNP", "chr.exposure", "pos.exposure"), timevar = "exposure", direction = "wide")
  ldmat <- ieugwasr::ld_matrix_local(
    df_reshaped$SNP,
    plink_bin = genetics.binaRies::get_plink_binary(),
    bfile = ldref
  )
  # ld_matrix_local names rows "rsid_A1_A2": strip the allele suffix and drop
  # SNPs that were lost from the LD reference panel.
  snpname <- do.call(rbind, strsplit(rownames(ldmat), split = "_"))[,1]
  df_reshaped <- df_reshaped[SNP %in% snpname, ]
  markers <- df_reshaped[, .(SNP, chr.exposure, pos.exposure)]
  setnames(markers, colnames(markers), c("marker", "chr", "pos"))
  # Z-scores, one column per exposure, column order matching the factor levels.
  df_aligned[, z := beta.exposure/se.exposure]
  z<-reshape(df_aligned[SNP %in% snpname,.(SNP, exposure, z)], idvar = "SNP", timevar = "exposure", direction = "wide")
  z[,SNP:=NULL]
  setnames(z, colnames(z), gsub("z.", "", colnames(z)))
  setcolorder(z, levels(df_aligned$exposure))
  zscores<-as.matrix(z)
  # Highlight the first colocalised SNP, or the caller-supplied fallback.
  top_snp <- res_hypr[traits != "None", ][1, candidate_snp]
  if(is.na(top_snp)){top_snp<-annotate_snp}
  res <- gassocplot::stack_assoc_plot(markers = markers,
                                      z = zscores,
                                      corr = ldmat,
                                      traits= colnames(zscores),
                                      top.marker= top_snp)
  return(res)
}
sensitivity.plot_wrapper <- function(df_aligned) {
  # Run hyprcoloc's prior-sensitivity analysis on an aligned long-format
  # table (one row per SNP x exposure); returns the sensitivity.plot()
  # result, including the trait similarity matrix.
  wide <- reshape(df_aligned,
                  idvar = c("SNP", "chr.exposure", "pos.exposure"),
                  timevar = "exposure", direction = "wide")
  # One beta/se column pair per exposure after widening.
  beta_cols <- grep("beta.exposure", names(wide), value = TRUE)
  se_cols <- grep("^se.exposure", names(wide), value = TRUE)
  beta_mat <- as.matrix(wide[, .SD, .SDcols = beta_cols])
  se_mat <- as.matrix(wide[, .SD, .SDcols = se_cols])
  hyprcoloc::sensitivity.plot(
    effect.est = beta_mat,
    effect.se = se_mat,
    trait.names = gsub("beta.exposure.", "", colnames(beta_mat), fixed = TRUE),
    snp.id = wide$SNP,
    similarity.matrix = TRUE
  )
}
drawheatmap <- function(heat) {
  # Render a square named numeric matrix (the similarity matrix returned by
  # hyprcoloc::sensitivity.plot) as a ggplot2 heatmap with values in [0, 1].
  # The input row/column order is preserved on both axes.
  levels <- colnames(heat)
  heat <- as.data.frame(heat)
  heat$row <- rownames(heat)
  rownames(heat)<-NULL
  setDT(heat)
  # Long format: one row per (row, variable) cell.
  heat <- melt(heat, id.vars = "row")
  # Keep the original matrix ordering instead of alphabetical factor order.
  heat[,row:=factor(row, levels = levels)]
  heat[,variable:=factor(variable, levels = levels)]
  # BUG FIX: the original mapping was aes(x = variable, tissue, y = row, ...);
  # the stray positional `tissue` argument is not a column of `heat` and
  # collides with the named y aesthetic. It has been removed.
  g <- ggplot(heat, aes(x = variable, y = row, fill = value)) +
    geom_tile() +
    # scale_fill_gradient(low = "lightblue", high = "blue3",limits=c(0,1)) +
    scale_fill_gradient(low = "#F4FAFE", high = "#4981BF",limits=c(0,1)) +
    labs(fill = "") +
    theme(
      panel.background = element_blank(),
      plot.margin = margin(t = 0.5, r = 0.5, b = 0.5, l = 0.5, "cm"),
      legend.position = "top",
      legend.title = element_text(
        color = "gray20",
        size = 12
      ),
      legend.text = element_text(
        color = "gray20",
        size = 10
      ),
      legend.title.align = 0.5,
      legend.spacing.y = unit(0.1, 'cm'),
      legend.key = element_rect(fill = "transparent", colour = "transparent"),
      legend.key.size = unit(0.8, "cm"),
      axis.title = element_blank(),
      axis.line = element_line(size = 1, colour = "gray20"),
      axis.ticks = element_line(size = 1, colour = "gray20"),
      # axis.text.y = element_text(
      #   size = 10,
      #   colour = "gray20"
      # ),
      axis.text.y=element_blank(),
      axis.text.x = element_text(
        angle = 60,
        size = 10,
        hjust = 1,
        colour = "gray20"
      ),
      axis.ticks.length = unit(.25, "cm"))
  g
}
# Build per-gene exposure sets -- liver eQTLs (GTEx), pancreatic-islet eQTLs
# and region-wide fasting insulin -- then run hyprcoloc and plot each locus.
inst_liver <- exposures_gtex[id.exposure == "Liver",]
inst_liver[,id.exposure := exposure]
inst_liver <- distinct(inst_liver)
inst_islet <- merge(inst_islet, distinct(eQTLcoloc[,.(exposure, gene_name)]), by.x = "exposure", by.y = "exposure")
inst_islet[, exposure := paste0("Pancreatic_islet-", gene_name)]
inst_islet[,id.exposure := exposure]
setnames(inst_islet, "gene_name", "gene.exposure")
genename <- eQTLcoloc[!is.na(gene_name), gene_name]
for(i in 1:length(genename)) {
  # Stack the three traits for this gene and align alleles across them.
  dt <- rbindlist(list(inst_islet[gene.exposure == genename[i], ], inst_liver[gene.exposure == genename[i]], inst_fi), fill = TRUE)
  aligned <- prepare_for_mvmr(exposure_dat = dt, d1 = dt, harmonise_strictness = 1, should_clump = FALSE)
  aligned[, chr.exposure := chr.exposure %>% as.character(.) %>% as.numeric(.)]
  hyprres <- run_hypr_on_aligned(aligned)
  # Panel order (top to bottom): islet, liver, fasting insulin.
  k<-aligned$exposure %>% unique
  levels <- c(k[grep("Pancreatic_islet", k)], k[grep("Liver", k)], "Fasting_Insulin")
  aligned[,exposure := factor(exposure, levels = rev(levels))]
  # When hyprcoloc finds no cluster, fall back to annotating the coloc SNP.
  if(all(is.na(hyprres$candidate_snp))){
    annotate_snp<-eQTLcoloc[gene_name == genename[i], posprob_colocH4.SNP]
  }else{annotate_snp<-NULL}
  A <- stack_assoc_plot_wrapper(df_aligned = aligned, res_hypr = hyprres, annotate_snp=annotate_snp)
  res <- sensitivity.plot_wrapper(df_aligned = aligned)
  B<-drawheatmap(res[[2]])
  # Two-panel figure: stacked regional plot (left) + similarity heatmap (right).
  twopanel <- ggdraw() +
    draw_plot(ggplotify::as.ggplot(A) + theme(text = element_text(size = 0.4)), x = 0.08, y =0, width = .6, height = 1) +
    draw_plot(B, x = .65, y =0.1, width = .35, height = 0.7) +
    draw_plot_label(label = c("", ""), size = 25,
                    x = c(0, 0.62), y = c(0.9, 0.9))
  saveRDS(ggplotify::as.ggplot(A), paste0("Results/stackassoc_plot_", genename[i], ".rds"))
  saveRDS(object = twopanel, file = paste0("Results/twopanel_hypr_plot_", genename[i], ".rds"))
  ggsave(plot = twopanel, filename = paste0("Results/", "twopanel_hypr_plot_", genename[i], ".png"),
         width = 590/72,height = 583/72,units="in",scale=1, device = "png")
}
####
# Scatter plot of the fasting insulin -> BMI MR at the colocalised loci.
source("Analysis/my_mr_scatter_plot.R")
genename<-c("TCF7L2", "ADCY5", "TRIM73") # remove "PMS2P3" because in LD and not specific
resharm<-fread( "Data/Modified/mapharm.txt")
resharm <- merge(resharm, distinct(eQTLcoloc[gene_name %in% genename,.(posprob_colocH4.SNP, hgnc_symbol)]), by.x = "SNP", by.y = "posprob_colocH4.SNP")
resharm[, Locus := hgnc_symbol]
resharm[,id.outcome := outcome]
resharm[,id.exposure := exposure]
resharm[, exposure_outcome := paste0(exposure, "_", outcome)]
resharm_split <- split(resharm, resharm$exposure_outcome)
# One row of MR estimates per exposure-outcome pair, all methods.
resmap <- map(resharm_split, GagnonMR::all_mr_methods) %>%
  rbindlist(.,fill=TRUE)
q_int_f <- function(harm) {
  # Combine Cochran's Q (IVW only) and the MR-Egger intercept test for one
  # harmonised exposure-outcome pair into a single one-row table, together
  # with the number of SNPs and the instrument F-statistic.
  heterogeneity <- as.data.table(TwoSampleMR::mr_heterogeneity(harm))
  egger <- as.data.table(TwoSampleMR::mr_pleiotropy_test(harm))
  # Keep only the IVW Q statistic and drop id/method bookkeeping columns.
  heterogeneity <- heterogeneity[method != "MR Egger", ]
  heterogeneity[, c("method", "id.exposure", "id.outcome") := NULL]
  setnames(egger, c("se", "pval"), c("egger_intercept_se", "egger_intercept_pval"))
  egger[, c("id.exposure", "id.outcome") := NULL]
  out <- merge(egger, heterogeneity, by = c("exposure", "outcome"))
  out[, nsnp := harm[, .N]]
  out[, fstat := GagnonMR::fstat_fromdat(list(harm))]
  out
}
# Heterogeneity and pleiotropy diagnostics per exposure-outcome pair.
q_int <- map(resharm_split, q_int_f) %>%
  rbindlist(.,fill=TRUE)
setDT(resmap)
# Scatter plot of the IVW estimate on the UKB+GIANT BMI outcome.
k <- resmap[outcome == "bmi_ukbgiant" & grepl("Inverse variance weighted", method),]
m <-resharm[outcome == "bmi_ukbgiant"]
m[, align:=""]
scatter <- my_mr_scatter_plot(mr_results = k, dat = m, equation_facet_grid = "") +
  ylab("SNP effect on body mass index") +
  xlab("SNP effect on fasting insulin") +
  theme(
    strip.background = element_blank(),
    strip.text.x = element_blank()
  )
# annotation <- data.table(x = 0.01, y = 0.015, label = paste0("b = ", round(k$b, digits =2),"; p = ", formatC(k$pval, format = "e", digits = 1)))
# scatter <- scatter +
#   geom_text(data=annotation, aes( x=x, y=y, label=label), ,
#             color="orange", size=7 , angle=45, fontface="bold" )
# Reload the per-gene panels saved above (X* = two-panel, Y* = stacked plot).
for(i in 1:length(genename)){
  assign(paste0("X", i),
         readRDS(file = paste0("Results/twopanel_hypr_plot_", genename[i], ".rds")))
  assign(paste0("Y", i),
         readRDS(file = paste0("Results/stackassoc_plot_", genename[i], ".rds")))
}
# Figure 2: MR scatter + the three two-panel locus figures.
fourpanel <- cowplot::plot_grid(scatter, X3, X1,X2, labels=c("A)", "B)", "C)", "D)"))
ggsave(plot = fourpanel, filename = "Results/fig2_fourpanel.png",
       width = 1000/72,height = 1000/72,units="in",scale=1, device = "png")
# Supplementary Figure 1: the three stacked regional plots side by side.
threepanel <- cowplot::plot_grid(Y1, Y2,Y3, labels=c("A)", "B)", "C)"), nrow = 1, ncol = 3)
threepanel <- threepanel + theme(plot.background = element_rect(fill = 'white', colour = 'white'))
ggsave(plot = threepanel, filename = "Results/Supplementary_Figure1.tiff",
       width = 1100/72,height = 600/72,units="in",scale=1, device = "tiff", dpi = 1200)
# Persist instruments and results.
inst_map[,inst_sel_strat:="biologically_driven"]
fwrite(inst_map, "Data/Modified/instmapfi.txt")
fwrite(resmap, "Data/Modified/resmap.txt")
fwrite(resharm, "Data/Modified/harmmap_vinuela.txt")
fwrite(theres, "Data/Modified/snptissuespecificity.txt")
message("This script finished without errors")
| /2c_hyprcolocmap.R | no_license | gagelo01/FI_BMI | R | false | false | 14,353 | r | #!/usr/bin/env Rscript
library(biomaRt)
library(data.table)
library(tidyverse)
library(GagnonMR)
library(gassocplot)
library(ggplotify)
library(cowplot)
library(ggrepel)
setwd("/mnt/sda/gagelo01/Projects/small_MR_exploration/FI_BMI")
gwasvcf::set_bcftools()
gwasvcf::set_plink()
ldref = "/home/couchr02/Mendel_Commun/Christian/LDlocal/EUR_rs"
eQTLcoloc <- fread( "Data/Modified/eQTLcoloc.txt")
eQTLcoloc <- eQTLcoloc[outcome != "Fasting_Insulin_correctedBMI",]
gencode <- fread("/home/couchr02/Mendel_Commun/Christian/GTEx_v8/gencode.v26.GRCh38.genes.txt")
gencode[, gene_id2 := gsub("\\..*", "", gene_id)]
eQTLcoloc <- merge(eQTLcoloc, distinct(gencode[, .(gene_id2, gene_name)]), by.x = "exposure", by.y = "gene_id2", all.x = TRUE)
exposures_gtex <- fread( "Data/Modified/exposures_gtex_hyprcoloc.txt")
df_index<-fread("/mnt/sda/gagelo01/Vcffile/server_gwas_id.txt")
df_index_eqtl <- df_index[pmid == 34644572, ][trait %in% eQTLcoloc[!is.na(gene_name), exposure]]
df_index_eqtl <- df_index_eqtl[,.(id, note)]
df_index_eqtl[, ID_exposure_file := paste0("/mnt/sda/gagelo01/Vcffile/Server_vcf/", id, "/", id, ".vcf.gz")]
df_index_eqtl[,id:=NULL]
setnames(df_index_eqtl, "note", "chrompos")
df_index_eqtl_split <- split(df_index_eqtl, 1:nrow(df_index_eqtl))
inst_islet<- map(df_index_eqtl_split, function(x) {
inst_tsmr <- gwasvcf::query_gwas(vcf = x$ID_exposure_file, chrompos = x$chrompos) %>%
gwasglue::gwasvcf_to_TwoSampleMR(.) %>%
as.data.table(.)
return(inst_tsmr)}) %>% rbindlist(., fill = TRUE)
inst_islet_small <- merge(inst_islet, eQTLcoloc[,.(exposure, posprob_colocH4.SNP, gene_name)],
by.x = c("exposure", "SNP"), by.y = c("exposure", "posprob_colocH4.SNP"))
exposures_gtex_small <- merge(exposures_gtex, eQTLcoloc[,.(posprob_colocH4.SNP, gene_name)],
by.x = c("gene.exposure", "SNP"), by.y = c("gene_name", "posprob_colocH4.SNP"))
nametochange<-colnames(exposures_gtex_small)[grepl("exposure", colnames(exposures_gtex_small))]
setnames(exposures_gtex_small, nametochange, gsub("exposure", "outcome", nametochange))
exposures_gtex_small[,id.outcome := outcome]
harm <- TwoSampleMR::harmonise_data(inst_islet_small[!is.na(gene_name)], exposures_gtex_small, 1)
setDT(harm)
harm<-harm[,.(SNP, effect_allele.exposure, other_allele.exposure, outcome, beta.exposure, beta.outcome)]
harm <- separate(harm, col = outcome, into = c("tissue", "gene"), sep = "-")
setnames(harm, "beta.exposure", "beta.pancreatic_islet")
harm[,tissue := paste0("beta.", tissue)]
theres <- dcast(harm, SNP+ effect_allele.exposure + other_allele.exposure+gene+beta.pancreatic_islet ~ tissue, value.var = "beta.outcome")
#PMS2 te rsid is not in GTEX, and apparently there are no proxies
# {ldmat <- exposures_gtex[gene.exposure == "PMS2",c(unique(SNP),"rs7798471") ] %>%
# ieugwasr::ld_matrix_local(., plink_bin = genetics.binaRies::get_plink_binary(), bfile = ldref)
# test <- as.data.frame(ldmat)
# test$rowname <- rownames(test)
# nom <- colnames(test)[grepl("rs7798471",colnames(test))]
# test <- test[,c("rowname", nom)]
# setDT(test)
# test[(rs7798471_C_T^2)>0.1,] #No good proxies, hence
# }
#faire mr
ldmat <- ieugwasr::ld_matrix_local(eQTLcoloc[!is.na(hgnc_symbol), posprob_colocH4.SNP], plink_bin = genetics.binaRies::get_plink_binary(),
bfile = "/home/couchr02/Mendel_Commun/Christian/LDlocal/EUR_rs")
ldmat
diag(ldmat)<- 0
max(ldmat^2) #yes
ldmat
theres #rs1167827 is notspecific therefore remove ?
dat_vcf <- gwasvcf::query_gwas("/mnt/sda/gagelo01/Vcffile/Server_vcf/trait-2-2/trait-2-2.vcf.gz",
rsid = eQTLcoloc[!is.na(gene_name) & gene_name != "PMS2P3", unique(posprob_colocH4.SNP)],
proxies = "no")
inst_map <- dat_vcf %>% gwasglue::gwasvcf_to_TwoSampleMR(. , "exposure") %>% data.table::as.data.table(.)
id_out<-c("trait-1-1")
path_out<-c(paste0("/mnt/sda/gagelo01/Vcffile/Server_vcf/", id_out, "/", id_out, ".vcf.gz"),
"/mnt/sda/gagelo01/Vcffile/MRBase_vcf/ukb-b-9405/ukb-b-9405.vcf.gz")
out <- map(path_out, function(x) GagnonMR::extract_outcome_variant(snps = inst_map$SNP, outcomes = x)) %>%
rbindlist(., fill = TRUE)
resharm <- TwoSampleMR::harmonise_data(exposure_dat = inst_map, outcome_dat = out, action = 1) %>%
as.data.table(.)
resharm <- TwoSampleMR::add_rsq(resharm)
resharm <- TwoSampleMR::steiger_filtering(resharm)
resmap <- resharm %>%
TwoSampleMR::mr(., method = "mr_ivw")
##Faire hyprcoloc
#import fi
inst_fi<- map(as.list(df_index_eqtl$chrompos), function(x) {
inst_tsmr <- gwasvcf::query_gwas(vcf = "/mnt/sda/gagelo01/Vcffile/Server_vcf/trait-2-2/trait-2-2.vcf.gz",
chrompos = x) %>%
gwasglue::gwasvcf_to_TwoSampleMR(.) %>%
as.data.table(.)
return(inst_tsmr)}) %>% rbindlist(., fill = TRUE)
inst_fi[,id.exposure:=exposure]
inst_fi <- distinct(inst_fi)
inst_bmi<- map(as.list(df_index_eqtl$chrompos), function(x) {
inst_tsmr <- gwasvcf::query_gwas(vcf = "/mnt/sda/gagelo01/Vcffile/Server_vcf/trait-1-1/trait-1-1.vcf.gz",
chrompos = x) %>%
gwasglue::gwasvcf_to_TwoSampleMR(.) %>%
as.data.table(.)
return(inst_tsmr)}) %>% rbindlist(., fill = TRUE)
inst_bmi[,id.exposure := exposure]
inst_bmi <- distinct(inst_bmi)
exposures_gtex[SNP %in% eQTLcoloc[!is.na(gene_name), posprob_colocH4.SNP], ][,unique(SNP) %>% length,by = "id.exposure"]
inst_bmi[SNP %in% eQTLcoloc[!is.na(gene_name), posprob_colocH4.SNP], ][,unique(SNP) %>% length,by = "id.exposure"]
#hyprcoloc
stack_assoc_plot_wrapper <- function(df_aligned, res_hypr, annotate_snp=NULL) {
  # Build a stacked regional association plot (gassocplot) for all exposures
  # in `df_aligned` (long format, one row per SNP x exposure).
  #
  # Args:
  #   df_aligned: data.table with SNP, chr.exposure, pos.exposure,
  #     beta.exposure, se.exposure and a factor `exposure` column whose level
  #     order fixes the panel stacking order (enforced by the stopifnot).
  #   res_hypr: hyprcoloc result table; the first colocalised cluster's
  #     candidate_snp is highlighted as the top marker.
  #   annotate_snp: fallback SNP to highlight when hyprcoloc found no cluster.
  #
  # NOTE(review): relies on the global `ldref` for the PLINK LD reference and
  # mutates `df_aligned` by reference (adds a `z` column) -- callers see that
  # side effect.
  stopifnot(is.factor(df_aligned$exposure))
  df_reshaped <- reshape(df_aligned, idvar = c("SNP", "chr.exposure", "pos.exposure"), timevar = "exposure", direction = "wide")
  ldmat <- ieugwasr::ld_matrix_local(
    df_reshaped$SNP,
    plink_bin = genetics.binaRies::get_plink_binary(),
    bfile = ldref
  )
  # ld_matrix_local names rows "rsid_A1_A2": strip the allele suffix and drop
  # SNPs that were lost from the LD reference panel.
  snpname <- do.call(rbind, strsplit(rownames(ldmat), split = "_"))[,1]
  df_reshaped <- df_reshaped[SNP %in% snpname, ]
  markers <- df_reshaped[, .(SNP, chr.exposure, pos.exposure)]
  setnames(markers, colnames(markers), c("marker", "chr", "pos"))
  # Z-scores, one column per exposure, column order matching the factor levels.
  df_aligned[, z := beta.exposure/se.exposure]
  z<-reshape(df_aligned[SNP %in% snpname,.(SNP, exposure, z)], idvar = "SNP", timevar = "exposure", direction = "wide")
  z[,SNP:=NULL]
  setnames(z, colnames(z), gsub("z.", "", colnames(z)))
  setcolorder(z, levels(df_aligned$exposure))
  zscores<-as.matrix(z)
  # Highlight the first colocalised SNP, or the caller-supplied fallback.
  top_snp <- res_hypr[traits != "None", ][1, candidate_snp]
  if(is.na(top_snp)){top_snp<-annotate_snp}
  res <- gassocplot::stack_assoc_plot(markers = markers,
                                      z = zscores,
                                      corr = ldmat,
                                      traits= colnames(zscores),
                                      top.marker= top_snp)
  return(res)
}
sensitivity.plot_wrapper <- function(df_aligned) {
  # Run hyprcoloc's prior-sensitivity analysis on an aligned long-format
  # table (one row per SNP x exposure); returns the sensitivity.plot()
  # result, including the trait similarity matrix.
  wide <- reshape(df_aligned,
                  idvar = c("SNP", "chr.exposure", "pos.exposure"),
                  timevar = "exposure", direction = "wide")
  # One beta/se column pair per exposure after widening.
  beta_cols <- grep("beta.exposure", names(wide), value = TRUE)
  se_cols <- grep("^se.exposure", names(wide), value = TRUE)
  beta_mat <- as.matrix(wide[, .SD, .SDcols = beta_cols])
  se_mat <- as.matrix(wide[, .SD, .SDcols = se_cols])
  hyprcoloc::sensitivity.plot(
    effect.est = beta_mat,
    effect.se = se_mat,
    trait.names = gsub("beta.exposure.", "", colnames(beta_mat), fixed = TRUE),
    snp.id = wide$SNP,
    similarity.matrix = TRUE
  )
}
drawheatmap <- function(heat) {
  # Render a square named numeric matrix (the similarity matrix returned by
  # hyprcoloc::sensitivity.plot) as a ggplot2 heatmap with values in [0, 1].
  # The input row/column order is preserved on both axes.
  levels <- colnames(heat)
  heat <- as.data.frame(heat)
  heat$row <- rownames(heat)
  rownames(heat)<-NULL
  setDT(heat)
  # Long format: one row per (row, variable) cell.
  heat <- melt(heat, id.vars = "row")
  # Keep the original matrix ordering instead of alphabetical factor order.
  heat[,row:=factor(row, levels = levels)]
  heat[,variable:=factor(variable, levels = levels)]
  # BUG FIX: the original mapping was aes(x = variable, tissue, y = row, ...);
  # the stray positional `tissue` argument is not a column of `heat` and
  # collides with the named y aesthetic. It has been removed.
  g <- ggplot(heat, aes(x = variable, y = row, fill = value)) +
    geom_tile() +
    # scale_fill_gradient(low = "lightblue", high = "blue3",limits=c(0,1)) +
    scale_fill_gradient(low = "#F4FAFE", high = "#4981BF",limits=c(0,1)) +
    labs(fill = "") +
    theme(
      panel.background = element_blank(),
      plot.margin = margin(t = 0.5, r = 0.5, b = 0.5, l = 0.5, "cm"),
      legend.position = "top",
      legend.title = element_text(
        color = "gray20",
        size = 12
      ),
      legend.text = element_text(
        color = "gray20",
        size = 10
      ),
      legend.title.align = 0.5,
      legend.spacing.y = unit(0.1, 'cm'),
      legend.key = element_rect(fill = "transparent", colour = "transparent"),
      legend.key.size = unit(0.8, "cm"),
      axis.title = element_blank(),
      axis.line = element_line(size = 1, colour = "gray20"),
      axis.ticks = element_line(size = 1, colour = "gray20"),
      # axis.text.y = element_text(
      #   size = 10,
      #   colour = "gray20"
      # ),
      axis.text.y=element_blank(),
      axis.text.x = element_text(
        angle = 60,
        size = 10,
        hjust = 1,
        colour = "gray20"
      ),
      axis.ticks.length = unit(.25, "cm"))
  g
}
# Build per-gene exposure sets -- liver eQTLs (GTEx), pancreatic-islet eQTLs
# and region-wide fasting insulin -- then run hyprcoloc and plot each locus.
inst_liver <- exposures_gtex[id.exposure == "Liver",]
inst_liver[,id.exposure := exposure]
inst_liver <- distinct(inst_liver)
inst_islet <- merge(inst_islet, distinct(eQTLcoloc[,.(exposure, gene_name)]), by.x = "exposure", by.y = "exposure")
inst_islet[, exposure := paste0("Pancreatic_islet-", gene_name)]
inst_islet[,id.exposure := exposure]
setnames(inst_islet, "gene_name", "gene.exposure")
genename <- eQTLcoloc[!is.na(gene_name), gene_name]
for(i in 1:length(genename)) {
  # Stack the three traits for this gene and align alleles across them.
  dt <- rbindlist(list(inst_islet[gene.exposure == genename[i], ], inst_liver[gene.exposure == genename[i]], inst_fi), fill = TRUE)
  aligned <- prepare_for_mvmr(exposure_dat = dt, d1 = dt, harmonise_strictness = 1, should_clump = FALSE)
  aligned[, chr.exposure := chr.exposure %>% as.character(.) %>% as.numeric(.)]
  hyprres <- run_hypr_on_aligned(aligned)
  # Panel order (top to bottom): islet, liver, fasting insulin.
  k<-aligned$exposure %>% unique
  levels <- c(k[grep("Pancreatic_islet", k)], k[grep("Liver", k)], "Fasting_Insulin")
  aligned[,exposure := factor(exposure, levels = rev(levels))]
  # When hyprcoloc finds no cluster, fall back to annotating the coloc SNP.
  if(all(is.na(hyprres$candidate_snp))){
    annotate_snp<-eQTLcoloc[gene_name == genename[i], posprob_colocH4.SNP]
  }else{annotate_snp<-NULL}
  A <- stack_assoc_plot_wrapper(df_aligned = aligned, res_hypr = hyprres, annotate_snp=annotate_snp)
  res <- sensitivity.plot_wrapper(df_aligned = aligned)
  B<-drawheatmap(res[[2]])
  # Two-panel figure: stacked regional plot (left) + similarity heatmap (right).
  twopanel <- ggdraw() +
    draw_plot(ggplotify::as.ggplot(A) + theme(text = element_text(size = 0.4)), x = 0.08, y =0, width = .6, height = 1) +
    draw_plot(B, x = .65, y =0.1, width = .35, height = 0.7) +
    draw_plot_label(label = c("", ""), size = 25,
                    x = c(0, 0.62), y = c(0.9, 0.9))
  saveRDS(ggplotify::as.ggplot(A), paste0("Results/stackassoc_plot_", genename[i], ".rds"))
  saveRDS(object = twopanel, file = paste0("Results/twopanel_hypr_plot_", genename[i], ".rds"))
  ggsave(plot = twopanel, filename = paste0("Results/", "twopanel_hypr_plot_", genename[i], ".png"),
         width = 590/72,height = 583/72,units="in",scale=1, device = "png")
}
####
# Scatter plot of the fasting insulin -> BMI MR at the colocalised loci.
source("Analysis/my_mr_scatter_plot.R")
genename<-c("TCF7L2", "ADCY5", "TRIM73") # remove "PMS2P3" because in LD and not specific
resharm<-fread( "Data/Modified/mapharm.txt")
resharm <- merge(resharm, distinct(eQTLcoloc[gene_name %in% genename,.(posprob_colocH4.SNP, hgnc_symbol)]), by.x = "SNP", by.y = "posprob_colocH4.SNP")
resharm[, Locus := hgnc_symbol]
resharm[,id.outcome := outcome]
resharm[,id.exposure := exposure]
resharm[, exposure_outcome := paste0(exposure, "_", outcome)]
resharm_split <- split(resharm, resharm$exposure_outcome)
# One row of MR estimates per exposure-outcome pair, all methods.
resmap <- map(resharm_split, GagnonMR::all_mr_methods) %>%
  rbindlist(.,fill=TRUE)
q_int_f <- function(harm) {
  # Combine Cochran's Q (IVW only) and the MR-Egger intercept test for one
  # harmonised exposure-outcome pair into a single one-row table, together
  # with the number of SNPs and the instrument F-statistic.
  heterogeneity <- as.data.table(TwoSampleMR::mr_heterogeneity(harm))
  egger <- as.data.table(TwoSampleMR::mr_pleiotropy_test(harm))
  # Keep only the IVW Q statistic and drop id/method bookkeeping columns.
  heterogeneity <- heterogeneity[method != "MR Egger", ]
  heterogeneity[, c("method", "id.exposure", "id.outcome") := NULL]
  setnames(egger, c("se", "pval"), c("egger_intercept_se", "egger_intercept_pval"))
  egger[, c("id.exposure", "id.outcome") := NULL]
  out <- merge(egger, heterogeneity, by = c("exposure", "outcome"))
  out[, nsnp := harm[, .N]]
  out[, fstat := GagnonMR::fstat_fromdat(list(harm))]
  out
}
# Heterogeneity and pleiotropy diagnostics per exposure-outcome pair.
q_int <- map(resharm_split, q_int_f) %>%
  rbindlist(.,fill=TRUE)
setDT(resmap)
# Scatter plot of the IVW estimate on the UKB+GIANT BMI outcome.
k <- resmap[outcome == "bmi_ukbgiant" & grepl("Inverse variance weighted", method),]
m <-resharm[outcome == "bmi_ukbgiant"]
m[, align:=""]
scatter <- my_mr_scatter_plot(mr_results = k, dat = m, equation_facet_grid = "") +
  ylab("SNP effect on body mass index") +
  xlab("SNP effect on fasting insulin") +
  theme(
    strip.background = element_blank(),
    strip.text.x = element_blank()
  )
# annotation <- data.table(x = 0.01, y = 0.015, label = paste0("b = ", round(k$b, digits =2),"; p = ", formatC(k$pval, format = "e", digits = 1)))
# scatter <- scatter +
#   geom_text(data=annotation, aes( x=x, y=y, label=label), ,
#             color="orange", size=7 , angle=45, fontface="bold" )
# Reload the per-gene panels saved above (X* = two-panel, Y* = stacked plot).
for(i in 1:length(genename)){
  assign(paste0("X", i),
         readRDS(file = paste0("Results/twopanel_hypr_plot_", genename[i], ".rds")))
  assign(paste0("Y", i),
         readRDS(file = paste0("Results/stackassoc_plot_", genename[i], ".rds")))
}
# Figure 2: MR scatter + the three two-panel locus figures.
fourpanel <- cowplot::plot_grid(scatter, X3, X1,X2, labels=c("A)", "B)", "C)", "D)"))
ggsave(plot = fourpanel, filename = "Results/fig2_fourpanel.png",
       width = 1000/72,height = 1000/72,units="in",scale=1, device = "png")
# Supplementary Figure 1: the three stacked regional plots side by side.
threepanel <- cowplot::plot_grid(Y1, Y2,Y3, labels=c("A)", "B)", "C)"), nrow = 1, ncol = 3)
threepanel <- threepanel + theme(plot.background = element_rect(fill = 'white', colour = 'white'))
ggsave(plot = threepanel, filename = "Results/Supplementary_Figure1.tiff",
       width = 1100/72,height = 600/72,units="in",scale=1, device = "tiff", dpi = 1200)
# Persist instruments and results.
inst_map[,inst_sel_strat:="biologically_driven"]
fwrite(inst_map, "Data/Modified/instmapfi.txt")
fwrite(resmap, "Data/Modified/resmap.txt")
fwrite(resharm, "Data/Modified/harmmap_vinuela.txt")
fwrite(theres, "Data/Modified/snptissuespecificity.txt")
message("This script finished without errors")
|
#' The knapsack problem: brute force algorithm.
#'
#' The knapsack problem is a discrete optimization problem where we have a knapsack that can take a
#' limited weight W and we want to fill this knapsack with a number of items i = 1,...,n, each with
#' a weight w(i) and a value v(i). The goal is to find the knapsack with the largest value of the
#' elements added to the knapsack.
#'
#' \code{brute_force_knapsack} uses the brute-force algorithm. This algorithms works by going through
#' all possible alternatives (all possible combinations 2n are evaluated) and return the maximum
#' value found.
#'
#' @param x an object of class data.frame with two variables v (values) and w (weights).
#' @param W numeric scalar object that represents the knapsack size.
#' @param parallel make the function run in parallel.
#' @param fast uses a c++ function for faster execution.
#'
#'
#' @return \code{brute_force_knapsack} returns a list with two elements: the elements added to the knapsack and the maximum knapsack value.
#'
#' @examples
#' knapsack_objects <- generate_knapsack()
#' brute_force_knapsack(x = knapsack_objects[1:8,], W = 3500)
#' brute_force_knapsack(x = knapsack_objects[1:12,], W = 3500)
#' brute_force_knapsack(x = knapsack_objects[1:8,], W = 2000)
#' brute_force_knapsack(x = knapsack_objects[1:12,], W = 2000)
#' brute_force_knapsack(x = knapsack_objects[1:12,], W = 3500, parallel=TRUE)
#'
#' @references \url{https://en.wikipedia.org/wiki/Knapsack_problem}
#'
#' @import parallel
#' @importFrom utils head
#'
#' @export
#'
brute_force_knapsack <- function(x, W, parallel = FALSE, fast = FALSE) {
  # Exhaustive 0/1 knapsack: enumerate all 2^n item subsets and return the
  # best feasible one.
  #
  # x:        data.frame whose first column is the item weights and second
  #           column the item values.
  # W:        positive knapsack capacity.
  # parallel: evaluate candidates on a local cluster (PSOCK on Windows,
  #           FORK elsewhere).
  # fast:     decode subset bit masks with the compiled intToBinary()
  #           helper instead of base intToBits().
  #
  # Returns list(value = best total value, elements = indices of the chosen
  # rows of x).
  stopifnot(W > 0, is.data.frame(x), is.logical(parallel), is.logical(fast))
  combn <- seq_len(2^nrow(x))
  # Evaluate one candidate: span[itr] is decoded as a bit mask over the rows
  # of data_frame (bit k set -> item k included). Returns c(weight, value),
  # or c(NA, NA) when the candidate exceeds the capacity.
  .calculate_row <- function(itr, span, data_frame, weight, use_cpp) {
    if (use_cpp) {
      bin <- as.logical(intToBinary(span[itr], nrow(data_frame)))
    } else {
      bin <- as.logical(head(intToBits(span[itr]), nrow(data_frame)))
    }
    temp_weight <- sum(data_frame[, 1][bin])
    if (temp_weight <= weight) {
      c(temp_weight, sum(data_frame[, 2][bin]))
    } else {
      c(NA, NA)
    }
  }
  if (parallel) {
    chk <- Sys.getenv("_R_CHECK_LIMIT_CORES_", "")
    if (nzchar(chk) && chk == "TRUE") {
      # Use 2 cores on CRAN/Travis/AppVeyor.
      no_cores <- 2L
    } else {
      # Leave 1 core for other processes to not lock the computer.
      no_cores <- max(1, detectCores() - 1)
    }
    if (Sys.info()["sysname"] == "Windows") {
      cl <- makeCluster(no_cores, type = "PSOCK")
    } else {
      cl <- makeCluster(no_cores, type = "FORK")
    }
    # Ensure the cluster is torn down even if parLapply errors.
    on.exit(stopCluster(cl), add = TRUE)
    # BUG FIX: the original passed chunk.size = as.integer(combn/no_cores),
    # i.e. a whole vector where parLapply expects a single scalar.
    result <- parLapply(cl = cl, X = combn, fun = .calculate_row,
                        combn, x, W, use_cpp = fast,
                        chunk.size = ceiling(length(combn) / no_cores))
  } else {
    # Preallocate instead of growing the list element by element.
    result <- vector("list", length(combn))
    for (i in combn) {
      result[[i]] <- .calculate_row(i, combn, x, W, use_cpp = fast)
    }
  }
  result <- matrix(unlist(result), byrow = TRUE, ncol = 2)
  # NA rows (over-capacity candidates) are ignored by which.max; the empty
  # subset (index 2^n, all bits zero) is always feasible, so a maximum exists.
  result_index <- which.max(result[, 2])
  value <- result[result_index, 2]
  elements <- which(as.logical(head(intToBits(result_index), nrow(x))))
  list("value" = value, "elements" = elements)
}
| /Advanced Programming in R /Lab 6/knapsack/R/knapsack_brute_force.R | permissive | aydinardalan/MSc_Statistics-and-Machine-Learning | R | false | false | 3,276 | r | #' The knapsack problem: brute force algorithm.
#'
#' The knapsack problem is a discrete optimization problem where we have a knapsack that can take a
#' limited weight W and we want to fill this knapsack with a number of items i = 1,...,n, each with
#' a weight w(i) and a value v(i). The goal is to find the knapsack with the largest value of the
#' elements added to the knapsack.
#'
#' \code{brute_force_knapsack} uses the brute-force algorithm. This algorithms works by going through
#' all possible alternatives (all possible combinations 2n are evaluated) and return the maximum
#' value found.
#'
#' @param x an object of class data.frame with two variables v (values) and w (weights).
#' @param W numeric scalar object that represents the knapsack size.
#' @param parallel make the function run in parallel.
#' @param fast uses a c++ function for faster execution.
#'
#'
#' @return \code{brute_force_knapsack} returns a list with two elements: the elements added to the knapsack and the maximum knapsack value.
#'
#' @examples
#' knapsack_objects <- generate_knapsack()
#' brute_force_knapsack(x = knapsack_objects[1:8,], W = 3500)
#' brute_force_knapsack(x = knapsack_objects[1:12,], W = 3500)
#' brute_force_knapsack(x = knapsack_objects[1:8,], W = 2000)
#' brute_force_knapsack(x = knapsack_objects[1:12,], W = 2000)
#' brute_force_knapsack(x = knapsack_objects[1:12,], W = 3500, parallel=TRUE)
#'
#' @references \url{https://en.wikipedia.org/wiki/Knapsack_problem}
#'
#' @import parallel
#' @importFrom utils head
#'
#' @export
#'
brute_force_knapsack <- function(x, W, parallel = FALSE, fast = FALSE) {
  # Exhaustive 0/1 knapsack: enumerate all 2^n item subsets and return the
  # best feasible one.
  #
  # x:        data.frame whose first column is the item weights and second
  #           column the item values.
  # W:        positive knapsack capacity.
  # parallel: evaluate candidates on a local cluster (PSOCK on Windows,
  #           FORK elsewhere).
  # fast:     decode subset bit masks with the compiled intToBinary()
  #           helper instead of base intToBits().
  #
  # Returns list(value = best total value, elements = indices of the chosen
  # rows of x).
  stopifnot(W > 0, is.data.frame(x), is.logical(parallel), is.logical(fast))
  combn <- seq_len(2^nrow(x))
  # Evaluate one candidate: span[itr] is decoded as a bit mask over the rows
  # of data_frame (bit k set -> item k included). Returns c(weight, value),
  # or c(NA, NA) when the candidate exceeds the capacity.
  .calculate_row <- function(itr, span, data_frame, weight, use_cpp) {
    if (use_cpp) {
      bin <- as.logical(intToBinary(span[itr], nrow(data_frame)))
    } else {
      bin <- as.logical(head(intToBits(span[itr]), nrow(data_frame)))
    }
    temp_weight <- sum(data_frame[, 1][bin])
    if (temp_weight <= weight) {
      c(temp_weight, sum(data_frame[, 2][bin]))
    } else {
      c(NA, NA)
    }
  }
  if (parallel) {
    chk <- Sys.getenv("_R_CHECK_LIMIT_CORES_", "")
    if (nzchar(chk) && chk == "TRUE") {
      # Use 2 cores on CRAN/Travis/AppVeyor.
      no_cores <- 2L
    } else {
      # Leave 1 core for other processes to not lock the computer.
      no_cores <- max(1, detectCores() - 1)
    }
    if (Sys.info()["sysname"] == "Windows") {
      cl <- makeCluster(no_cores, type = "PSOCK")
    } else {
      cl <- makeCluster(no_cores, type = "FORK")
    }
    # Ensure the cluster is torn down even if parLapply errors.
    on.exit(stopCluster(cl), add = TRUE)
    # BUG FIX: the original passed chunk.size = as.integer(combn/no_cores),
    # i.e. a whole vector where parLapply expects a single scalar.
    result <- parLapply(cl = cl, X = combn, fun = .calculate_row,
                        combn, x, W, use_cpp = fast,
                        chunk.size = ceiling(length(combn) / no_cores))
  } else {
    # Preallocate instead of growing the list element by element.
    result <- vector("list", length(combn))
    for (i in combn) {
      result[[i]] <- .calculate_row(i, combn, x, W, use_cpp = fast)
    }
  }
  result <- matrix(unlist(result), byrow = TRUE, ncol = 2)
  # NA rows (over-capacity candidates) are ignored by which.max; the empty
  # subset (index 2^n, all bits zero) is always feasible, so a maximum exists.
  result_index <- which.max(result[, 2])
  value <- result[result_index, 2]
  elements <- which(as.logical(head(intToBits(result_index), nrow(x))))
  list("value" = value, "elements" = elements)
}
|
context("Test `qq` and `qqcat`")
# Modernised from the deprecated expect_that(equals())/prints_text() API.
test_that("Simple test", {
  a <- 1
  expect_equal(qq("this is @{a}"), "this is 1")
  expect_output(qqcat("this is @{a}"), "this is 1")
})
# Modernised from the deprecated expect_that(equals())/prints_text() API.
test_that("pass a list as an environment", {
  l <- list(a = "a")
  expect_equal(qq("this is @{a} in `l`", env = l), "this is a in `l`")
  expect_output(qqcat("this is @{a} in `l`", env = l), "this is a in `l`")
})
# Modernised from the deprecated expect_that(equals()) API.
test_that("variables are multiple element vector", {
  a <- 1:6
  expect_equal(
    qq("@{a} is an @{ifelse(a %% 2, 'odd', 'even')} number\n"),
    "1 is an odd number\n2 is an even number\n3 is an odd number\n4 is an even number\n5 is an odd number\n6 is an even number\n"
  )
  # collapse = FALSE keeps one string per element of `a`.
  expect_equal(
    qq("@{a} is an @{ifelse(a %% 2, 'odd', 'even')} number\n", collapse = FALSE),
    c("1 is an odd number\n", "2 is an even number\n", "3 is an odd number\n",
      "4 is an even number\n", "5 is an odd number\n", "6 is an even number\n")
  )
})
# Modernised from the deprecated expect_that(equals()) API. Each case checks
# that find_code() extracts the first template matching the given pattern.
test_that("different code patterns", {
  expect_equal(find_code("@\\{CODE\\}", "@{a}, @[b], @<c>, @(d), ${e}, `f`"),
               list(template = "@{a}", code = "a"))
  expect_equal(find_code("@\\[CODE\\]", "@{a}, @[b], @<c>, @(d), ${e}, `f`"),
               list(template = "@[b]", code = "b"))
  expect_equal(find_code("@<CODE>", "@{a}, @[b], @<c>, @(d), ${e}, `f`"),
               list(template = "@<c>", code = "c"))
  expect_equal(find_code("@\\(CODE\\)", "@{a}, @[b], @<c>, @(d), ${e}, `f`"),
               list(template = "@(d)", code = "d"))
  expect_equal(find_code("\\$\\{CODE\\}", "@{a}, @[b], @<c>, @(d), ${e}, `f`"),
               list(template = "${e}", code = "e"))
  expect_equal(find_code("#\\{CODE\\}", "@{a}, @[b], @<c>, @(d), #{e}, `f`"),
               list(template = "#{e}", code = "e"))
  expect_equal(find_code("`CODE`", "@{a}, @[b], @<c>, @(d), #{e}, `f`"),
               list(template = "`f`", code = "f"))
  expect_equal(find_code("@\\[\\[CODE\\]\\]", "@{a}, @[b], @<c>, @(d), #{e}, `f`, @[[g]]"),
               list(template = "@[[g]]", code = "g"))
})
# The whole template (between the backticks) is evaluated as R code by qq()
# with code.pattern = "`CODE`"; the template builds one HTML row per element
# of `a`/`b` and the test checks the concatenated result.
test_that("simple template", {
  a = letters[1:3]
  b = 1:3
  expect_that(qq("`
    text = character(length(a))
    for(i in seq_along(a)) {
      text[i] = qq('<tr><td>@{a[i]}</td><td>@{b[i]}</td></tr>\n')
    }
    text
`", code.pattern = "`CODE`"),
  equals("<tr><td>a</td><td>1</td></tr>\n<tr><td>b</td><td>2</td></tr>\n<tr><td>c</td><td>3</td></tr>\n"))
})
# Exercises the cat_prefix / cat_verbose options: the prefix may be a string,
# a function, or NULL, may be overridden per call, and cat_verbose = FALSE
# silences qqcat() entirely. Options are reset between scenarios so the
# assertions stay independent.
test_that("test `cat_prefix`", {
  qq.options("cat_prefix" = "INFO:")
  expect_that(qqcat("a"),
    prints_text("INFO:a"))
  qq.options("cat_prefix" = NULL)
  expect_that(qqcat("a"),
    prints_text("a"))
  # A function-valued prefix is evaluated at print time.
  qq.options("cat_prefix" = function() "DEBUG:")
  expect_that(qqcat("a"),
    prints_text("DEBUG:a"))
  qq.options("cat_prefix" = NULL)
  expect_that(qqcat("a"),
    prints_text("a"))
  # cat_verbose = FALSE suppresses output regardless of the prefix.
  qq.options("cat_prefix" = "INFO:", "cat_verbose" = FALSE)
  expect_that(qqcat("a"),
    prints_text(""))
  qq.options("cat_prefix" = "INFO:", "cat_verbose" = TRUE)
  expect_that(qqcat("a"),
    prints_text("INFO:a"))
  qq.options(RESET = TRUE)
  # A per-call cat_prefix argument overrides the global option.
  expect_that(qqcat("a", cat_prefix = "DEBUG:a"),
    prints_text("DEBUG:a"))
  qq.options("cat_prefix" = "INFO:")
  expect_that(qqcat("a", cat_prefix = "DEBUG:a"),
    prints_text("DEBUG:a"))
  expect_that(qqcat("a", cat_prefix = function() "DEBUG:a"),
    prints_text("DEBUG:a"))
  expect_that(qqcat("a",),
    prints_text("INFO:a"))
  qq.options(RESET = TRUE)
  # Round-trip: reading all options (READ.ONLY = FALSE) and writing them
  # back should be a no-op.
  op = qq.options(READ.ONLY = FALSE)
  qq.options(op)
  expect_that(qq.options("cat_prefix"), equals(""))
  qq.options(cat_prefix = function() "INFO:")
  op = qq.options(READ.ONLY = FALSE)
  qq.options(op)
})
| /inst/tests/test_qq.R | no_license | akhtet/GetoptLong | R | false | false | 3,853 | r | context("Test `qq` and `qqcat`")
test_that("Simple test", {
a = 1
expect_that(qq("this is @{a}"),
equals("this is 1"))
expect_that(qqcat("this is @{a}"),
prints_text("this is 1"))
})
test_that("pass a list as an environment", {
l = list(a = "a")
expect_that(qq("this is @{a} in `l`", env = l),
equals("this is a in `l`"))
expect_that(qqcat("this is @{a} in `l`", env = l),
prints_text("this is a in `l`"))
})
test_that("variables are multiple element vector", {
a = 1:6
expect_that(qq("@{a} is an @{ifelse(a %% 2, 'odd', 'even')} number\n"),
equals("1 is an odd number\n2 is an even number\n3 is an odd number\n4 is an even number\n5 is an odd number\n6 is an even number\n"))
expect_that(qq("@{a} is an @{ifelse(a %% 2, 'odd', 'even')} number\n", collapse = FALSE),
equals(c("1 is an odd number\n", "2 is an even number\n", "3 is an odd number\n", "4 is an even number\n", "5 is an odd number\n", "6 is an even number\n")))
})
test_that("different code patterns", {
expect_that(find_code("@\\{CODE\\}", "@{a}, @[b], @<c>, @(d), ${e}, `f`"),
equals(list(template = "@{a}",
code = "a")))
expect_that(find_code("@\\[CODE\\]", "@{a}, @[b], @<c>, @(d), ${e}, `f`"),
equals(list(template = "@[b]",
code = "b")))
expect_that(find_code("@<CODE>", "@{a}, @[b], @<c>, @(d), ${e}, `f`"),
equals(list(template = "@<c>",
code = "c")))
expect_that(find_code("@\\(CODE\\)", "@{a}, @[b], @<c>, @(d), ${e}, `f`"),
equals(list(template = "@(d)",
code = "d")))
expect_that(find_code("\\$\\{CODE\\}", "@{a}, @[b], @<c>, @(d), ${e}, `f`"),
equals(list(template = "${e}",
code = "e")))
expect_that(find_code("#\\{CODE\\}", "@{a}, @[b], @<c>, @(d), #{e}, `f`"),
equals(list(template = "#{e}",
code = "e")))
expect_that(find_code("`CODE`", "@{a}, @[b], @<c>, @(d), #{e}, `f`"),
equals(list(template = "`f`",
code = "f")))
expect_that(find_code("@\\[\\[CODE\\]\\]", "@{a}, @[b], @<c>, @(d), #{e}, `f`, @[[g]]"),
equals(list(template = "@[[g]]",
code = "g")))
})
test_that("simple template", {
a = letters[1:3]
b = 1:3
expect_that(qq("`
text = character(length(a))
for(i in seq_along(a)) {
text[i] = qq('<tr><td>@{a[i]}</td><td>@{b[i]}</td></tr>\n')
}
text
`", code.pattern = "`CODE`"),
equals("<tr><td>a</td><td>1</td></tr>\n<tr><td>b</td><td>2</td></tr>\n<tr><td>c</td><td>3</td></tr>\n"))
})
test_that("test `cat_prefix`", {
qq.options("cat_prefix" = "INFO:")
expect_that(qqcat("a"),
prints_text("INFO:a"))
qq.options("cat_prefix" = NULL)
expect_that(qqcat("a"),
prints_text("a"))
qq.options("cat_prefix" = function() "DEBUG:")
expect_that(qqcat("a"),
prints_text("DEBUG:a"))
qq.options("cat_prefix" = NULL)
expect_that(qqcat("a"),
prints_text("a"))
qq.options("cat_prefix" = "INFO:", "cat_verbose" = FALSE)
expect_that(qqcat("a"),
prints_text(""))
qq.options("cat_prefix" = "INFO:", "cat_verbose" = TRUE)
expect_that(qqcat("a"),
prints_text("INFO:a"))
qq.options(RESET = TRUE)
expect_that(qqcat("a", cat_prefix = "DEBUG:a"),
prints_text("DEBUG:a"))
qq.options("cat_prefix" = "INFO:")
expect_that(qqcat("a", cat_prefix = "DEBUG:a"),
prints_text("DEBUG:a"))
expect_that(qqcat("a", cat_prefix = function() "DEBUG:a"),
prints_text("DEBUG:a"))
expect_that(qqcat("a",),
prints_text("INFO:a"))
qq.options(RESET = TRUE)
op = qq.options(READ.ONLY = FALSE)
qq.options(op)
expect_that(qq.options("cat_prefix"), equals(""))
qq.options(cat_prefix = function() "INFO:")
op = qq.options(READ.ONLY = FALSE)
qq.options(op)
})
|
library(tidyverse)
library(ggplot2)
library(here)
library(lubridate)
library(parallel)
library(patchwork)
library(extrafont)
library(skimr)
library(beepr)
library(ggbump)
library(tidytext)
library(stringr)
library(ggrepel)
library(extrafont)
library(rayshader)
here()
usethis::edit_r_environ()
set.seed(1)
options(scipen=999)
source("TidyTuesdayTheme.R")
rawData<- tidytuesdayR::tt_load(2021, week = 38)
modelData<-left_join(rawData$billboard,rawData$audio_features, by = "song_id")
skim(modelData)
# Exploratory plot 1: ratio of Spotify track popularity to a Billboard-derived
# popularity percentile, over time. billboardPopularity is first the mean
# chart position per song (lower = better), then converted to a 0-100
# percentile via ecdf(); the +.0001 offset keeps zero ratios representable on
# the log10 y axis.
modelData%>%
  group_by(song_id)%>%
  mutate(billboardPopularity = sum(week_position,na.rm = T)/n(),
         week = min(mdy(week_id)))%>%
  ungroup()%>%
  mutate(billboardPopularity = 100*(1- ecdf(billboardPopularity)(billboardPopularity)))%>%
  ggplot(aes(y = spotify_track_popularity/billboardPopularity+.0001, x = week))+
  geom_point(alpha=.1)+
  geom_smooth()+
  scale_y_log10()
# Exploratory plot 2: the same Billboard popularity percentile over time,
# coloured by the 9 most frequent spotify_genre strings (the rest lumped
# into "Other" by fct_lump).
modelData%>%
  group_by(song_id)%>%
  mutate(billboardPopularity = sum(week_position,na.rm = T)/n(),
         week = min(mdy(week_id)))%>%
  ungroup()%>%
  mutate(billboardPopularity = 100*(1- ecdf(billboardPopularity)(billboardPopularity)))%>%
  ggplot(aes(y = billboardPopularity, x = week, color = fct_lump(spotify_genre,9)))+
  geom_point(alpha=.1)+
  geom_smooth()
library(tidytext)
library(stringr)
# Top 20 most frequent single-word tokens across the spotify_genre strings,
# with stop words (both apostrophe and non-apostrophe forms) removed.
topGenres <- unlist((modelData %>%
                       unnest_tokens(word, spotify_genre, token = "words") %>%
                       filter(!word %in% stop_words$word,
                              !word %in% str_remove_all(stop_words$word, "'"),
                              str_detect(word, "[a-z]")) %>%
                       count(word, sort = TRUE))[1:20, 1])
# One logical indicator column per top token: TRUE when the token appears
# anywhere in the song's spotify_genre string (case-insensitive).
modelData[, topGenres] <- lapply(topGenres, function(genre) {
  grepl(genre, modelData$spotify_genre, ignore.case = TRUE)
}) %>% do.call(bind_cols, .)
# Collapse the indicator columns into a single genre label. case_when() is
# first-match-wins, so the order below reproduces the precedence of the
# original nested ifelse() chain exactly (including the quirk that "soul"
# outranks "hop", while "rap", "hip" and "hop" all map to Rap). The grepl()
# indicators are never NA, so no NA branch is needed before the default.
modelData <- modelData %>%
  mutate(genre = case_when(
    rock         ~ "Rock",
    pop          ~ "Pop",
    country      ~ "Country",
    rap          ~ "Rap",
    hip          ~ "Rap",
    soul         ~ "Soul",
    hop          ~ "Rap",
    dance        ~ "Dance",
    classic      ~ "Classical",
    contemporary ~ "Contemporary",
    TRUE         ~ "Other"
  ))
library(gganimate)
# Animated "firework" plot: one frame per week from 1980 through 2000. Songs
# are placed by tempo (mapped to angle via coord_polar) and chart position
# (radius, axis reversed), coloured by the derived genre label; shadow_trail()
# leaves fading copies of recent frames behind for the firework effect.
myanim<-modelData%>%
  dplyr::select(week_id, week_position, tempo, genre)%>%
  distinct()%>%
  mutate(week = mdy(week_id))%>%
  filter(week>=mdy("1-1-1980"),week<=mdy("1-1-2001"))%>%
  ggplot(aes(y = week_position, x = tempo, color = as.factor(genre)))+
  geom_point(size = 3,shape=8)+
  # geom_path(size = 2)+
  scale_color_tidyTues()+
  scale_x_continuous(limits = c(70,180))+
  scale_y_reverse()+
  coord_polar(start =130)+
  theme_void()+
  theme(
    plot.background = element_rect(fill="#6c757d", colour=NA),
    legend.position = "none")+
  transition_time(week)+
  ease_aes()+
  enter_fade() +
  exit_shrink()+
  shadow_trail(distance = .1, size = 2,alpha =.25,max_frames =10)
# Render ~20 "years" of animation at 26 fps (52 weekly frames per year).
animate(myanim,fps = 26,nframes = 52*20)
anim_save(filename = "FireworkPlotofBillboardPositions_TT-9-15-2021_small.gif")
| /TidyTuesday9-15-2021.R | no_license | jpohlkamphartt/TidyTuesday | R | false | false | 3,876 | r | library(tidyverse)
library(ggplot2)
library(here)
library(lubridate)
library(parallel)
library(patchwork)
library(extrafont)
library(skimr)
library(beepr)
library(ggbump)
library(tidytext)
library(stringr)
library(ggrepel)
library(extrafont)
library(rayshader)
here()
usethis::edit_r_environ()
set.seed(1)
options(scipen=999)
source("TidyTuesdayTheme.R")
rawData<- tidytuesdayR::tt_load(2021, week = 38)
modelData<-left_join(rawData$billboard,rawData$audio_features, by = "song_id")
skim(modelData)
modelData%>%
group_by(song_id)%>%
mutate(billboardPopularity = sum(week_position,na.rm = T)/n(),
week = min(mdy(week_id)))%>%
ungroup()%>%
mutate(billboardPopularity = 100*(1- ecdf(billboardPopularity)(billboardPopularity)))%>%
ggplot(aes(y = spotify_track_popularity/billboardPopularity+.0001, x = week))+
geom_point(alpha=.1)+
geom_smooth()+
scale_y_log10()
modelData%>%
group_by(song_id)%>%
mutate(billboardPopularity = sum(week_position,na.rm = T)/n(),
week = min(mdy(week_id)))%>%
ungroup()%>%
mutate(billboardPopularity = 100*(1- ecdf(billboardPopularity)(billboardPopularity)))%>%
ggplot(aes(y = billboardPopularity, x = week, color = fct_lump(spotify_genre,9)))+
geom_point(alpha=.1)+
geom_smooth()
library(tidytext)
library(stringr)
topGenres<-unlist((modelData%>%
unnest_tokens(word,spotify_genre, token ="words") %>%
filter(!word %in% stop_words$word,
!word %in% str_remove_all(stop_words$word, "'"),
str_detect(word, "[a-z]"))%>%
count(word, sort = TRUE))[1:20,1])
modelData[,topGenres]<-lapply(topGenres,function(genre){grepl(genre,modelData$spotify_genre,ignore.case =T)})%>%do.call(bind_cols,.)
modelData<-modelData%>%
mutate(genre = ifelse(rock,"Rock",
ifelse(pop,"Pop",
ifelse(country,"Country",
ifelse(rock,"Rock",
ifelse(rap,"Rap",
ifelse(hip,"Rap",
ifelse(soul,"Soul",
ifelse(hop,"Rap",
ifelse(dance,"Dance",
ifelse(classic,"Classical",
ifelse(contemporary,"Contemporary","Other")
)
)
)
)
)
)
)
)
)
))
library(gganimate)
myanim<-modelData%>%
dplyr::select(week_id, week_position, tempo, genre)%>%
distinct()%>%
mutate(week = mdy(week_id))%>%
filter(week>=mdy("1-1-1980"),week<=mdy("1-1-2001"))%>%
ggplot(aes(y = week_position, x = tempo, color = as.factor(genre)))+
geom_point(size = 3,shape=8)+
# geom_path(size = 2)+
scale_color_tidyTues()+
scale_x_continuous(limits = c(70,180))+
scale_y_reverse()+
coord_polar(start =130)+
theme_void()+
theme(
plot.background = element_rect(fill="#6c757d", colour=NA),
legend.position = "none")+
transition_time(week)+
ease_aes()+
enter_fade() +
exit_shrink()+
shadow_trail(distance = .1, size = 2,alpha =.25,max_frames =10)
animate(myanim,fps = 26,nframes = 52*20)
anim_save(filename = "FireworkPlotofBillboardPositions_TT-9-15-2021_small.gif")
|
\name{RLab4-package}
\alias{RLab4-package}
\alias{RLab4}
\docType{package}
\title{
Linear regression and theme
}
\description{
Fitting linear models. Theme for LiU with ggplot2.
}
\details{
\tabular{ll}{
Package: \tab RLab4\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2015-09-15\cr
License: \tab What license is it under?\cr
}
}
\usage{
linreg(formula,data)
}
\author{
Martina Sandberg <marsa505@student.liu.se>
Caroline Svahn <carsv733@student.liu.se>
}
\keyword{ package }
| /RLab4.Rcheck/00_pkg_src/RLab4/man/RLab4-package.Rd | no_license | Martina145/RLab4 | R | false | false | 492 | rd | \name{RLab4-package}
\alias{RLab4-package}
\alias{RLab4}
\docType{package}
\title{
Linear regression and theme
}
\description{
Fitting linear models. Theme for LiU with ggplot2.
}
\details{
\tabular{ll}{
Package: \tab RLab4\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2015-09-15\cr
License: \tab What license is it under?\cr
}
}
\usage{
linreg(formula,data)
}
\author{
Martina Sandberg <marsa505@student.liu.se>
Caroline Svahn <carsv733@student.liu.se>
}
\keyword{ package }
|
#' Gives an html table of the first few lines of the data frame along with a description of the variables
#' and dimension of the data frame.
#'
#' @param data.frame a data frame
#' @param nlines How many rows of the data frame will be shown in the table (default 6)
#' @param dec.places How many decimal places should be shown if the variable is numeric (default 3)
#'
#'
#' @return an html document with a table that has variable names, descriptions, and the first few lines of the data frame
#'
#' @examples
#' data(iris)
#' hearhead(iris)
#'
#' @export
hearhead <- function(data.frame, nlines = 6, dec.places = 3) {
  # Render an HTML preview of `data.frame` (variable names, a per-column type
  # description, and the first `nlines` rows) and open it in the browser.
  #
  # Args:
  #   data.frame: the data frame to preview.
  #   nlines:     number of preview rows (capped at nrow(data.frame)).
  #   dec.places: decimal places used when rounding numeric columns.
  #
  # Called for its side effect (renders a temp .Rmd and opens the .html).
  if (!is.data.frame(data.frame)) {
    return(cat("argument data.frame not a data frame"))
  }
  # Capture the caller's expression for use as the page title.
  dframename <- deparse(substitute(data.frame))
  # Never index past the last row (the original produced NA rows when
  # nlines > nrow(data.frame)).
  nlines <- min(nlines, nrow(data.frame))
  # class() can return more than one string (e.g. for date-time columns);
  # keep the first so `types` is always a plain character vector.
  types <- vapply(data.frame, function(col) class(col)[1], character(1))
  for (i in seq_along(types)) {
    if (types[i] == "factor") {
      levs <- levels(data.frame[, i])
      types[i] <- paste(types[i], "with", length(levs), "levels:",
                        paste(levs, collapse = " "))
      # Show labels rather than factor codes in the preview table.
      data.frame[, i] <- as.character(data.frame[, i])
    } else if (types[i] == "character") {
      levs <- names(table(data.frame[, i]))
      types[i] <- paste(types[i], "with", length(levs), "possible values:",
                        paste(levs, collapse = " "))
    } else if (types[i] == "numeric") {
      data.frame[, i] <- round(data.frame[, i], dec.places)
    }
  }
  # Header row, markdown separator row, type row, then the preview rows.
  tab <- rbind(names(types), rep("-", length(types)), types,
               data.frame[seq_len(nlines), ])
  tmp <- tempfile()
  rmd <- paste0(tmp, ".Rmd")
  html <- paste0(tmp, ".html")
  writeLines(
    paste0(
      paste(c("# ", dframename, dim(data.frame)[1], "observations of",
              dim(data.frame)[2], " variables", "\n\n"), collapse = " "),
      paste(apply(tab, 1, paste, collapse = "|"), collapse = " \n ")
    ),
    rmd
  )
  rmarkdown::render(input = rmd, output_file = html,
                    output_format = "html_document")
  browseURL(html)
}
| /R/hearhead.R | no_license | benthegirl/BlindR | R | false | false | 1,837 | r | #' Gives an html table of the first few lines of the data frame along with a description of the variables
#' and dimension of the data frame.
#'
#' @param data.frame a data frame
#' @param nlines How many rows of the data frame will be shown in the table (default 6)
#' @param dec.places How many decimal places should be shown if the variable is numeric (default 3)
#'
#'
#' @return an html document with a table that has variable names, descriptions, and the first few lines of the data frame
#'
#' @examples
#' data(iris)
#' hearhead(iris)
#'
#' @export
hearhead <- function(data.frame, nlines = 6, dec.places = 3) {
  # Render an HTML preview of `data.frame` (variable names, a per-column type
  # description, and the first `nlines` rows) and open it in the browser.
  #
  # Args:
  #   data.frame: the data frame to preview.
  #   nlines:     number of preview rows (capped at nrow(data.frame)).
  #   dec.places: decimal places used when rounding numeric columns.
  #
  # Called for its side effect (renders a temp .Rmd and opens the .html).
  if (!is.data.frame(data.frame)) {
    return(cat("argument data.frame not a data frame"))
  }
  # Capture the caller's expression for use as the page title.
  dframename <- deparse(substitute(data.frame))
  # Never index past the last row (the original produced NA rows when
  # nlines > nrow(data.frame)).
  nlines <- min(nlines, nrow(data.frame))
  # class() can return more than one string (e.g. for date-time columns);
  # keep the first so `types` is always a plain character vector.
  types <- vapply(data.frame, function(col) class(col)[1], character(1))
  for (i in seq_along(types)) {
    if (types[i] == "factor") {
      levs <- levels(data.frame[, i])
      types[i] <- paste(types[i], "with", length(levs), "levels:",
                        paste(levs, collapse = " "))
      # Show labels rather than factor codes in the preview table.
      data.frame[, i] <- as.character(data.frame[, i])
    } else if (types[i] == "character") {
      levs <- names(table(data.frame[, i]))
      types[i] <- paste(types[i], "with", length(levs), "possible values:",
                        paste(levs, collapse = " "))
    } else if (types[i] == "numeric") {
      data.frame[, i] <- round(data.frame[, i], dec.places)
    }
  }
  # Header row, markdown separator row, type row, then the preview rows.
  tab <- rbind(names(types), rep("-", length(types)), types,
               data.frame[seq_len(nlines), ])
  tmp <- tempfile()
  rmd <- paste0(tmp, ".Rmd")
  html <- paste0(tmp, ".html")
  writeLines(
    paste0(
      paste(c("# ", dframename, dim(data.frame)[1], "observations of",
              dim(data.frame)[2], " variables", "\n\n"), collapse = " "),
      paste(apply(tab, 1, paste, collapse = "|"), collapse = " \n ")
    ),
    rmd
  )
  rmarkdown::render(input = rmd, output_file = html,
                    output_format = "html_document")
  browseURL(html)
}
|
# @knitr sidebar
# Left-hand input panel for the ALFRESCO launcher app. Everything is wrapped
# in a conditionalPanel so the controls are hidden on the "info" tab.
column(6,
  tags$head(
    tags$link(rel="stylesheet", type="text/css", href="styles_black_lightblue.css")
  ),
  conditionalPanel(condition="input.tsp!=='info'",
    wellPanel(
      # --- Run metadata: notification email and JSON parameter file ---
      fluidRow(
        column(6,
          textInput("useremail","Email results to:", value="paul.duffy@neptuneinc.org"),
          bsTooltip("useremail", "Results will be emailed when the job is finished, including a url to a new Shiny app for an interactive look at the model outputs.")
        ),
        column(6,
          selectInput("json_files", "JSON", all_json_files, "cruSW5m.JSON", width="100%"),
          bsTooltip("json_files", "ALFRESCO parameters from the selected file can be viewed on the next tab. They are updated in real time as input settings below are changed. Various input fields update and populate with defaults based on the naming convention of the input file. The current file is sent to the Atlas cluster when ALFRESCO is launched.",
            placement="right", options=list(container="body"))
        )
      ),
      # --- Scenario selectors (rendered server-side via uiOutput) ---
      fluidRow(
        column(3, uiOutput("RCP_opts")),
        column(3, uiOutput("Model_opts")),
        column(3, uiOutput("Year_opts1")),
        column(3, uiOutput("Year_opts2"))
      ),
      hr(style="border-color:#000000;"),
      # --- Fire sensitivity / ignition parameters, each with FMO options ---
      fluidRow(
        column(4, numericInput("FireSensitivity", "Fire Sensitivity", value=default_Fire.Sensitivity, min=1, max=100000)),
        column(4, selectInput("FireSensFMO", "Sens. FMO", c("None", "Standard", "15-km buffered"), width="100%")),
        column(4, sliderInput("FireSensFMOMax", "Max suppression", 0, 1, 0, 0.01, width="100%"))
      ),
      fluidRow(
        column(4, numericInput("IgnitionFactor", "Fire Ignition Factor", value=default_Fire.IgnitionFactor, min=0.00001, max=0.1)),
        column(4, selectInput("IgnitFacFMO", "Ignit. FMO", c("None", "Standard", "15-km buffered"), width="100%")),
        column(4, sliderInput("IgnitFacFMOMax", "Max suppression", 0, 1, 0, 0.01, width="100%"))
      ),
      hr(style="border-color:#000000;"),
      # --- Post-processing options: point locations, FRP, fire causes ---
      fluidRow(
        column(4, uiOutput("PointLocs")),
        column(4, textInput("frp_buffers", "Fire Return Period buffers", value="5")),
        column(4, selectInput("fire_cause", "Empirical fire sources", choices=c("Lightning", "All"), selected="Lightning", width="100%"))
      ),
      fluidRow(
        column(4, checkboxInput("include_fseByVeg", "FSE by vegetation", TRUE)),
        column(4, checkboxInput("include_frp", "Include FRP", TRUE)),
        column(4, checkboxInput("group_runs", "Check if grouping runs", TRUE))
      ),
      hr(style="border-color:#000000;"),
      # --- Run identification and simulation settings ---
      fluidRow(
        column(4, numericInput("n_sims", "Number of Sims", value=32, min=32, max=192)),
        column(4, uiOutput("GroupName")),
        column(4, uiOutput("RunName"))
      ),
      fluidRow(
        column(4, numericInput("randseed", "Random Seed", value=1234799211)),
        column(4, checkboxInput("update_json_defaults", "Save Sen/Ign as new defaults", FALSE)),
        column(4, checkboxInput("skipAlf", "Skip Alfresco/Rerun R", FALSE))
      ),
      # --- Modal dialogs (map / secondary-run options) and launch button ---
      fluidRow(
        column(4,
          actionButton("msy_btn","Output map options", class="btn-block"),
          bsTooltip("msy_btn", "Select output map types and starting years.", placement="top"),
          bsModal("msy", "Output map options", "msy_btn", size="large", uiOutput("msy_input_panel"))
        ),
        column(4,
          actionButton("secrun_btn","Secondary run options", class="btn-block"),
          bsTooltip("secrun_btn", "Setup a secondary run that starts from final-year outputs of an existing run.", placement="top"),
          bsModal("secrun", "Secondary runs", "secrun_btn", size="large",
            fluidRow(
              column(6,
                checkboxInput("secrun_use", "Run is secondary", FALSE),
                bsTooltip("secrun_use", "If checked, this run is assumed to follow an existing run whose final-year map outputs have been prepared on the Atlas cluster for use as first-year inputs to this run. Ensure the start year for this run is set appropriately. Prior year outputs from the previous run are assumed available as inputs.")
              ),
              column(6,
                textInput("prev_run_name", "Previous run name", value=default_prev_run_name),
                # FIX: this tooltip describes the prev_run_name text input but was
                # attached to "secrun_use" (which already has its own tooltip above).
                bsTooltip("prev_run_name", "Edit the default previous run name if necessary.")
              )
            )
          )
        ),
        column(4,
          actionButton("goButton_JSON", "Run Alfresco", class="btn-block"),
          # NOTE(review): "apear" in the tooltip text below is a typo ("appear");
          # left as-is since the string is user-facing copy, not a code defect.
          bsTooltip("goButton_JSON", "ALFRESCO will launch on the Atlas cluster only if running this app from the Eris server. Please wait a moment for the call to complete. When successful, a printout of the command line call to sbatch on Atlas will apear to the right.", placement="top")
        )
      ),
    style="background-color: rgba(255, 255, 255, 0.9);")
  )
)
| /run_alfresco/sidebar.R | no_license | dddyvonne/shiny-apps | R | false | false | 4,731 | r | # @knitr sidebar
column(6,
tags$head(
tags$link(rel="stylesheet", type="text/css", href="styles_black_lightblue.css")
),
conditionalPanel(condition="input.tsp!=='info'",
wellPanel(
fluidRow(
column(6,
textInput("useremail","Email results to:", value="paul.duffy@neptuneinc.org"),
bsTooltip("useremail", "Results will be emailed when the job is finished, including a url to a new Shiny app for an interactive look at the model outputs.")
),
column(6,
selectInput("json_files", "JSON", all_json_files, "cruSW5m.JSON", width="100%"),
bsTooltip("json_files", "ALFRESCO parameters from the selected file can be viewed on the next tab. They are updated in real time as input settings below are changed. Various input fields update and populate with defaults based on the naming convention of the input file. The current file is sent to the Atlas cluster when ALFRESCO is launched.",
placement="right", options=list(container="body"))
)
),
fluidRow(
column(3, uiOutput("RCP_opts")),
column(3, uiOutput("Model_opts")),
column(3, uiOutput("Year_opts1")),
column(3, uiOutput("Year_opts2"))
),
hr(style="border-color:#000000;"),
fluidRow(
column(4, numericInput("FireSensitivity", "Fire Sensitivity", value=default_Fire.Sensitivity, min=1, max=100000)),
column(4, selectInput("FireSensFMO", "Sens. FMO", c("None", "Standard", "15-km buffered"), width="100%")),
column(4, sliderInput("FireSensFMOMax", "Max suppression", 0, 1, 0, 0.01, width="100%"))
),
fluidRow(
column(4, numericInput("IgnitionFactor", "Fire Ignition Factor", value=default_Fire.IgnitionFactor, min=0.00001, max=0.1)),
column(4, selectInput("IgnitFacFMO", "Ignit. FMO", c("None", "Standard", "15-km buffered"), width="100%")),
column(4, sliderInput("IgnitFacFMOMax", "Max suppression", 0, 1, 0, 0.01, width="100%"))
),
hr(style="border-color:#000000;"),
fluidRow(
column(4, uiOutput("PointLocs")),
column(4, textInput("frp_buffers", "Fire Return Period buffers", value="5")),
column(4, selectInput("fire_cause", "Empirical fire sources", choices=c("Lightning", "All"), selected="Lightning", width="100%"))
),
fluidRow(
column(4, checkboxInput("include_fseByVeg", "FSE by vegetation", TRUE)),
column(4, checkboxInput("include_frp", "Include FRP", TRUE)),
column(4, checkboxInput("group_runs", "Check if grouping runs", TRUE))
),
hr(style="border-color:#000000;"),
fluidRow(
column(4, numericInput("n_sims", "Number of Sims", value=32, min=32, max=192)),
column(4, uiOutput("GroupName")),
column(4, uiOutput("RunName"))
),
fluidRow(
column(4, numericInput("randseed", "Random Seed", value=1234799211)),
column(4, checkboxInput("update_json_defaults", "Save Sen/Ign as new defaults", FALSE)),
column(4, checkboxInput("skipAlf", "Skip Alfresco/Rerun R", FALSE))
),
fluidRow(
column(4,
actionButton("msy_btn","Output map options", class="btn-block"),
bsTooltip("msy_btn", "Select output map types and starting years.", placement="top"),
bsModal("msy", "Output map options", "msy_btn", size="large", uiOutput("msy_input_panel"))
),
column(4,
actionButton("secrun_btn","Secondary run options", class="btn-block"),
bsTooltip("secrun_btn", "Setup a secondary run that starts from final-year outputs of an existing run.", placement="top"),
bsModal("secrun", "Secondary runs", "secrun_btn", size="large",
fluidRow(
column(6,
checkboxInput("secrun_use", "Run is secondary", FALSE),
bsTooltip("secrun_use", "If checked, this run is assumed to follow an existing run whose final-year map outputs have been prepared on the Atlas cluster for use as first-year inputs to this run. Ensure the start year for this run is set appropriately. Prior year outputs from the previous run are assumed available as inputs.")
),
column(6,
textInput("prev_run_name", "Previous run name", value=default_prev_run_name),
bsTooltip("secrun_use", "Edit the default previous run name if necessary.")
)
)
)
),
column(4,
actionButton("goButton_JSON", "Run Alfresco", class="btn-block"),
bsTooltip("goButton_JSON", "ALFRESCO will launch on the Atlas cluster only if running this app from the Eris server. Please wait a moment for the call to complete. When successful, a printout of the command line call to sbatch on Atlas will apear to the right.", placement="top")
)
),
style="background-color: rgba(255, 255, 255, 0.9);")
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/p_base.R
\name{p_base}
\alias{p_base}
\title{Base Install Packages}
\usage{
p_base(base.only = TRUE, open = FALSE, basemarker = "***")
}
\arguments{
\item{base.only}{logical. If \code{TRUE} a character vector of only base
install packages is returned.}
\item{open}{logical. If \code{TRUE} opens the directory of the base install
packages.}
\item{basemarker}{Character string. The string to append to mark which
packages are part of the default packages.}
}
\description{
List just base packages or list all the packages in the local library and
mark those in a base install.
}
\note{
Packages that are loaded when R starts are marked with the \code{basemarker} string (\code{"***"} by default).
}
\examples{
\dontrun{
p_base()
p_base(TRUE)
}
}
\seealso{
\code{\link[base]{getOption}}
}
\keyword{base}
\keyword{package}
| /man/p_base.Rd | no_license | cran/pacman | R | false | true | 904 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/p_base.R
\name{p_base}
\alias{p_base}
\title{Base Install Packages}
\usage{
p_base(base.only = TRUE, open = FALSE, basemarker = "***")
}
\arguments{
\item{base.only}{logical. If \code{TRUE} a character vector of only base
install packages is returned.}
\item{open}{logical. If \code{TRUE} opens the directory of the base install
packages.}
\item{basemarker}{Character string. The string to append to mark which
packages are part of the default packages.}
}
\description{
List just base packages or list all the packages in the local library and
mark those in a base install.
}
\note{
Packages that are loaded when R starts are marked with the \code{basemarker} string (\code{"***"} by default).
}
\examples{
\dontrun{
p_base()
p_base(TRUE)
}
}
\seealso{
\code{\link[base]{getOption}}
}
\keyword{base}
\keyword{package}
|
"pwm.ub" <-
function(x, nmom = 5, sort = TRUE) {
    # Unbiased probability-weighted moments: delegate the computation to
    # pwm() and relabel the source field on the returned object.
    out <- pwm(x, nmom = nmom, sort = sort)
    out$source <- "pwm.ub"
    out
}
| /R/pwm.ub.R | no_license | wasquith/lmomco | R | false | false | 114 | r | "pwm.ub" <-
function(x,nmom=5,sort=TRUE) {
z <- pwm(x,nmom=nmom,sort=sort)
z$source <- "pwm.ub"
return(z)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/files.R
\name{listFiles}
\alias{listFiles}
\title{List files}
\usage{
listFiles(path = "")
}
\arguments{
\item{path}{The path of directory.}
}
\value{
A list of available files and metadata, including path, type, size.
}
\description{
List files available on the portal.
}
\examples{
\donttest{
listFiles("data")
}
}
| /man/listFiles.Rd | permissive | smmtw/ROpenDataSMM | R | false | true | 395 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/files.R
\name{listFiles}
\alias{listFiles}
\title{List files}
\usage{
listFiles(path = "")
}
\arguments{
\item{path}{The path of directory.}
}
\value{
A list of available files and metadata, including path, type, size.
}
\description{
List files available on the portal.
}
\examples{
\donttest{
listFiles("data")
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{TwoPointCrossOverOnDoublesUsingBytes}
\alias{TwoPointCrossOverOnDoublesUsingBytes}
\title{Two-point Crossover operation on the two vectors of doubles using their byte representations}
\usage{
TwoPointCrossOverOnDoublesUsingBytes(d1, d2, cutpoint1, cutpoint2)
}
\arguments{
\item{d1}{A vector of doubles of the first parent}
\item{d2}{A vector of doubles of the second parent}
\item{cutpoint1}{An integer between 1 and chromosome length for crossover cutting}
\item{cutpoint2}{An integer between cutpoint1 and chromosome length for crossover cutting}
}
\value{
List of two double vectors of offspring
}
\description{
This function is a C++ wrapper for crossing-over of two double vectors of candidate solutions using their byte representations
}
\examples{
d1 <- runif(3)
d2 <- runif(3)
cutpoints <- sort(sample(1:(length(d1)*SizeOfDouble()), 2, replace = FALSE))
offspring <- TwoPointCrossOverOnDoublesUsingBytes(d1,d2,cutpoints[1], cutpoints[2])
print("Parents:")
print(d1)
print(d2)
print("Offspring:")
print(offspring[[1]])
print(offspring[[2]])
}
\author{
Mehmet Hakan Satman - mhsatman@istanbul.edu.tr
}
\seealso{
TwoPointCrossOver
OnePointCrossOver
UniformCrossOver
OnePointCrossOverOnDoublesUsingBytes
}
| /man/TwoPointCrossOverOnDoublesUsingBytes.Rd | no_license | jbytecode/mcga | R | false | true | 1,316 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{TwoPointCrossOverOnDoublesUsingBytes}
\alias{TwoPointCrossOverOnDoublesUsingBytes}
\title{Two-point Crossover operation on the two vectors of doubles using their byte representations}
\usage{
TwoPointCrossOverOnDoublesUsingBytes(d1, d2, cutpoint1, cutpoint2)
}
\arguments{
\item{d1}{A vector of doubles of the first parent}
\item{d2}{A vector of doubles of the second parent}
\item{cutpoint1}{An integer between 1 and chromosome length for crossover cutting}
\item{cutpoint2}{An integer between cutpoint1 and chromosome length for crossover cutting}
}
\value{
List of two double vectors of offspring
}
\description{
This function is a C++ wrapper for crossing-over of two double vectors of candidate solutions using their byte representations
}
\examples{
d1 <- runif(3)
d2 <- runif(3)
cutpoints <- sort(sample(1:(length(d1)*SizeOfDouble()), 2, replace = FALSE))
offspring <- TwoPointCrossOverOnDoublesUsingBytes(d1,d2,cutpoints[1], cutpoints[2])
print("Parents:")
print(d1)
print(d2)
print("Offspring:")
print(offspring[[1]])
print(offspring[[2]])
}
\author{
Mehmet Hakan Satman - mhsatman@istanbul.edu.tr
}
\seealso{
TwoPointCrossOver
OnePointCrossOver
UniformCrossOver
OnePointCrossOverOnDoublesUsingBytes
}
|
# model_maineffects.R - DESC
# OM/model_maineffects.R
# Copyright Iago MOSQUEIRA (WMR), 2020
# Author: Iago MOSQUEIRA (WMR) <iago.mosqueira@wur.nl>
# Modified: Daniela Rosa (IPMA)
#
# Distributed under the terms of the EUPL-1.2
library(data.table)
library(icesTAF)
library(ss3om)
library(ioswomse)
library(doParallel)
registerDoParallel(3)
load("om/data/cpues.Rdata")
load("om/data/lorenzen.Rdata")
# SETUP runs for grid corners
# One-factor-at-a-time ("main effects") design: each run perturbs a single
# factor away from `basecase` below while holding all other factors fixed.
full <- list(
  # Natural mortality, M (999 presumably encodes the age-varying Lorenzen M
  # loaded above from lorenzen.Rdata -- TODO confirm)
  M=c(0.20, 0.30, 999),
  # SR steepness
  steepness=c(0.6, 0.75, 0.9),
  # Rec variance
  sigmaR=c(0.2, 0.6),
  # Weight of length samples
  ess=c(2, 20),
  # Trends in LL catchability
  llq=c(1, 1.01),
  # Growth + maturity
  growmat=c("farley", "wang"),
  # CPUEs
  cpue=c("jappt", "jap", "twnpt"),
  # Area CPUE scaling factor
  scaling=c("area", "catch", "biomass", "region"),
  # LL selectivity model
  llsel=c("DoNorm", "Logistic")
)
# Size of the full factorial grid (reference only; not used in this chunk).
nsam <- prod(unlist(lapply(full, length)))
# --- RUN grid
basecase <- list(M=0.25, steepness=0.8, sigmaR=0.2, ess=5, llq=1, growmat="farley", cpue="jappt", scaling="biomass",llsel="DoNorm" )
# For each factor, keep only the levels that differ from the base case value
# (ac() looks like an as.character shorthand from FLR -- TODO confirm).
meffs <- mapply(function(x, y) x[ac(x) != ac(y)], full, basecase)
# One grid row per main-effect deviation: start from copies of the base case,
grid <- rbindlist(lapply(rep(list(basecase),
  sum(unlist(lapply(meffs, length)))), as.data.table))
# record which factor each row perturbs in `col`,
grid[, col := rep(names(meffs), unlist(lapply(meffs, length)))]
# and overwrite that factor's value with its alternative level.
for(i in names(grid))
  grid[col == i, i] <- meffs[[i]]
grid <- ss3om::nameGrid(grid, from=1)
# Write the SS3 input folders for every run and persist the grid definition.
grid <- setioswogrid(grid, cpues=cpues, dir = "om/model/maineffects",
  base = "om/data/sa", name = "swo", write = TRUE)
save(grid, file = "om/model/maineffects/grid.Rdata")
lapply(file.path("om/model/maineffects", grid$id), prepareRetro)
# RUN models
# ls | parallel -j10 --bar --progress '(cd {}; ss3)'
# --- LOAD results
#LOAD SA
sa <- loadOMS(dir="data/gridIO4")
res_sa <- sa$results
stk_sa <- simplify(sa$stock, "area")
range(stk_sa, c("minfbar", "maxfbar")) <- c(2,8)
save(sa, file="model/gridIO4.Rdata", compress="xz")
# tiff(file="stock_sa.tiff", bg = "white", compression="lzw",width = 32,
# height = 20, units = "cm", res = 300)
plot(stk_sa)
#dev.off()
#LOAD MAINEFFECTS
load("model/maineffects/grid.Rdata")
maineffects <- loadOMS(dir="model/maineffects", grid=grid, combine=FALSE)
save(maineffects, file="model/maineffects/main.Rdata", compress="xz")
res <- maineffects$res
stk <- maineffects$stock
srr <- maineffects$sr
# --- DIAGNOSTICS
# 1. CHECK convergence < 1e-4
id1 <- res$Convergence_Level > 1e-4
# RE-RUN with starter.ss$$init_values_src = 1
# starter <- SS_readstarter('../sa/starter.ss', verbose=FALSE)
# starter$jitter_fraction <- 0.25
#
# apply(grid[id1, "id"],1,function (x) SS_writestarter(starter, dir=file.path("model/maineffects", x), overwrite=TRUE))
# for(i in grid[id1, 'id']) {
# SS_writestarter(starter, dir=file.path("model/maineffects", i), overwrite=TRUE)
# }
labels <- c("M=0.2","M=0.3", "M=Lorenzen", "h=0.6","h=0.75", "h=0.9", "sigmaR=0.6", "ess=2","ess=20",
"llq=1.01","growmat=Wang", "cpue=jpn","cpue=twnpt", "scaling=area", "scaling=catch", "llsel=Logistic")
# --- PLOTS: sensitivity of derived quantities to each main effect ------------
# One bar per main-effects run; horizontal reference lines show the SA grid
# envelope (solid = min and max) and one SA reference run (dashed).
# NOTE(review): element 9 of the SA results is used as the reference run, and
# run 16 is dropped from the first four plots, exactly as in the original
# code -- confirm both indices against the grid definition.
# Helper: bar plot of one results column against an SA-grid reference vector.
#   dat: results table (one row per run); metric: column name (string);
#   ref: SA-grid vector of the same quantity; ylab_expr: y-axis expression.
# Uses the .data pronoun so the column can be selected by name.
plot_main_effect <- function(dat, metric, ref, ylab_expr) {
  ggplot() +
    geom_bar(data = dat, aes(x = factor(iter), y = .data[[metric]], fill = col),
             stat = "identity") +
    geom_hline(yintercept = min(ref)) +
    geom_hline(yintercept = max(ref)) +
    geom_hline(yintercept = ref[9], linetype = 2) +
    scale_x_discrete("", labels = labels) +
    ylab(ylab_expr) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1))
}
# PLOT SSB2018
plot_main_effect(res[-16, ], "SSB_endyr", res_sa$SSB_endyr, expression(SSB[2018]))
# PLOT SSB_Virgin
plot_main_effect(res[-16, ], "SSB_Virgin", res_sa$SSB_Virgin, expression(SSB[0]))
# PLOT status
plot_main_effect(res[-16, ], "SSB_status", res_sa$SSB_status,
                 expression(SSB[2018] / SSB[MSY]))
# PLOT depletion
plot_main_effect(res[-16, ], "SSB_depletion", res_sa$SSB_depletion,
                 expression(SSB[2018] / SSB[0]))
# PLOT SSBMSY/SSBVir: uses all runs and a different x scale, so drawn directly.
res_sa[, SSBMSYSSBVir := SSB_MSY / SSB_Virgin]
ggplot() +
  geom_bar(data = res, aes(x = factor(iter), y = SSB_MSY / SSB_Virgin, fill = col),
           stat = "identity") +
  geom_hline(yintercept = min(res_sa$SSBMSYSSBVir)) +
  geom_hline(yintercept = max(res_sa$SSBMSYSSBVir)) +
  geom_hline(yintercept = res_sa$SSBMSYSSBVir[9], linetype = 2) +
  scale_x_discrete(breaks = 1:17, labels = labels)
# Recruitment and SSB trajectories across runs, faceted by quantity and stock.
plot(stk, metrics=list(Rec=rec, SSB=ssb)) + facet_grid(qname~stock, scales="free")
# Default stock metrics, faceted by quantity and area.
plot(stk)+facet_grid(qname~area, scales="free")
#Length comp fits
# Reload raw SS3 output for run 17 (logistic LL selectivity) to inspect fits.
essLog <- readOutputss3("model/maineffects/17-M0.25_sigmaR0.2_steepness0.80_ess5_llq1.00_growmatfarley_cpuejappt_scalingbiomass_llselLogistic_colllsel_iter17")
# Subplot 21: aggregated length-composition fits.
# NOTE(review): SSplotComps appears to come from r4ss -- confirm it is in
# scope via the attached packages.
SSplotComps(essLog,subplot=21)
| /OM/model_maineffects.R | no_license | iotcwpm/SWO | R | false | false | 5,548 | r | # model_maineffects.R - DESC
# OM/model_maineffects.R
# Copyright Iago MOSQUEIRA (WMR), 2020
# Author: Iago MOSQUEIRA (WMR) <iago.mosqueira@wur.nl>
# Modified: Daniela Rosa (IPMA)
#
# Distributed under the terms of the EUPL-1.2
library(data.table)
library(icesTAF)
library(ss3om)
library(ioswomse)
library(doParallel)
# Register 3 parallel workers for any foreach/%dopar% loops used downstream.
registerDoParallel(3)
# Inputs prepared by the data step: CPUE indices and Lorenzen-M objects.
load("om/data/cpues.Rdata")
load("om/data/lorenzen.Rdata")
# SETUP runs for grid corners
# Factor levels for every axis of the grid. NOTE(review): M = 999 presumably
# flags the age-varying Lorenzen option (cf. lorenzen.Rdata and the
# "M=Lorenzen" plot label) -- confirm.
full <- list(
# Natural mortality, M
M=c(0.20, 0.30, 999),
# SR steepness
steepness=c(0.6, 0.75, 0.9),
# Rec variance
sigmaR=c(0.2, 0.6),
# Weight of length samples
ess=c(2, 20),
# Trends in LL catchability
llq=c(1, 1.01),
# Growth + maturity
growmat=c("farley", "wang"),
# CPUEs
cpue=c("jappt", "jap", "twnpt"),
# Area CPUE scaling factor
scaling=c("area", "catch", "biomass", "region"),
# LL selectivity model
llsel=c("DoNorm", "Logistic")
)
# Size of the full factorial design (for reference; only main effects are run).
nsam <- prod(unlist(lapply(full, length)))
# --- RUN grid
# Reference (basecase) level for every factor.
basecase <- list(M=0.25, steepness=0.8, sigmaR=0.2, ess=5, llq=1, growmat="farley", cpue="jappt", scaling="biomass",llsel="DoNorm" )
# For each factor, keep only the levels that differ from the basecase.
# NOTE(review): ac() is presumably FLCore's as.character() shorthand -- confirm.
meffs <- mapply(function(x, y) x[ac(x) != ac(y)], full, basecase)
# One basecase row per off-basecase level; each row gets one factor changed.
grid <- rbindlist(lapply(rep(list(basecase),
sum(unlist(lapply(meffs, length)))), as.data.table))
# Record which factor (column) each run perturbs ...
grid[, col := rep(names(meffs), unlist(lapply(meffs, length)))]
# ... then overwrite that factor's value with its alternative level.
for(i in names(grid))
grid[col == i, i] <- meffs[[i]]
# Name runs and write one SS3 input folder per row of the grid.
grid <- ss3om::nameGrid(grid, from=1)
grid <- setioswogrid(grid, cpues=cpues, dir = "om/model/maineffects",
base = "om/data/sa", name = "swo", write = TRUE)
save(grid, file = "om/model/maineffects/grid.Rdata")
# Prepare retrospective run folders for each grid member.
lapply(file.path("om/model/maineffects", grid$id), prepareRetro)
# RUN models
# Models are run outside R, e.g. with GNU parallel:
# ls | parallel -j10 --bar --progress '(cd {}; ss3)'
# --- LOAD results
#LOAD SA
# Stock-assessment grid results, used below as the reference envelope.
sa <- loadOMS(dir="data/gridIO4")
res_sa <- sa$results
# Collapse areas into a single stock and set the Fbar age range to 2-8.
stk_sa <- simplify(sa$stock, "area")
range(stk_sa, c("minfbar", "maxfbar")) <- c(2,8)
save(sa, file="model/gridIO4.Rdata", compress="xz")
# Optional tiff output around the plot (left commented):
# tiff(file="stock_sa.tiff", bg = "white", compression="lzw",width = 32,
# height = 20, units = "cm", res = 300)
plot(stk_sa)
#dev.off()
#LOAD MAINEFFECTS
# Reload the grid definition, then the fitted main-effects runs; combine=FALSE
# keeps per-run results separate.
load("model/maineffects/grid.Rdata")
maineffects <- loadOMS(dir="model/maineffects", grid=grid, combine=FALSE)
# Cache the combined main-effects results, then unpack the pieces used below.
save(maineffects, file="model/maineffects/main.Rdata", compress="xz")
res <- maineffects$res
stk <- maineffects$stock
srr <- maineffects$sr
# --- DIAGNOSTICS
# 1. CHECK convergence < 1e-4
# Flag runs whose reported convergence level exceeds the 1e-4 threshold;
# these are candidates for a jittered re-run (commented recipe below).
id1 <- res$Convergence_Level > 1e-4
# RE-RUN with starter.ss$$init_values_src = 1
# starter <- SS_readstarter('../sa/starter.ss', verbose=FALSE)
# starter$jitter_fraction <- 0.25
#
# apply(grid[id1, "id"],1,function (x) SS_writestarter(starter, dir=file.path("model/maineffects", x), overwrite=TRUE))
# for(i in grid[id1, 'id']) {
# SS_writestarter(starter, dir=file.path("model/maineffects", i), overwrite=TRUE)
# }
# X-axis labels, one per main-effects run (the factor level changed from the
# basecase). NOTE(review): order must match the row order of `res` -- verify.
labels <- c("M=0.2","M=0.3", "M=Lorenzen", "h=0.6","h=0.75", "h=0.9", "sigmaR=0.6", "ess=2","ess=20",
"llq=1.01","growmat=Wang", "cpue=jpn","cpue=twnpt", "scaling=area", "scaling=catch", "llsel=Logistic")
# --- PLOTS: sensitivity of derived quantities to each main effect ------------
# One bar per main-effects run; horizontal reference lines show the SA grid
# envelope (solid = min and max) and one SA reference run (dashed).
# NOTE(review): element 9 of the SA results is used as the reference run, and
# run 16 is dropped from the first four plots, exactly as in the original
# code -- confirm both indices against the grid definition.
# Helper: bar plot of one results column against an SA-grid reference vector.
#   dat: results table (one row per run); metric: column name (string);
#   ref: SA-grid vector of the same quantity; ylab_expr: y-axis expression.
# Uses the .data pronoun so the column can be selected by name.
plot_main_effect <- function(dat, metric, ref, ylab_expr) {
  ggplot() +
    geom_bar(data = dat, aes(x = factor(iter), y = .data[[metric]], fill = col),
             stat = "identity") +
    geom_hline(yintercept = min(ref)) +
    geom_hline(yintercept = max(ref)) +
    geom_hline(yintercept = ref[9], linetype = 2) +
    scale_x_discrete("", labels = labels) +
    ylab(ylab_expr) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1))
}
# PLOT SSB2018
plot_main_effect(res[-16, ], "SSB_endyr", res_sa$SSB_endyr, expression(SSB[2018]))
# PLOT SSB_Virgin
plot_main_effect(res[-16, ], "SSB_Virgin", res_sa$SSB_Virgin, expression(SSB[0]))
# PLOT status
plot_main_effect(res[-16, ], "SSB_status", res_sa$SSB_status,
                 expression(SSB[2018] / SSB[MSY]))
# PLOT depletion
plot_main_effect(res[-16, ], "SSB_depletion", res_sa$SSB_depletion,
                 expression(SSB[2018] / SSB[0]))
# PLOT SSBMSY/SSBVir: uses all runs and a different x scale, so drawn directly.
res_sa[, SSBMSYSSBVir := SSB_MSY / SSB_Virgin]
ggplot() +
  geom_bar(data = res, aes(x = factor(iter), y = SSB_MSY / SSB_Virgin, fill = col),
           stat = "identity") +
  geom_hline(yintercept = min(res_sa$SSBMSYSSBVir)) +
  geom_hline(yintercept = max(res_sa$SSBMSYSSBVir)) +
  geom_hline(yintercept = res_sa$SSBMSYSSBVir[9], linetype = 2) +
  scale_x_discrete(breaks = 1:17, labels = labels)
# Recruitment and SSB trajectories across runs, faceted by quantity and stock.
plot(stk, metrics=list(Rec=rec, SSB=ssb)) + facet_grid(qname~stock, scales="free")
# Default stock metrics, faceted by quantity and area.
plot(stk)+facet_grid(qname~area, scales="free")
#Length comp fits
# Reload raw SS3 output for run 17 (logistic LL selectivity) to inspect fits.
essLog <- readOutputss3("model/maineffects/17-M0.25_sigmaR0.2_steepness0.80_ess5_llq1.00_growmatfarley_cpuejappt_scalingbiomass_llselLogistic_colllsel_iter17")
# Subplot 21: aggregated length-composition fits.
# NOTE(review): SSplotComps appears to come from r4ss -- confirm it is in
# scope via the attached packages.
SSplotComps(essLog,subplot=21)
|
# Plot a timeline of commit activity, highlighting one contributor/repo.
#   person: regex matched against the `username` column; "" highlights all.
#   repo:   regex matched against the `repo` column; "" highlights all.
# Reads a global data frame `data` with columns `username`, `repo` and
# `timestamp`. NOTE(review): consider passing `data` as an argument instead
# of relying on a global.
plot.timeline <- function(person = "", repo = "") {
  # Project a timestamp onto the reference day 2016-01-01 so that only the
  # time-of-day component varies (lubridate replacement functions).
  localized.day <- function(day) {
    localized <- day
    month(localized) <- 1
    day(localized) <- 1
    year(localized) <- 2016
    localized
  }
  # An empty filter means "match everything": fall back to the '.' regex.
  # Plain if/else replaces the original scalar ifelse(nchar(x), ...), which
  # relied on implicit integer -> logical coercion.
  person.filter <- if (nchar(person) > 0) person else '.'
  repo.filter <- if (nchar(repo) > 0) repo else '.'
  data %>%
    mutate(is.highlighted = (grepl(person.filter, username) & grepl(repo.filter, repo))) %>%
    mutate(local.day = localized.day(timestamp)) %>%
    ggplot() +
    aes(x = timestamp, y = local.day, color = is.highlighted, alpha = is.highlighted) +
    geom_point() +
    # NOTE(review): character limits on a datetime axis; presumably meant to
    # reverse the axis (later date first) -- verify rendering.
    ylim('2016/01/02', '2016/01/01') +
    labs(x = 'Date', y = 'Time of Day') +
    scale_colour_manual(values = c("grey", "black")) +
    scale_alpha_manual(values = c(.1, 1)) +
    # NOTE(review): no size aesthetic is mapped, so this scale is inert.
    scale_size_manual(values = c(1, 3)) +
    theme(legend.position="none")
} | /preboots/contributor-timeline.R | no_license | SivanMehta/shiny-docker-example | R | false | false | 860 | r | plot.timeline <- function(person = "", repo = "") {
  # Project a timestamp onto the reference day 2016-01-01 so that only the
  # time-of-day component varies (lubridate replacement functions).
  localized.day <- function(day) {
    localized <- day
    month(localized) <- 1
    day(localized) <- 1
    year(localized) <- 2016
    localized
  }
  # An empty filter means "match everything": fall back to the '.' regex.
  # NOTE(review): ifelse() on a scalar relies on integer -> logical coercion
  # of nchar(); a plain if/else would state the intent more clearly.
  person.filter <- ifelse(nchar(person), person, '.')
  repo.filter <- ifelse(nchar(repo), repo, '.')
  data %>%
    mutate(is.highlighted = (grepl(person.filter, username) & grepl(repo.filter, repo))) %>%
    mutate(local.day = localized.day(timestamp)) %>%
    ggplot() +
    aes(x = timestamp, y = local.day, color = is.highlighted, alpha = is.highlighted) +
    geom_point() +
    # NOTE(review): character limits on a datetime axis; presumably meant to
    # reverse the axis (later date first) -- verify rendering.
    ylim('2016/01/02', '2016/01/01') +
    labs(x = 'Date', y = 'Time of Day') +
    scale_colour_manual(values = c("grey", "black")) +
    scale_alpha_manual(values = c(.1, 1)) +
    # NOTE(review): no size aesthetic is mapped, so this scale is inert.
    scale_size_manual(values = c(1, 3)) +
    theme(legend.position="none")
}
## Assignment: R codes to create plot4.png
## Included code to get data and unzip
## Uncomment the following code if you want to download and unzip (assumes Rstudio in MAC)
#dataset_url<- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
#download.file(dataset_url, "exdata-data.zip",method="curl")
#dateDownloaded <- date()
#unzip("exdata-data.zip", exdir = "./exdata-data")
#list.files("./exdata-data")
## Read data; "?" marks missing values in this dataset
powerdata <- read.table("./exdata-data/household_power_consumption.txt",
                        na.strings = "?", header = TRUE, sep = ";")
## Keep only the two days of interest (%in% replaces chained == / |)
powerdata_sub <- powerdata[powerdata$Date %in% c("1/2/2007", "2/2/2007"), ]
## Combine Date and Time into one DateTime column; the format string now
## includes the single space that paste() inserts between the two fields
powerdata_sub$DateTime <- strptime(paste(powerdata_sub$Date, powerdata_sub$Time),
                                   "%d/%m/%Y %H:%M:%S")
## Check and inspect new data columns
str(powerdata_sub)
## Open the png device (480x480, transparent background)
png(filename = "plot4.png", width = 480, height = 480, bg = "transparent")
## 2x2 grid of panels
par(mfrow = c(2, 2))
with(powerdata_sub, {
  ## subplot 1,1: global active power over time
  plot(DateTime, Global_active_power, type = "l", xlab = " ", ylab = "Global Active Power")
  ## subplot 1,2: voltage over time
  plot(DateTime, Voltage, type = "l", xlab = " datetime ", ylab = "Voltage")
  ## subplot 2,1: the three sub-metering series on one panel
  plot(DateTime, Sub_metering_1, type = "l", col = "black",
       xlab = " ", ylab = "Energy sub metering")
  lines(DateTime, Sub_metering_2, col = "red")
  lines(DateTime, Sub_metering_3, col = "blue")
  legend("topright", lty = 1, col = c("black", "red", "blue"), bty = "n",
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  ## subplot 2,2: global reactive power over time
  plot(DateTime, Global_reactive_power, type = "l", xlab = "datetime ")
})
## Close png device
dev.off() | /figure/plot4.R | no_license | marrycv/ExData_Plotting1 | R | false | false | 1,964 | r | ## Assignment: R codes to create plot4.png
## Included code to get data and unzip
## Uncomment the following code if you want to download and unzip (assumes Rstudio in MAC)
#dataset_url<- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
#download.file(dataset_url, "exdata-data.zip",method="curl")
#dateDownloaded <- date()
#unzip("exdata-data.zip", exdir = "./exdata-data")
#list.files("./exdata-data")
## Read data using read.table ("?" marks missing values in this dataset)
## NOTE(review): prefer header = TRUE over the reassignable shorthand T.
powerdata<-read.table("./exdata-data/household_power_consumption.txt",
na.strings = "?",header = T, sep = ";")
## Subset the data for dates 1/2/2007 and 2/2/2007 only
powerdata_sub<-powerdata[(powerdata$Date == "1/2/2007" | powerdata$Date == "2/2/2007"),]
## Create a new column DateTime to combine the subset date and time
## NOTE(review): paste() inserts a space between Date and Time but the format
## string has none; this appears to parse anyway, but "%d/%m/%Y %H:%M:%S"
## would state the intent explicitly -- verify.
powerdata_sub$DateTime<-strptime(paste(powerdata_sub$Date,powerdata_sub$Time),"%d/%m/%Y%H:%M:%S")
## Check and inspect new data columns by using str(powerdata_sub)
str(powerdata_sub)
## Create plot, open png device and specify parameters
png(filename = "plot4.png",width = 480, height = 480, bg = "transparent")
## Initialize plot and specify parameters (2x2 grid of panels)
par(mfrow = c(2,2))
with(powerdata_sub,{
## subplot 1,1: global active power over time
plot(DateTime, Global_active_power, type = "l", xlab = " ", ylab = "Global Active Power")
## subplot 1,2: voltage over time
plot(DateTime, Voltage, type = "l", xlab = " datetime ", ylab = "Voltage")
## subplot 2,1: the three sub-metering series on one panel
plot(DateTime, Sub_metering_1,type = "l", col = "black",
xlab = " ", ylab = "Energy sub metering")
## Add plots and annotations
lines(DateTime, Sub_metering_2,col = "red")
lines(DateTime, Sub_metering_3,col = "blue")
legend("topright", lty = 1,col = c("black", "red","blue"), bty = "n",
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
## subplot 2,2: global reactive power over time
plot(DateTime,Global_reactive_power, type = "l", xlab = "datetime ")
})
## Close png device
dev.off() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.