blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7b77fb1c62a8a71752bf3a461f8446cdbeb65e60
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/stylo/examples/zeta.chisquare.Rd.R
|
4a7443a29e948eb666939df7cebc4987274840a7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 245
|
r
|
zeta.chisquare.Rd.R
|
# Auto-extracted runnable example for stylo::zeta.chisquare (compares two
# subcorpora with a chi-square variant of Craig's Zeta). The example body is
# wrapped in "Not run" because it needs user-supplied data.
library(stylo)
### Name: zeta.chisquare
### Title: Compare two subcorpora using a home-brew variant of Craig's Zeta
### Aliases: zeta.chisquare
### ** Examples
## Not run: 
##D zeta.chisquare(input.data, filter.threshold)
## End(Not run)
|
d3044e6dff6d08c19d2842394f5cc779da15098b
|
5b04bc3cbd6f8f59c9b9e7f0963fcf94b10ab95f
|
/man/yeastInterProDesc.Rd
|
4a120b0595ffd730cd5ba73016f2426d4f5c74a6
|
[] |
no_license
|
Distue/termEnrichment
|
c895c83a4d5f49aa1a45adbb7f46b512e229da4c
|
5783ce7a7b382ade5dcbcd3a53e680edeb41445c
|
refs/heads/master
| 2021-01-19T11:14:22.428019
| 2020-03-04T12:24:39
| 2020-03-04T12:24:39
| 87,942,469
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 394
|
rd
|
yeastInterProDesc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{yeastInterProDesc}
\alias{yeastInterProDesc}
\title{yeast inter pro description}
\format{\code{tibble} instance}
\source{
Biomart, Ensembl 88
}
\usage{
yeastInterProDesc
}
\value{
\code{tibble} instance
}
\description{
yeast inter pro description
}
\author{
Thomas Schwarzl, 2017-04-11
}
|
260312fa5eaec47e5cd8211e1ddef0ce3644238b
|
87909bcf22ecc26b3cdb0f2e72e605114317bb37
|
/R/additional.R
|
85abc1863e75cd393118ad8d712f2b8d4d641482
|
[] |
no_license
|
rbagd/dynfactoR
|
4322eff7461f89ca283f012dc2ea0b0cf7017f62
|
cd92d901e5dce4d1d3c68f0f336a632035b872d4
|
refs/heads/master
| 2022-11-11T01:00:47.865367
| 2022-10-17T07:25:42
| 2022-10-17T07:25:42
| 38,105,065
| 24
| 11
| null | null | null | null |
UTF-8
|
R
| false
| false
| 609
|
r
|
additional.R
|
#' Estimate a p-th order vector autoregressive (VAR) model
#'
#' Stacks the p lagged observations into a regressor matrix and solves the
#' least-squares normal equations.
#'
#' @param x Data matrix (T x n): T observations of n series
#' @param p Maximum lag order, i.e. VAR(p) will be estimated
#' @return Estimated parameter matrix, residuals and regression model
#'   independent and dependent variables, as a list with elements
#'   \code{Y} (dependent block), \code{X} (lagged regressors),
#'   \code{A} (coefficients) and \code{res} (residuals).
#' @examples
#' x <- matrix(rnorm(50*2), nrow=50, ncol=2)
#' VAR(x, 2)
VAR <- function(x, p) {
  # Renamed from `T`, which shadowed the TRUE alias.
  n_obs <- nrow(x)
  # Dependent block: observations p+1 .. T.
  Y <- x[(p + 1):n_obs, ]
  # Regressors: the i-th lag of x, column-bound for i = 1..p.
  # seq_len() is safe for p = 0 (the original 1:p would loop over c(1, 0)).
  X <- NULL
  for (i in seq_len(p)) {
    X <- cbind(X, x[(p + 1 - i):(n_obs - i), ])
  }
  # OLS normal equations: A = (X'X)^{-1} X'Y.
  A <- solve(t(X) %*% X) %*% t(X) %*% Y
  res <- Y - X %*% A
  list(Y = Y, X = X, A = A, res = res)
}
|
a1ee3d4cf69013c1f2f03171b1fc409ba762be09
|
8e20241c4310c0c52d11a3fd505c458c330312ef
|
/metalcomposit-thermal-conductivity/server.R
|
5da5a7c372df19d825ec938ddb48ede2964e3191
|
[] |
no_license
|
AnatoliiPotapov/shiny
|
efe54cc1ec6bad04552a88176d58b3c60bc55f8b
|
6d208a040d13129a72cfc55086a75f4ec0e1ccea
|
refs/heads/master
| 2020-12-28T21:18:09.739405
| 2015-11-25T12:16:31
| 2015-11-25T12:16:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,531
|
r
|
server.R
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://www.rstudio.com/shiny/
#
library(shiny)
library(ggplot2)
library(reshape2)

# Project-local helpers: fitting routines and plotting utilities.
source("./method/method.R")
source("./math/fit.R")
source("./plot/plot.R")

# NOTE(review): this shadows base::pi with a LESS precise value for every
# computation in this app. Kept for backward compatibility; consider
# deleting the assignment and using the builtin constant instead.
pi <- 3.1415926
# Collapse the six header lines of a measurement file into one HTML string,
# one source line per rendered row (joined with <br/>).
ParseInfo <- function(data) {
  header_lines <- data[1:6]
  HTML(paste(header_lines, collapse = "<br/>"))
}
# Parse the numeric body of a measurement file.
#
# `data` is the full vector of file lines: six header lines followed by
# space-separated numeric records. Each record is read as
# "cycle wave1 wave2 unnown1 unnown2" (first five fields; extra fields are
# ignored, missing ones become NA).
#
# Returns a numeric matrix with one row per record and those five columns.
ParseNumericData <- function(data) {
  n_rows <- length(data) - 6  # records follow the 6-line header
  col_names <- c("cycle", "wave1", "wave2", "unnown1", "unnown2")
  # Preallocate instead of growing five vectors inside the loop; seq_len()
  # also fixes the original 1:(length(data)-6) bug for files with no records.
  numericData <- matrix(NA_real_, nrow = n_rows, ncol = 5,
                        dimnames = list(NULL, col_names))
  for (i in seq_len(n_rows)) {
    numericVector <- as.numeric(unlist(strsplit(data[i + 6], " ")))
    numericData[i, ] <- numericVector[1:5]
  }
  return(numericData)
}
# Shiny server for the metal-composite thermal-conductivity app: two .dat
# measurement files ("near" / "far" probe position) are uploaded, curve-fitted,
# and combined with sample parameters into a conductivity estimate.
# UI labels and status messages are in Russian.
server <- function(input, output) {

  # Thermal conductivity, W/(m*K). Combines the user-supplied sample
  # properties (density rho, specific heat Cud, probe distances lb/ld in mm)
  # with the two fitted parameters; 10^6 converts the mm^2 distance term.
  # Parameter meanings come from CalculateParameters() in ./method/method.R
  # -- TODO confirm there.
  result <- reactive({
    output <- input$rho * input$Cud * (input$ld - input$lb)**2 / 2 / parameters()[[1]][[2]] /
      log((parameters()[[1]][[1]]), base = exp(1)) / (10 ** 6)
    return(output)
  })

  # Fitted parameters derived from both files' fits and the period input.
  parameters <- reactive({
    output <- list(
      Parameters <- CalculateParameters(nls_first(), nls_second(), input$period)
    )
    return(output)
  })

  # Raw lines of the first uploaded file, or NULL when nothing is uploaded.
  isFile_first <- reactive({
    inFile <- input$inputFile_first
    if (is.null(inFile))
      return(NULL)
    return(readLines(inFile$datapath))
  })

  # Raw lines of the second uploaded file, or NULL when nothing is uploaded.
  isFile_second <- reactive({
    inFile <- input$inputFile_second
    if (is.null(inFile))
      return(NULL)
    return(readLines(inFile$datapath))
  })

  # Parsed numeric matrices (cycle/wave/... columns) for each file.
  numericData_first <- reactive({
    ParseNumericData(isFile_first())
  })
  numericData_second <- reactive({
    ParseNumericData(isFile_second())
  })

  # Curve fits for the first file: [[1]] baseline fit, [[2]] result-line fit.
  nls_first <- reactive({
    if (is.null(isFile_first())) return(NULL)
    output <- list()
    output[[1]] <- FitBaseline(numericData_first())
    output[[2]] <- FitResultLine(numericData_first())
    return(output)
  })

  # Curve fits for the second file: [[1]] baseline fit, [[2]] result-line fit.
  nls_second <- reactive({
    if (is.null(isFile_second())) return(NULL)
    output <- list()
    output[[1]] <- FitBaseline(numericData_second())
    output[[2]] <- FitResultLine(numericData_second())
    return(output)
  })

  # Upload status: reports which file(s) are still missing, or "OK".
  output$status <- renderPrint({
    if (is.null(isFile_first())) {
      print("Не загружен файл с данными (по ближнему положению).")
    }
    if (is.null(isFile_second())) {
      print("Не загружен файл с данными (по дальнему положению).")
    }
    if (!is.null(isFile_first()) && !is.null(isFile_second())) {
      print("OK")
    }
  })

  # File-upload box with the two .dat inputs and the status line.
  output$file_input <- renderUI({
    box(
      title = "Загрузка файлов .dat:",
      width = 12,
      fileInput('inputFile_first', 'Загрузка файла (по ближнему положению)',
                accept=c('.dat')),
      fileInput('inputFile_second', 'Загрузка файла (по дальнему положению)',
                accept=c('.dat')),
      h4("Статус: "),
      textOutput('status')
    )
  })

  # Fitted-curve plots; rendered only once both files are present.
  output$plot <- renderUI({
    if (!is.null(isFile_first()) && !is.null(isFile_second())) {
      data_first <- numericData_first()
      data_second <- numericData_second()
      box(
        title = "Графики фитирования кривых:",
        "По ближнему положению:",
        hr(),
        renderPlot(plot_ggplot(data_first, nls_first())),
        hr(),
        "По дальнему положению:",
        hr(),
        renderPlot(plot_ggplot(data_second, nls_second()))
      )
    }
  })

  # Result-line fit summaries for both files.
  output$data <- renderUI({
    if (!is.null(isFile_first()) && !is.null(isFile_second())) {
      data_first <- numericData_first()
      data_second <- numericData_second()
      box(
        title = "Параметры фитирования",
        "По ближнему положению:",
        hr(),
        renderPrint(summary(nls_first()[[2]])),
        hr(),
        "По дальнему положению:",
        hr(),
        renderPrint(summary(nls_second()[[2]]))
      )
    }
  })

  # Results panel: conductivity value, fitted parameters, baseline fits.
  output$markdown <- renderUI({
    if (!is.null(isFile_first()) && !is.null(isFile_second())) {
      box(
        title = "Результаты измерений:",
        width =12,
        includeMarkdown("./markdown/info.Rmd"),
        hr(),
        h3("Теплопроводность исследуемого образца, Ватт/ (метр*Кельвин):"),
        renderPrint(print(result())),
        hr(),
        h4("Параметры:"),
        renderPrint(print(parameters())),
        h4("Параметры фитирования задающих кривых:"),
        renderPrint(summary(nls_first()[[1]])),
        renderPrint(summary(nls_second()[[1]]))
      )
    }
  })

  # Sample-property inputs (period, density, specific heat, probe distances).
  output$settings <- renderUI({
    box(
      width =12,
      h4("Параметры образца"),
      numericInput("period", label = h5("Период, С:"), value = 100),
      numericInput("rho", label = h5("Плотность образца, kg/m^3:"), value = 6000),
      numericInput("Cud", label = h5("Удельная теплоемкость, Дж/(kg*K):"), value = 500),
      numericInput("lb", label = h5("Расстояние по ближнему положению, mm:"), value = 15),
      numericInput("ld", label = h5("Расстояние по дальнему положению, mm:"), value = 25)
    )
  })
}
|
116f3c1361307a1bfdc8ec6b5d01d0b7047d3dd5
|
4766928560ace79430e299a0cbfc56830338f989
|
/Week 2/gradientDescent.R
|
79c295f5d682a6717e329bdc845c2351ec9548d0
|
[] |
no_license
|
Tatortreiniger91/Coursera_Machine_Learning
|
7690e4e176fe70e1c2cda1018093d611df64cd49
|
19aa017fc6ce275eb3f40f7cf7c8b2147659343b
|
refs/heads/master
| 2020-03-11T03:29:24.646192
| 2018-05-17T14:32:57
| 2018-05-17T14:32:57
| 129,748,941
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 838
|
r
|
gradientDescent.R
|
#GRADIENTDESCENT Performs gradient descent to learn theta
#   theta = GRADIENTDESCENT(X, y, theta, alpha, num_iters) updates theta by
#   taking num_iters gradient steps with learning rate alpha
#
# Args:
#   X         design matrix (m x n)
#   y         response vector (length m)
#   theta     initial parameter vector (n x 1)
#   alpha     learning rate
#   num_iters number of gradient steps to take
#
# Returns a list with the final `theta` and `J_hist`, the cost after each
# step (via computeCost(), defined elsewhere in the course code).
gradientDescent <- function(X, y, theta, alpha, num_iters){
  m <- length(y)
  # BUG FIX: the iteration count was hard-coded to 1500, silently ignoring
  # the num_iters argument; honor it for both the history and the loop.
  J_hist <- rep(0, num_iters)
  for (i in seq_len(num_iters)) {
    # Gradient of the least-squares cost: (1/m) * X'(X theta - y).
    delta <- (1/m) * (t(X) %*% X %*% theta - t(X) %*% y)
    theta <- theta - (alpha * delta)
    J_hist[i] <- computeCost(X, y, theta)
  }
  result <- list(theta = theta, J_hist = J_hist)
  return(result)
}
|
90fe3f90717a57cc64f68862991864524a0bbcff
|
9bb3920636dfbf3ee32c19b3be6d265379bf5a28
|
/man/oceanTime_GetTimes.Rd
|
40d687de68f588c7fc40954749c0e5e9b6d49441
|
[
"MIT"
] |
permissive
|
wStockhausen/wtsROMS
|
549ccbbf04f4fd09f350243e503f454a0c0a489f
|
ec48097404da8e00ca3d5876ac0fd7fb15989035
|
refs/heads/master
| 2022-06-28T01:00:20.409440
| 2022-06-08T19:26:13
| 2022-06-08T19:26:13
| 244,923,755
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 904
|
rd
|
oceanTime_GetTimes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oceanTime_GetTimes.R
\name{oceanTime_GetTimes}
\alias{oceanTime_GetTimes}
\title{Get a dataframe of ocean_times associated with files in a folder}
\usage{
oceanTime_GetTimes(
path,
pattern,
ref = as.POSIXct("1900-01-01 00:00:00", tz = "GMT"),
verbose = FALSE
)
}
\arguments{
\item{path}{- path to folder with model output files}
\item{pattern}{- pattern for model output filenames (e.g. "avg_*.nc")}
\item{ref}{- calendar reference (default = "1900-01-01 00:00:00")}
\item{verbose}{- flag to print extra information}
}
\value{
Dataframe with filename, ocean_time (in seconds), and ocean_date (POSIXct dates) as columns.
}
\description{
Function to get a dataframe of ocean_times associated with files in a folder.
}
\details{
Numeric values for ocean_times are in seconds
relative to the calendar reference date.
}
|
23e3a41b24874183bdc44b74e80bb0f47ecf5eec
|
7c949f9ec55c15e9ffe8862fad0688a3e5421441
|
/man/code_CDR.Rd
|
c00a74a823ad3089864b85d80e17ef6fa3052d4a
|
[] |
no_license
|
thebackman/CDRalg
|
723436e37aab797b3d29b246588d99019f8efc68
|
4f273c9b46e139a7dd2f82f57604c6e4c115e06c
|
refs/heads/master
| 2021-01-02T08:34:53.084401
| 2017-10-25T13:57:41
| 2017-10-25T13:57:41
| 99,021,245
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 665
|
rd
|
code_CDR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/code_CDR_user.R
\name{code_CDR}
\alias{code_CDR}
\title{Assignment of CDR rating}
\usage{
code_CDR(df, id_name = "lopnr", deb = FALSE)
}
\arguments{
\item{df}{a data frame}
\item{id_name}{the name of the primary ID variable in the data set}
\item{deb}{debug yes or no (will slow function down)}
}
\value{
a data frame
}
\description{
code_CDR adds a variable called "CDR" to a data set according to the scoring
algorithm by Morris (1993).
}
\examples{
data("CDRexamples")
code_CDR(df = CDRexamples, id_name = "id", deb = FALSE)
code_CDR(df = CDRexamples, id_name = "id", deb = TRUE)
}
|
d3dd8939c783d1b54a6fb1907d34754254ce2e33
|
8dfee68e3695253eb9aa719a2571ea5607a5311b
|
/R/drive_auth.R
|
210487572ad8cbb5c7752b2c4b3bdba6fb43e967
|
[
"MIT"
] |
permissive
|
fuentesortiz/googledrive
|
49e7384a0749fbb9870821541e7b8e3ca1d7f735
|
20ffe8cb87ef180246fd3a94e00010879117aaa1
|
refs/heads/master
| 2023-03-07T17:37:20.406535
| 2020-11-19T21:47:22
| 2020-11-19T21:47:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,764
|
r
|
drive_auth.R
|
## This file is the interface between googledrive and the
## auth functionality in gargle.

# Initialization happens in .onLoad
# (presumably replaced there with a gargle auth-state object holding the
# credential, app and api_key accessed throughout this file -- confirm in
# the package's .onLoad).
.auth <- NULL

## The roxygen comments for these functions are mostly generated from data
## in this list and template text maintained in gargle.
gargle_lookup_table <- list(
  PACKAGE   = "googledrive",
  YOUR_STUFF = "your Drive files",
  PRODUCT   = "Google Drive",
  API       = "Drive API",
  PREFIX    = "drive"
)
#' Authorize googledrive
#'
#' @eval gargle:::PREFIX_auth_description(gargle_lookup_table)
#' @eval gargle:::PREFIX_auth_details(gargle_lookup_table)
#' @eval gargle:::PREFIX_auth_params()
#'
#' @family auth functions
#' @export
#'
#' @examples
#' \dontrun{
#' ## load/refresh existing credentials, if available
#' ## otherwise, go to browser for authentication and authorization
#' drive_auth()
#'
#' ## see user associated with current token
#' drive_user()
#'
#' ## force use of a token associated with a specific email
#' drive_auth(email = "jenny@example.com")
#' drive_user()
#'
#' ## force a menu where you can choose from existing tokens or
#' ## choose to get a new one
#' drive_auth(email = NA)
#'
#' ## use a 'read only' scope, so it's impossible to edit or delete files
#' drive_auth(
#'   scopes = "https://www.googleapis.com/auth/drive.readonly"
#' )
#'
#' ## use a service account token
#' drive_auth(path = "foofy-83ee9e7c9c48.json")
#' }
drive_auth <- function(email = gargle::gargle_oauth_email(),
                       path = NULL,
                       scopes = "https://www.googleapis.com/auth/drive",
                       cache = gargle::gargle_oauth_cache(),
                       use_oob = gargle::gargle_oob_default(),
                       token = NULL) {
  # Delegate credential discovery to gargle, preferring a user-configured
  # OAuth app and falling back to the tidyverse app otherwise.
  cred <- gargle::token_fetch(
    scopes = scopes,
    app = drive_oauth_app() %||% gargle::tidyverse_app(),
    email = email,
    path = path,
    package = "googledrive",
    cache = cache,
    use_oob = use_oob,
    token = token
  )
  # gargle returns something token-like on success; anything that is not a
  # Token2.0 means no usable credential was obtained.
  if (!inherits(cred, "Token2.0")) {
    stop(
      "Can't get Google credentials.\n",
      "Are you running googledrive in a non-interactive session? Consider:\n",
      " * `drive_deauth()` to prevent the attempt to get credentials.\n",
      " * Call `drive_auth()` directly with all necessary specifics.\n",
      " * Read more in: https://gargle.r-lib.org/articles/non-interactive-auth.html",
      call. = FALSE
    )
  }
  # Stash the credential and mark auth active for subsequent requests.
  .auth$set_cred(cred)
  .auth$set_auth_active(TRUE)
  invisible()
}
#' Suspend authorization
#'
#' @eval gargle:::PREFIX_deauth_description_with_api_key(gargle_lookup_table)
#'
#' @family auth functions
#' @export
#' @examples
#' \dontrun{
#' drive_deauth()
#' drive_user()
#' public_file <-
#'   drive_get(as_id("1Hj-k7NpPSyeOR3R7j4KuWnru6kZaqqOAE8_db5gowIM"))
#' drive_download(public_file)
#' }
drive_deauth <- function() {
  # Mark auth inactive and drop the cached credential, so a later
  # drive_auth() starts fresh (drive_token() returns NULL while inactive).
  .auth$set_auth_active(FALSE)
  .auth$clear_cred()
  invisible()
}
#' Produce configured token
#'
#' @eval gargle:::PREFIX_token_description(gargle_lookup_table)
#' @eval gargle:::PREFIX_token_return()
#'
#' @family low-level API functions
#' @export
#' @examples
#' \dontrun{
#' req <- request_generate(
#'   "drive.files.get",
#'   list(fileId = "abc"),
#'   token = drive_token()
#' )
#' req
#' }
drive_token <- function() {
  # Respect an explicit drive_deauth(): attach no token at all.
  if (isFALSE(.auth$auth_active)) {
    return(NULL)
  }
  # Lazily trigger the full auth flow on first use.
  if (!drive_has_token()) {
    drive_auth()
  }
  # Wrap the stored credential as an httr request option.
  httr::config(token = .auth$cred)
}
#' Is there a token on hand?
#'
#' @eval gargle:::PREFIX_has_token_description(gargle_lookup_table)
#' @eval gargle:::PREFIX_has_token_return()
#'
#' @family low-level API functions
#' @export
#'
#' @examples
#' drive_has_token()
drive_has_token <- function() {
  # A usable credential is anything inheriting from httr's Token2.0.
  inherits(.auth$cred, "Token2.0")
}
#' Edit and view auth configuration
#'
#' @eval gargle:::PREFIX_auth_configure_description(gargle_lookup_table)
#' @eval gargle:::PREFIX_auth_configure_params()
#' @eval gargle:::PREFIX_auth_configure_return(gargle_lookup_table)
#'
#' @family auth functions
#' @export
#' @examples
#' # see and store the current user-configured OAuth app (probably `NULL`)
#' (original_app <- drive_oauth_app())
#'
#' # see and store the current user-configured API key (probably `NULL`)
#' (original_api_key <- drive_api_key())
#'
#' if (require(httr)) {
#'   # bring your own app via client id (aka key) and secret
#'   google_app <- httr::oauth_app(
#'     "my-awesome-google-api-wrapping-package",
#'     key = "123456789.apps.googleusercontent.com",
#'     secret = "abcdefghijklmnopqrstuvwxyz"
#'   )
#'   google_key <- "the-key-I-got-for-a-google-API"
#'   drive_auth_configure(app = google_app, api_key = google_key)
#'
#'   # confirm the changes
#'   drive_oauth_app()
#'   drive_api_key()
#' }
#'
#' \dontrun{
#' ## bring your own app via JSON downloaded from Google Developers Console
#' drive_auth_configure(
#'   path = "/path/to/the/JSON/you/downloaded/from/google/dev/console.json"
#' )
#' }
#'
#' # restore original auth config
#' drive_auth_configure(app = original_app, api_key = original_api_key)
drive_auth_configure <- function(app, path, api_key) {
  # `app` and `path` are two ways to supply the same thing; reject both.
  if (!missing(app) && !missing(path)) {
    stop("Must supply exactly one of `app` and `path`", call. = FALSE)
  }
  stopifnot(missing(api_key) || is.null(api_key) || is_string(api_key))
  # A JSON path is converted into an OAuth app before storing.
  if (!missing(path)) {
    stopifnot(is_string(path))
    app <- gargle::oauth_app_from_json(path)
  }
  stopifnot(missing(app) || is.null(app) || inherits(app, "oauth_app"))
  if (!missing(app) || !missing(path)) {
    .auth$set_app(app)
  }
  if (!missing(api_key)) {
    .auth$set_api_key(api_key)
  }
  # Return the auth state invisibly so configuration calls stay quiet.
  invisible(.auth)
}
#' @export
#' @rdname drive_auth_configure
drive_api_key <- function() .auth$api_key  # read-only accessor for the stored API key

#' @export
#' @rdname drive_auth_configure
drive_oauth_app <- function() .auth$app  # read-only accessor for the stored OAuth app
|
e61e9301e7312cf399639666607ebb3a0348cb09
|
b4a6ab0f66c3d79588eb64b237a36e198ba60999
|
/setup/rpackages.R
|
62f51d28ecb34b58438b8ea13e714fa6cdad227e
|
[] |
no_license
|
Blinket/dotfiles
|
f69d6945d0b963beef45f86a68bdfa38779c64d9
|
a4e896c68bc0c37e453c0053c355b22d6e7c7a5d
|
refs/heads/master
| 2020-07-21T08:00:29.027731
| 2019-07-17T08:18:20
| 2019-07-17T08:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,045
|
r
|
rpackages.R
|
# Packages to make sure are installed on this machine.
getthese <- c('caret',
              'classInt',
              'DBI',
              'data.table',
              'devtools',
              'doMC',
              'doParallel',
              'dplyr',
              'forecast',
              'ggmap',
              'ggrepel',
              'gridExtra',
              'httr',
              'jsonlite',
              'knitr',
              'languageserver',
              'lintr',
              'lubridate',
              'magrittr',
              'purrr',
              'RColorBrewer',
              'RPostgreSQL',
              'readr',
              'rdoc',
              'rmarkdown',
              'scales',
              'svglite',
              'viridis',
              'zoo'
              )

# Install anything from the list that is not already present.
for (package in getthese) {
  # BUG FIX: installed.packages() returns a matrix, so the original
  # `package %in% installed.packages()` matched against EVERY cell
  # (versions, library paths, ...); compare against package names only.
  if (package %in% rownames(installed.packages())) {
    print(paste(package, 'already installed.'))
  } else {
    install.packages(package, repos = 'https://cloud.r-project.org')
  }
}

# install IRKernel package (the Jupyter kernel for R) and register it
devtools::install_github('IRkernel/IRkernel')
IRkernel::installspec()
|
c4b9780ce885ed2f43bcc7dfe1f834a9e6cb3633
|
976163cb410214d74152c6982e64b3efac572833
|
/simplex/Harjoitustyo_t.r
|
a9c845c7c7565be3a8b867af7ebde703cf293325
|
[] |
no_license
|
Jylital/projects
|
8fad4faa281b6712749d9859628468650c54a5ed
|
ca4396671d826dbc7fdd8b78acd8057be6286b91
|
refs/heads/master
| 2020-11-29T15:32:53.683250
| 2017-04-07T08:21:31
| 2017-04-07T08:21:31
| 87,477,175
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 6,330
|
r
|
Harjoitustyo_t.r
|
# require(matrixcalc)
# NOTE(review): code below references is.singular.matrix(), which lives in
# matrixcalc; with the require() above commented out, those code paths fail
# at runtime unless matrixcalc is attached by other means.
# Use library() rather than require() for a hard dependency: require() only
# warns on failure, deferring the error to the first lp() call.
library(lpSolve)
# One phase of the simplex method.
#
#   constants - objective coefficient column vector
#   G         - constraint matrix, b - right-hand side
#   P         - indices of the current basic columns
#   V         - indices of the non-basic columns
#
# Iterates pivot steps until optimality, returning
# list(indeksit = basic indices, kanta = basic solution values); returns NA
# when the problem is unbounded or a degenerate zero-length step occurs
# (messages are in Finnish: "Keskeytys." = "aborted",
# "Tehtävä on rajoittamaton." = "the problem is unbounded").
simplex.part <- function(constants, G, b, P, V)
{
  while(TRUE)
  {
    #if(is.singular.matrix(G[, P]))
    #{
    #  cat("Keskeytys.\n")
    #  return(NA)
    #}
    # The check above may slow execution down quite a lot on larger
    # systems, so it is not strictly necessary.
    # Basic solution and simplex multipliers for the current basis.
    xx <- solve(G[, P], b)
    ww <- t(solve(t(G[, P]), constants[P]))
    # Reduced costs of the non-basic columns; all <= 0 means optimal.
    zz <- ww %*% G[, V] - constants[V]
    if(all(zz <= 0))
    {
      return(list(indeksit = P, kanta = xx))
    }
    # Entering column: most positive reduced cost.
    k <- which.max(zz)
    y.k <- solve(G[, P], G[, V[k]])
    if(all(y.k <= 0))
    {
      cat("Tehtävä on rajoittamaton.\n")
      return(NA)
    }
    # Ratio test over rows with y.k > 0 picks the leaving variable.
    # NOTE(review): r indexes the SUBSET xx[y.k > 0], but is then used to
    # index the full xx and P below -- correct only when every y.k entry is
    # positive; verify against a case with mixed signs.
    yy <- xx[y.k > 0]/y.k[y.k > 0]
    r <- which.min(yy)
    if(yy[r] == 0)
    {
      cat("Keskeytys.\n")
      return(NA)
    }
    # Update the basic solution and swap entering/leaving indices.
    xx <- c(xx[-r] - yy[r]*y.k[-r], yy[r])
    l <- P[r]
    P <- c(P[P != P[r]], V[k])
    V <- c(V[V != V[k]], l)
  }
}
# Phase one ("auxiliary problem") of the two-phase simplex method: find an
# initial basic feasible solution of A x = b, x >= 0 by minimising the sum
# of m artificial variables.
#
# Returns the basic index set for the original problem, or NA when phase
# one fails or an artificial variable remains basic (infeasible problem);
# "Keskeytys." ("aborted") is printed in the latter case.
auxiliary <- function(A, b)
{
  m <- dim(A)[1]
  n <- dim(A)[2]
  # Objective: zero cost on the n real variables, unit cost on artificials.
  constants <- rbind(as.matrix(rep(0, n)), as.matrix(rep(1, m)))
  # Append the identity block for the artificial variables in one step
  # (the original grew it column-by-column with cbind in a loop).
  G <- cbind(A, diag(m))
  # Artificial columns form the starting basis.
  P <- (n+1):(n+m)
  V <- 1:n
  sol <- simplex.part(constants, G, b, P, V)
  if(!is.list(sol))
  {
    return(NA)
  }
  else if(any(sol$indeksit %in% (n + 1):(n + m)))
  {
    cat("Keskeytys.\n")
    return(NA)
  }
  else
  {
    return(sol$indeksit)
  }
}
# Solve the linear program  min const' x  s.t.  A x (d) b,  x >= 0  with the
# two-phase simplex method.
#
#   const - objective coefficients (vector or column matrix)
#   A     - constraint matrix (a vector is filled row-wise to m x n)
#   b     - right-hand side
#   d     - constraint direction: 'eq', 'leq', 'geq', or a numeric vector
#           over {-1, 0, 1} per row; judging by the diag(d) slack
#           construction, 1 adds a +1 slack (<=), -1 a -1 surplus (>=),
#           0 is an equality.
#
# Returns the optimal x (length n), or NA on failure. Diagnostic messages
# are in Finnish ("Keskeytys." = "aborted").
simplex <- function(const, A, b, d = 'eq')
{
  # Coerce vector inputs to column matrices / a full constraint matrix.
  if(is.vector(const))
  {
    const <- as.matrix(const)
  }
  if(is.vector(b))
  {
    b <- as.matrix(b)
  }
  n <- length(const)
  m <- length(b)
  if(is.vector(A))
  {
    A <- matrix(A, nrow = m, ncol = n, byrow = TRUE)
  }
  if(!is.matrix(const) || !is.matrix(A) || !is.matrix(b))
  {
    cat("Keskeytys.\n")
    return(NA)
  }
  # BUG FIX: dispatch with identical() -- the original `if (d == 'eq')`
  # errors inside if() whenever d is the per-row direction vector
  # (condition of length > 1).
  if(identical(d, 'eq'))
  {
    if(m > n)
    {
      cat("Keskeytys.\n")
      return(NA)
    }
    if(m == n)
    {
      # BUG FIX: is.singular.matrix() comes from matrixcalc, whose
      # require() is commented out at the top of the file, so this path
      # crashed; test singularity via the QR rank instead.
      if(qr(A)$rank < nrow(A))
      {
        cat("Keskeytys.\n")
        return(NA)
      }
      else
      {
        # Square non-singular system: try the unique solution directly.
        xx <- solve(A, b)
        if(all(xx >= 0))
        {
          return(xx)
        }
      }
    }
    # Normalise to b >= 0, then run phase one for a starting basis.
    A[b < 0, ] <- (-1) * A[b < 0, ]
    b[b < 0] <- (-1) * b[b < 0]
    sol <- auxiliary(A, b)
    if(!is.numeric(sol))
    {
      return(NA)
    }
    else
    {
      sol <- simplex.part(const, A, b, sol, (1:n)[-sol])
      if(!is.list(sol))
      {
        return(NA)
      }
      else
      {
        xx <- rep(0, n)
        xx[sol$indeksit] <- sol$kanta
        return(xx)
      }
    }
  }
  else if(identical(d, 'leq'))
  {
    # Add m slack variables with zero objective cost.
    const <- rbind(const, as.matrix(rep(0, m)))
    A <- cbind(A, diag(nrow = m))
    if(any(b < 0))
    {
      A[b < 0, ] <- (-1) * A[b < 0, ]
      b[b < 0] <- (-1) * b[b < 0]
      sol <- auxiliary(A, b)
      if(!is.numeric(sol))
      {
        return(NA)
      }
      else
      {
        sol <- simplex.part(const, A, b, sol, (1:(n + m))[-sol])
        if(!is.list(sol))
        {
          return(NA)
        }
        else
        {
          xx <- rep(0, n + m)
          xx[sol$indeksit] <- sol$kanta
          return(xx[1:n])
        }
      }
    }
    else
    {
      # b >= 0: the slack columns themselves form a feasible basis.
      P <- (n+1):(n+m)
      V <- 1:n
      sol <- simplex.part(const, A, b, P, V)
      if(!is.list(sol))
      {
        return(NA)
      }
      else
      {
        xx <- rep(0, n + m)
        xx[sol$indeksit] <- sol$kanta
        return(xx[1:n])
      }
    }
  }
  else if(identical(d, 'geq'))
  {
    # Add m surplus variables (coefficient -1) with zero objective cost.
    const <- rbind(const, as.matrix(rep(0, m)))
    A <- cbind(A, diag(rep(-1, m)))
    if(all(b < 0))
    {
      # Flip every row: the surplus columns then form a feasible basis.
      A <- (-1)*A
      b <- (-1)*b
      P <- (n+1):(n+m)
      V <- 1:n
      sol <- simplex.part(const, A, b, P, V)
      if(!is.list(sol))
      {
        return(NA)
      }
      else
      {
        xx <- rep(0, n + m)
        xx[sol$indeksit] <- sol$kanta
        return(xx[1:n])
      }
    }
    else
    {
      A[b < 0, ] <- (-1) * A[b < 0, ]
      b[b < 0] <- (-1) * b[b < 0]
      sol <- auxiliary(A, b)
      if(!is.numeric(sol))
      {
        return(NA)
      }
      else
      {
        sol <- simplex.part(const, A, b, sol, (1:(n + m))[-sol]) # Remember to handle NA
        if(!is.list(sol))
        {
          return(NA)
        }
        else
        {
          xx <- rep(0, n + m)
          xx[sol$indeksit] <- sol$kanta
          return(xx[1:n])
        }
      }
    }
  }
  else if(!is.character(d))
  {
    # Mixed directions: d is a numeric vector over {-1, 0, 1}, one per row.
    if(!all(d %in% c(-1, 0, 1)))
    {
      cat("Keskeytys.\n")
      return(NA)
    }
    # BUG FIX: the original used the scalar `||` on the vector d, which
    # errors in R >= 4.3 (and silently inspected only d[1] before that);
    # the intent is elementwise, so use `|`.
    j <- length(d[d == -1 | d == 1])
    # One +/-1 slack/surplus column per inequality row.
    A <- cbind(A, diag(d)[, d == -1 | d == 1])
    if(m > n + j)
    {
      cat("Keskeytys.\n")
      return(NA)
    }
    if(m == n + j)
    {
      # BUG FIX: rank test via qr() instead of the never-loaded
      # matrixcalc::is.singular.matrix().
      if(qr(A)$rank < nrow(A))
      {
        cat("Keskeytys.\n")
        return(NA)
      }
      else
      {
        xx <- solve(A, b)
        if(all(xx >= 0))
        {
          return(xx[1:n])
        }
      }
    }
    const <- rbind(const, as.matrix(rep(0, j)))
    A[b < 0, ] <- (-1) * A[b < 0, ]
    b[b < 0] <- (-1) * b[b < 0]
    sol <- auxiliary(A, b)
    if(!is.numeric(sol))
    {
      return(NA)
    }
    else
    {
      sol <- simplex.part(const, A, b, sol, (1:(n + j))[-sol])
      if(!is.list(sol))
      {
        return(NA)
      }
      else
      {
        xx <- rep(0, n + j)
        xx[sol$indeksit] <- sol$kanta
        return(xx[1:n])
      }
    }
  }
}
# Comparison of our own simplex function against the ready-made
# linear-programming routine (lpSolve::lp), timing both on the same problem.
AA <- matrix(c(3, 1, 2, 1, -3, 1, 1, 2, 3, 4, 3, -1), nrow = 4, ncol = 3, byrow = TRUE)
start.time <- Sys.time()
simplex(as.matrix(c(12, 7, 10)), AA, as.matrix(c(2, 4, 3, 1)), 'geq')
end.time <- Sys.time()
end.time - start.time
start.time <- Sys.time()
lp("min", as.matrix(c(12, 7, 10)), AA, c(">=", ">=", ">=", ">="), as.matrix(c(2, 4, 3, 1)))$solution
end.time <- Sys.time()
end.time - start.time

# Case of an unbounded problem.
simplex(c(-1, 4), c(-2, 1, -1, -2), c(-1, -2), 'leq')

# Randomised feasibility experiment, kept disabled.
#vv <- as.matrix(runif(10, 1, 2))
#uu <- as.matrix(runif(10, -1, 1))
#GG <- matrix(runif(200, -1, 1), nrow = 20)
#GG[(GG %*% vv) > 0, ] <- (-1) * GG[(GG %*% vv) > 0]
#bb <- as.matrix(runif(20, 0, 1)) + GG %*% uu
#cc <- runif(10, -2, -1)
#simplex(cc, GG, bb, 'leq')
|
7476d08f1fe4c3b8459736e9c2c3d9122454e5e0
|
0d70251d94495adee8e5f6b3e534c4f6ad27afc6
|
/R/guide_rect.R
|
3144e11b5cc5c10af4755252f6b129398c84fcf9
|
[
"MIT"
] |
permissive
|
teunbrand/ggchromatic
|
cea7aa0f19de6035662aa388cf70212f43f8ebfc
|
7ed7b22c02dee6972f2006d9366c249170157ed0
|
refs/heads/master
| 2023-03-24T01:29:05.160303
| 2021-02-24T21:56:41
| 2021-02-24T21:56:41
| 329,685,767
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,342
|
r
|
guide_rect.R
|
# Guide constructor -------------------------------------------------------

#' Chromatic colour rectangle guide
#'
#' The colour rectangle guide is a specialised guide for chromatic scales. It
#' maps two channels of a chromatic scales along the x and y axes and renders a
#' rectangle raster displaying the colours.
#'
#' @inheritParams ggplot2::guide_colorbar
#' @param title A character string or expression indicating the title of guide.
#'   If `NULL`, the title is not shown. By default (`waiver()`), the name of the
#'   scale object or the name specified in `labs()` is used for the title. Note
#'   that the colour rectangle guide can take 2 titles: one for each axis.
#' @param check.overlap If `TRUE`, overlapping labels are silently removed. If
#'   `FALSE`, labels are displayed regardless of whether they overlap.
#' @param rectwidth,rectheight A `numeric(1)` or `grid::unit()` object
#'   specifying the width/height of the colour rectangle. Default value is the
#'   `legend.key.width/height` or `legend.key.size` in the theme, times 4.
#' @param channels A `character()` with the channel names to display. The
#'   default, `"auto"`, removes empty channels and subsequently takes the first
#'   two.
#'
#' @return A `guide_colourrect` S3 object.
#' @export
#' @family guides for chromatic scales
#'
#' @examples
#' # Make basic plot
#' g <- ggplot(mtcars, aes(mpg, wt)) +
#'   geom_point(aes(colour = rgb_spec(as.factor(cyl), drat, qsec)))
#'
#' # Choose the channels
#' g + guides(colour = guide_colourrect(channels = c("g", "b")))
#'
#' # Titles can take length 2 for the x- and y-axis.
#' g + guides(
#'   colour = guide_colourrect(title = c("# Cylinders", "Rear axle ratio"))
#' )
#'
#' # If only 2 channels are specified, `guide_colourrect()` becomes the guide
#' ggplot(mtcars, aes(mpg, wt)) +
#'   geom_point(aes(colour = hcl_spec(drat, qsec)))
guide_colourrect <- function(
  # Title
  title = waiver(),
  title.position = NULL,
  title.theme = NULL,
  title.hjust = NULL,
  title.vjust = NULL,
  # Label
  label = TRUE,
  label.position = NULL,
  label.theme = NULL,
  label.hjust = NULL,
  label.vjust = NULL,
  check.overlap = TRUE,
  # Rectangle
  rectwidth = NULL,
  rectheight = NULL,
  nbin = 50,
  raster = TRUE,
  # Frame
  frame.colour = "black",
  frame.linewidth = 0.5,
  frame.linetype = 1,
  # Ticks
  ticks = TRUE,
  ticks.colour = "black",
  ticks.linewidth = 0.5,
  # General
  default.unit = "line",
  channels = "auto",
  order = 0,
  available_aes = c("colour", "color", "fill"),
  ...
) {
  # Coerce bare numbers to grid units using `default.unit`.
  if (!is.null(rectwidth) && !is.unit(rectwidth)) {
    rectwidth <- unit(rectwidth, default.unit)
  }
  # BUG FIX: this previously tested `!is.null(rectwidth)` (copy-paste), so a
  # numeric rectheight supplied on its own was never converted to a unit.
  if (!is.null(rectheight) && !is.unit(rectheight)) {
    rectheight <- unit(rectheight, default.unit)
  }
  structure(list(
    # Title
    title = title,
    title.position = title.position,
    title.theme = title.theme,
    title.hjust = title.hjust,
    title.vjust = title.vjust,
    # Label
    label = label,
    label.position = label.position,
    label.theme = label.theme,
    label.hjust = label.hjust,
    label.vjust = label.vjust,
    # BUG FIX: was hard-coded to TRUE, silently ignoring the argument.
    check.overlap = check.overlap,
    # Rectangle
    rectwidth = rectwidth,
    # BUG FIX: was `rectheight = rectwidth` (copy-paste), so any custom
    # height was silently replaced by the width.
    rectheight = rectheight,
    nbin = nbin,
    raster = raster,
    # Frame
    frame.colour = frame.colour,
    frame.linewidth = frame.linewidth,
    frame.linetype = frame.linetype,
    # BUG FIX: the three tick settings were hard-coded literals, silently
    # ignoring the `ticks`, `ticks.colour` and `ticks.linewidth` arguments.
    ticks = ticks,
    ticks.colour = ticks.colour,
    ticks.linewidth = ticks.linewidth,
    # General
    default.unit = default.unit,
    channels = channels,
    order = order,
    # Parameters
    available_aes = available_aes,
    ...,
    name = "colourrect"),
    class = c("guide", "colourrect", "colorbar")
  )
}
# Guide methods -----------------------------------------------------------

#' @export
#' @method guide_train colourrect
guide_train.colourrect <- function(guide, scale, aesthetic = NULL) {
  # The rectangle guide only makes sense for chromatic scales.
  if (!inherits(scale, "ScaleChromatic")) {
    rlang::warn("The colourrect guide needs chromatic scales.")
    return(NULL)
  }
  if (length(intersect(scale$aesthetics, guide$available_aes)) == 0) {
    rlang::warn("Colourrect guide needs appropriate scales.")
  }
  # NOTE(review): `scale$aesthetic` (singular) here vs `scale$aesthetics`
  # just above -- confirm this is not a typo.
  aes <- aesthetic %||% scale$aesthetic[[1]]
  guide$key <- guide_key_from_chromatic(scale, aes)
  limits <- vec_data(scale$get_limits())
  # Channels that carry no data at all (every limit is NA).
  void <- vapply(limits, function(x) all(is.na(x)), logical(1))
  # Set proper channels: "auto" keeps the first two non-empty ones.
  if (length(guide$channels) == 1 && guide$channels == "auto") {
    channels <- names(limits)[!void]
  } else {
    channels <- guide$channels
  }
  channels <- match(channels, names(limits))[1:2]
  if (anyNA(channels)) {
    rlang::abort("Invalid channel specification in colourrect guide.")
  }
  # A length-2 title maps one title onto each displayed channel.
  if (length(guide$title) > 1) {
    guide$title <- guide$title[channels]
  }
  # Keep only key entries belonging to the two displayed channels.
  guide$key$.channel <- match(guide$key$.channel, channels)
  guide$key <- guide$key[!is.na(guide$key$.channel), ]
  limits <- limits[, channels]
  disc <- vapply(limits, is_discrete, logical(1))
  limits <- without_nas(lapply(limits, unique))
  lim_len <- lengths(limits)
  limits[lim_len == 0] <- list(NA)
  # Sequence between continuous limits (discrete channels keep their values).
  cols <- clapply(limits, !disc, function(x) {
    seq(x[1], x[2], length.out = guide$nbin)
  })
  bins <- lengths(cols)
  # Make colours: cross the two channel grids, map through the scale, and
  # reshape into a bins[1] x bins[2] colour matrix.
  cols <- setNames(xpand(cols[[1]], rev(cols[[2]])), names(bins))
  cols <- cols[lim_len > 0]
  cols <- do.call(scale$ptype, cols)
  ch_lim <- set_channel_default(scale$channel_limits,
                                setdiff(names(void), names(bins)))
  cols <- scale$map(cols, channel_limits = ch_lim)
  dim(cols) <- unname(bins)
  # Transpose the colour matrix before storing it for rendering.
  guide$rstr <- t(cols)
  guide
}
#' @export
#' @method guide_gengrob colourrect
# Render the trained colourrect guide as a gtable: colour raster, frame,
# tick marks, axis labels and titles, surrounded by the legend margin.
guide_gengrob.colourrect <- function(guide, theme) {
  # Assemble the component grobs first.
  cols <- build_rect_grob(guide, theme)
  col_params <- cols$params
  cols <- cols$grob
  frame <- build_rect_frame(guide, col_params)
  axes <- build_rect_axes(guide, theme, col_params)
  titles <- build_rect_titles(guide, theme, col_params)
  # Gaps between labels/titles default to half the title font size.
  hgap <- width_cm(theme$legend.spacing.x %||%
                     0.5 * unit(titles$fontsize, "pt"))
  vgap <- height_cm(theme$legend.spacing.y %||%
                      0.5 * unit(titles$fontsize, "pt"))
  # Columns, left to right: y-title, gap, y-labels, y-ticks, raster.
  widths <- c(titles$width, hgap, axes$label.width,
              axes$ticklength, col_params$size$width)
  # Rows, top to bottom: raster, x-ticks, x-labels, gap, x-title.
  heights <- c(col_params$size$height, axes$ticklength, axes$label.height,
               vgap, titles$height)
  padding <- convertUnit(theme$legend.margin %||% margin(), "cm",
                         valueOnly = TRUE)
  widths <- c(padding[4], widths, padding[2])
  heights <- c(padding[1], heights, padding[3])
  # With padding prepended, the raster cell sits at column 6, row 2.
  xpos <- 6
  ypos <- 2
  gt <- gtable(widths = unit(widths, "cm"),
               heights = unit(heights, "cm"))
  gt <- gtable_add_grob(
    gt, element_render(theme, "legend.background"), clip = "off",
    t = 1, r = -1, b = -1, l = 1, name = "background"
  )
  gt <- gtable_add_grob(
    gt, cols, clip = 'off',
    t = ypos, l = xpos, r = xpos, b = ypos, name = "colours"
  )
  gt <- gtable_add_grob(
    gt, frame, clip = "off",
    t = ypos, l = xpos, r = xpos, b = ypos, name = "frame"
  )
  gt <- gtable_add_grob(
    gt, axes$xticks, clip = "off",
    t = ypos + 1, l = xpos, r = xpos, b = ypos + 1, name = "x_ticks"
  )
  gt <- gtable_add_grob(
    gt, axes$yticks, clip = "off",
    t = ypos, r = xpos - 1, l = xpos - 1, b = ypos, name = "y_ticks"
  )
  gt <- gtable_add_grob(
    gt, axes$xlabs, clip = "off",
    t = ypos + 2, l = xpos, r = xpos, b = ypos + 2, name = "x_labels"
  )
  gt <- gtable_add_grob(
    gt, axes$ylabs, clip = "off",
    t = ypos, r = xpos - 2, l = xpos - 2, b = ypos, name = "y_labels"
  )
  gt <- gtable_add_grob(
    gt, titles$xtitle, clip = "off",
    t = ypos + 4, r = xpos, l = xpos, b = ypos + 4, name = "x_title"
  )
  gt <- gtable_add_grob(
    gt, titles$ytitle, clip = "off",
    t = ypos, r = xpos - 4, l = xpos - 4, b = ypos, name = "y_title"
  )
  gt
}
# Grob constructors -------------------------------------------------------
# Build the colour raster grob for the colourrect guide.
#
# Size resolution: an explicit guide$rectwidth/rectheight wins; otherwise
# four times the theme's legend key size is used.
#
# Returns a list with the grob and its size (in cm) for layout.
build_rect_grob <- function(guide, theme) {
  key_width <- theme$legend.key.width %||% theme$legend.key.size
  key_height <- theme$legend.key.height %||% theme$legend.key.size
  # Parentheses matter: `%||%` binds tighter than `*`, so the original
  # `guide$rectwidth %||% width * 4` quadrupled a user-supplied size too.
  # Only the theme fallback should be scaled by 4.
  width <- width_cm(guide$rectwidth %||% (key_width * 4))
  height <- height_cm(guide$rectheight %||% (key_height * 4))
  # NOTE(review): guide$raster is currently ignored -- the colours are
  # always drawn as a raster; confirm whether a vector fallback is intended.
  rectgrob <- rasterGrob(guide$rstr,
                         width = width,
                         height = height,
                         default.units = "cm",
                         interpolate = FALSE)
  params <- list(
    size = list(width = width, height = height)
  )
  return(list(grob = rectgrob, params = params))
}
# Build the frame around the colour rectangle: a closed outline plus, for
# discrete channels, separator lines halfway between adjacent breaks.
#
# `params` is accepted for interface symmetry with the other build_rect_*
# helpers; all geometry here is in npc coordinates, so it is not used.
build_rect_frame <- function(guide, params) {
  # Closed rectangle outline (one polyline, id 1).
  x <- c(0, 0, 1, 1, 0)
  y <- c(1, 0, 0, 1, 1)
  id <- rep(1, 5)
  n <- length(id)
  if (any(guide$key$.discrete)) {
    key <- guide$key[guide$key$.discrete, ]
    # Channel 1 (x axis): vertical separators at midpoints between breaks.
    if (any(key$.channel == 1)) {
      beam <- key[key$.channel == 1, ]
      value <- diff(beam$.value) / 2 + head(beam$.value, -1)
      len <- length(value)
      x <- c(x, rep(value, 2))
      y <- c(y, rep(c(0, 1), each = len))
      id <- c(id, rep(id[n] + seq_len(len), 2))
      n <- length(id)
    }
    # Channel 2 (y axis): horizontal separators at midpoints between breaks.
    if (any(key$.channel == 2)) {
      beam <- key[key$.channel == 2, ]
      value <- diff(beam$.value) / 2 + head(beam$.value, -1)
      len <- length(value)
      x <- c(x, rep(c(0, 1), each = len))
      y <- c(y, rep(value, 2))
      id <- c(id, rep(id[n] + seq_len(len), 2))
      n <- length(id)
    }
  }
  polylineGrob(x = x, y = y,
               id = id,
               gp = gpar(
                 col = guide$frame.colour,
                 lty = guide$frame.linetype,
                 lwd = guide$frame.linewidth * .pt
               ))
}
# Build tick-mark and label grobs for both axes of the colour rectangle.
#
# Returns a list with the four grobs plus the tick length and the label
# width/height (in cm) needed by the gtable layout. The original version
# ended with `out <- list(...)` and relied on the invisible value of the
# assignment as the return value; the list is now returned explicitly.
build_rect_axes <- function(guide, theme, params) {
  key <- guide$key
  # Split break positions and labels by channel; pad so both channels are
  # always present (possibly empty).
  values <- split(key$.value, key$.channel)
  values <- c(values, rep(list(numeric()), 2 - length(values)))
  .labels <- split(key$.label, key$.channel)
  .labels <- c(.labels, rep(list(character()), 2 - length(.labels)))
  .labels <- lapply(.labels, unwrap_vexpr)
  # Tick length in cm; zeroed below if neither axis draws ticks.
  ticklength <- 0.05
  # Tick marks along the bottom (channel 1) ...
  if (guide$ticks && length(values[[1]]) > 0) {
    xticks <- polylineGrob(
      x = unit(rep(values[[1]], 2), "npc"),
      y = unit(rep(c(0, ticklength), each = length(values[[1]])), "cm"),
      id = rep(seq_along(values[[1]]), 2),
      gp = gpar(
        col = guide$ticks.colour,
        lwd = guide$ticks.linewidth * .pt
      )
    )
  } else {
    xticks <- zeroGrob()
  }
  # ... and along the left side (channel 2).
  if (guide$ticks && length(values[[2]]) > 0) {
    yticks <- polylineGrob(
      x = unit(rep(c(0, ticklength), each = length(values[[2]])), "cm"),
      y = unit(rep(values[[2]], 2), "npc"),
      id = rep(seq_along(values[[2]]), 2),
      gp = gpar(
        col = guide$ticks.colour,
        lwd = guide$ticks.linewidth * .pt
      )
    )
  } else {
    yticks <- zeroGrob()
  }
  if (inherits(xticks, "zeroGrob") && inherits(yticks, "zeroGrob")) {
    ticklength <- 0
  }
  # Labels, styled from the guide's label theme (or the legend text theme).
  label.theme <- guide$label.theme %||% calc_element("legend.text", theme)
  if (guide$label && length(.labels[[1]]) > 0) {
    height <- convertUnit(stringHeight(.labels[[1]]),
                          "cm", valueOnly = TRUE)
    height <- max(height)
    xlabs <- element_grob(
      label.theme,
      label = .labels[[1]],
      x = unit(values[[1]], "npc"),
      check.overlap = guide$check.overlap
    )
  } else {
    xlabs <- zeroGrob()
    height <- 0
  }
  if (guide$label && length(.labels[[2]]) > 0) {
    width <- convertUnit(stringWidth(.labels[[2]]), "cm", valueOnly = TRUE)
    width <- max(width)
    ylabs <- element_grob(
      label.theme,
      label = .labels[[2]],
      y = unit(values[[2]], "npc"),
      hjust = 1,
      check.overlap = guide$check.overlap
    )
  } else {
    ylabs <- zeroGrob()
    width <- 0
  }
  list(
    xticks = xticks,
    yticks = yticks,
    xlabs = xlabs,
    ylabs = ylabs,
    ticklength = ticklength,
    label.width = width,
    label.height = height
  )
}
# Build the x- and y-axis title grobs for the colourrect guide, along with
# their sizes (cm) and the title font size (pt) used for spacing.
#
# NOTE(review): when length(guide$title) != 2 the `if` below has no else
# branch, so the function returns NULL -- and guide_gengrob.colourrect
# reads titles$fontsize unconditionally. The rep() padding inside the
# branch suggests titles of length 1 were meant to be handled; confirm.
build_rect_titles <- function(guide, theme, params) {
  title.theme <- guide$title.theme %||% calc_element("legend.title", theme)
  title.hjust <- guide$title.hjust %||% theme$legend.title.align %||%
    title.theme$hjust %||% 0.5
  title.vjust <- guide$title.vjust %||% title.theme$vjust %||% 0.5
  if (length(guide$title) == 2) {
    # Pad with empty strings (no-op here since length is exactly 2).
    label <- c(guide$title, rep("", 2 - length(guide$title)))
    xtitle <- element_grob(
      title.theme,
      label = label[1],
      hjust = title.hjust,
      margin_x = TRUE,
      margin_y = TRUE
    )
    # The y title is rotated 90 degrees to run along the left edge.
    ytitle <- element_grob(
      title.theme,
      label = label[2],
      vjust = title.vjust,
      angle = 90,
      margin_x = TRUE,
      margin_y = TRUE
    )
    height = convertUnit(grobHeight(xtitle), "cm", valueOnly = TRUE)
    width = convertUnit(grobWidth(ytitle), "cm", valueOnly = TRUE)
    # Font size cascades: guide title theme, then legend.title, then text.
    fontsize <- title.theme$size %||%
      calc_element("legend.title", theme)$size %||%
      calc_element("text", theme)$size %||% 11
    return(list(
      xtitle = xtitle,
      ytitle = ytitle,
      height = height,
      width = width,
      fontsize = fontsize
    ))
  }
}
# American-spelling alias for guide_colourrect().
#' @export
#' @rdname guide_colourrect
guide_colorrect <- guide_colourrect
|
20629c0078a97d73ecdf8ea3eb5e5a0bec87bacd
|
37c0a409c4f06dfac2365fb792a953f59758f245
|
/R/write_ASAP3_dat_file.R
|
111dfe849e839f932d380f2216e9edddc0d5b49b
|
[
"MIT"
] |
permissive
|
cmlegault/ASAPplots
|
8a3aee8a79137dd8911305397430965aa18ae683
|
75adfd7cf889a5a2b66b6ef0a4dbe22b51aa2084
|
refs/heads/master
| 2021-07-13T00:48:41.370521
| 2021-03-22T21:02:40
| 2021-03-22T21:02:40
| 87,811,600
| 3
| 4
|
MIT
| 2021-03-19T14:02:42
| 2017-04-10T13:04:09
|
R
|
UTF-8
|
R
| false
| false
| 2,094
|
r
|
write_ASAP3_dat_file.R
|
#' WriteASAP3DatFile
#'
#' Function to write an ASAP 3 dat file (modified from Tim Miller's
#' text_datwrite.R file). Each element of \code{dat.object$dat} is written
#' preceded by the corresponding entry of \code{dat.object$comments};
#' vectors are written with \code{cat}, matrices (and lists of matrices)
#' with \code{write.table}.
#'
#' @param fname full directory and file name to be created (including .dat suffix)
#' @param dat.object R object containing all the necessary information:
#'   a list with elements \code{dat} (list of numeric vectors, matrices, or
#'   lists of matrices), \code{comments} (one comment line per element
#'   written), \code{fleet.names}, and \code{survey.names}
#' @param header.text text put run description line in input file
#' @return invisibly \code{NULL}; called for the side effect of writing \code{fname}
#' @export
WriteASAP3DatFile <- function(fname, dat.object, header.text){
  # Version line plus run description; this also truncates any existing file.
  cat('# ASAP VERSION 3.0\n#', header.text, '\n', file = fname, append = FALSE)
  dat <- dat.object$dat
  comments <- dat.object$comments
  # comments holds one entry per element written, consumed in order.
  comment.ct <- 0
  for (i in seq_along(dat)) {
    x <- dat[[i]]
    # Numeric/integer vectors: comment line, then values on one line.
    if (data.class(x) %in% c('numeric', 'integer')) {
      comment.ct <- comment.ct + 1
      cat(comments[comment.ct], '\n', sep = '', file = fname, append = TRUE)
      cat(x, '\n', file = fname, append = TRUE)
    }
    # Matrices: comment line, then one file line per matrix row.
    if (data.class(x) == 'matrix') {
      comment.ct <- comment.ct + 1
      cat(comments[comment.ct], '\n', file = fname, append = TRUE)
      write.table(x, col.names = FALSE, row.names = FALSE, quote = TRUE,
                  file = fname, append = TRUE)
    }
    # Lists: each element gets its own comment line and table.
    if (data.class(x) == 'list') {
      for (j in seq_along(x)) {
        comment.ct <- comment.ct + 1
        cat(comments[comment.ct], '\n', file = fname, append = TRUE)
        write.table(x[[j]], col.names = FALSE, row.names = FALSE, quote = TRUE,
                    file = fname, append = TRUE)
      }
    }
  }
  # Trailer: FINIS sentinel, then fleet and survey names as '#$' comments.
  cat('######\n###### FINIS ######\n# Fleet Names\n', file = fname, append = TRUE)
  write.table(as.matrix(paste('#$', dat.object$fleet.names, sep = '')),
              col.names = FALSE, row.names = FALSE, quote = FALSE,
              file = fname, append = TRUE)
  cat('# Survey Names\n', file = fname, append = TRUE)
  write.table(as.matrix(paste('#$', dat.object$survey.names, sep = '')),
              col.names = FALSE, row.names = FALSE, quote = FALSE,
              file = fname, append = TRUE)
  invisible(NULL)
}
|
87a95321c707067a4c8edf49747df4ca31643947
|
61cba4dee2d95f6d0186e28a741f81d83c5b0983
|
/ReadingData/API.R
|
818bc905f653e0cdf7d42bc5c099f1d933a70b0c
|
[] |
no_license
|
xpmanoj/R-Examples
|
ad9db962f465159cd54c53e3b0e2cf98b4b6f1ae
|
c0422d5f8f24f0c3f7403540e2026d634e99b235
|
refs/heads/master
| 2020-05-17T14:13:37.029907
| 2014-08-16T02:30:03
| 2014-08-16T02:30:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 670
|
r
|
API.R
|
# Creating an application:
#   https://dev.twitter.com/apps
# NOTE(review): the bare URLs in the original file stood on their own lines
# and were R syntax errors; they are kept here as comments. The script also
# calls oauth_app/sign_oauth1.0/GET/content (httr) and toJSON (jsonlite)
# without loading those packages -- confirm the intended library() calls.

# Accessing Twitter from R
myapp <- oauth_app("twitter",
                   key = "yourConsumerKeyHere", secret = "yourConsumerSecretHere")
sig <- sign_oauth1.0(myapp,
                     token = "yourTokenHere",
                     token_secret = "yourTokenSecretHere")
homeTL <- GET("https://api.twitter.com/1.1/statuses/home_timeline.json", sig)

# Converting the json object
json1 <- content(homeTL)
json2 <- jsonlite::fromJSON(toJSON(json1))
json2[1, 1:4]

# How to know the URL:
#   https://dev.twitter.com/docs/api/1.1/get/search/tweets
# In general, look at the documentation:
#   https://dev.twitter.com/docs/api/1.1/overview
|
14d684955a2ad282c16240989b11fcc05a9d2260
|
31b24efedd2709563bfc8e622e54c4d8a500ed1c
|
/Assignment II/batsmen.r
|
ef29e11d13aa2d427500820ed8a42c1813a14c9d
|
[] |
no_license
|
VishalAmbavade/R-projects
|
248d6b85489be1097369e85033b2ea8873f8e0fa
|
4f59fe1bb3b4f864a79c8caac0284459ce7796eb
|
refs/heads/master
| 2020-04-23T10:04:10.030506
| 2019-05-14T09:12:57
| 2019-05-14T09:12:57
| 171,087,592
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,156
|
r
|
batsmen.r
|
# Yearly ODI batting rating files (2007-2016). The original read each year
# with its own copy-pasted read.csv() call into ten separate variables that
# were only ever passed to bind_rows(); build the file list and read in a
# single pass instead.
rating_dir <- "D:\\Vishal\\III year\\Data Analytics\\Assignment II\\Player Ratings"
rating_files <- file.path(rating_dir, paste0(2007:2016, "odibattingrating.csv"))

library(dplyr)

# Stack all ten yearly data frames into one.
dataOdiBatting <- bind_rows(lapply(rating_files, read.csv))

summary(dataOdiBatting)
# Visualize missingness in the combined ratings data.
library(VIM)
aggr(dataOdiBatting)

# Collapse to one row per player: their mean rating over all years.
dataOdiBatting <- dataOdiBatting %>%
  group_by(Name) %>%
  summarise(avg = mean(Rating))

# k-means on the average rating (column 2) with 5 clusters.
set.seed(20)
batcluster <- kmeans(dataOdiBatting[, 2], 5)
batcluster$cluster <- as.factor(batcluster$cluster)
str(batcluster)

# Scatter of players vs average rating, coloured by cluster.
# NOTE(review): the cluster labels ("Good", "Best", ...) are assigned by
# cluster number, but kmeans cluster numbering is arbitrary -- confirm the
# legend labels actually match the clusters' rating levels for this seed.
library(ggplot2)
ggplot(dataOdiBatting, aes(dataOdiBatting$Name, avg, color = batcluster$cluster)) +
  geom_point(size = 2) +
  scale_color_hue(labels = c("Good", "Best", "Useless", "Better", "Average")) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("ODI Batting Ratings(2007-2016)")

# Rank players by descending average rating, then merge the rank back in
# (after the merge each player has avg.x/avg.y duplicates plus rank).
dat <- arrange(dataOdiBatting, desc(avg)) %>%
  mutate(rank = 1:nrow(dataOdiBatting))
dataOdiBatting <- merge(dataOdiBatting, dat, by = "Name")
dataOdiBatting

# Interactive versions of the scatter with hover-text ranks.
library(plotly)
p <- plot_ly(dataOdiBatting, x = ~Name, y = ~avg.x, type = 'scatter',
             mode = 'markers', color = batcluster$cluster,
             text = ~paste('Rank: ', rank))
p

p <- plot_ly(dataOdiBatting, x = ~Name, y = ~avg.x, type = 'scatter', mode = 'markers', name = 'G1') %>%
  add_trace(y = ~avg.x, name = 'Tree 2') %>%
  add_trace(y = ~avg.x, name = 'Tree 3') %>%
  add_trace(y = ~avg.x, name = 'Tree 4') %>%
  add_trace(y = ~avg.x, name = 'Tree 5')
p

dat

#Classification
# 70/30 train/test split, then conditional-inference trees predicting the
# average rating from the rank.
library(party)
new_dat <- sample(2, nrow(dat), replace = TRUE, prob = c(0.7, 0.3))
train_data <- dat[new_dat == 1, ]
test_data <- dat[new_dat == 2, ]
myf <- avg~ rank
tree <- ctree(myf, data = train_data)
table(predict(tree), train_data$avg)
plot(tree)
test_tree <- ctree(myf, data = test_data)
plot(test_tree)
|
d5bcf34fcafc170127afc3cccdd66230c498739c
|
fbc5705f3a94f34e6ca7b9c2b9d724bf2d292a26
|
/edX/DS Visualization/gapminder/Ex3 selecting desired columns.R
|
6eafe79bf715f3a05d0b4dd607f47e0a165a17c3
|
[] |
no_license
|
shinichimatsuda/R_Training
|
1b766d9f5dfbd73490997ae70a9c25e9affdf2f2
|
df9b30f2ff0886d1b6fa0ad6f3db71e018b7c24d
|
refs/heads/master
| 2020-12-24T20:52:10.679977
| 2018-12-14T15:20:15
| 2018-12-14T15:20:15
| 58,867,484
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 188
|
r
|
Ex3 selecting desired columns.R
|
library(dplyr)
library(dslabs)
data(gapminder)

# African countries in 2012 with fertility <= 3 and life expectancy >= 70,
# keeping only country and region. The original compared the numeric `year`
# column to the string "2012", relying on silent coercion.
df <- gapminder %>%
  filter(continent == "Africa", year == 2012,
         fertility <= 3, life_expectancy >= 70) %>%
  select(country, region)
|
52d942140eb5726cdbbdb126ba458c12e8e92bd3
|
6514db5b170ef26891a5e02e27440c0701b55802
|
/man/RMs_sets.Rd
|
aab5a98a421239d6e46e643d070dc3ef004196e5
|
[] |
no_license
|
AProfico/Arothron
|
cb8efc56d2fc0b7cdd73a9f6c7be95ac49bd4674
|
788c59e73a505d6bbeec96829e5bf6de976206c4
|
refs/heads/master
| 2021-11-03T19:38:59.399331
| 2019-04-26T10:53:42
| 2019-04-26T10:53:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 418
|
rd
|
RMs_sets.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RMs_sets.R
\docType{data}
\name{RMs_sets}
\alias{RMs_sets}
\title{example dataset}
\usage{
data(RMs_sets)
}
\description{
Array containing the landmark coordinates of the reference sample for the Digital Alignment Tool example
}
\author{
Antonio Profico, Alessio Veneziano, Marina Melchionna, Pasquale Raia
}
\keyword{Arothron}
|
49589437e6cc30599986fe94c92ef763c8b83bc6
|
53db6f69689baa4154d45d62b33a93367f3cd434
|
/thekarefunction.R
|
473c6ad26e76255f8cfb3c59a232f47a2cff9657
|
[] |
no_license
|
TonyNdungu/Supervised_learning
|
89d2c5e21345dd9aed14dc87b1dabd6e0624c197
|
ce3799699862ddac82cbfe208227569b461f971d
|
refs/heads/master
| 2020-04-22T05:13:35.170541
| 2019-03-27T13:35:32
| 2019-03-27T13:35:32
| 170,152,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,199
|
r
|
thekarefunction.R
|
###############################################
# Step 1: Load all the necessary libraries, Oauth and etc.
# library() errors immediately when a package is missing; the original
# used require(), which only returns FALSE and lets the script limp on.
library(twitteR)
library(plyr)
library(stringr)
library(RCurl)
library(ROAuth)
library(ggplot2)

# Step 2: Import the files containing the positive and negative
# opinion-lexicon words (';' starts a comment line in those files).
pos <- scan('/Users/tony//Documents/opinion lexicon/positive_words.txt', what = 'character', comment.char = ';')
neg <- scan('/Users/tony/Documents/opinion lexicon/negative_words.txt', what = 'character', comment.char = ';')
# Score each sentence as (# positive-lexicon words) - (# negative-lexicon
# words) after stripping punctuation, control characters, and digits and
# lower-casing. Returns a data frame with one row per sentence.
score.sentiment = function(sentences, pos.words, neg.words, .progress='none')
{
  # Parameters
  # sentences: vector of text to score
  # pos.words: vector of words of postive sentiment
  # neg.words: vector of words of negative sentiment
  # .progress: passed to laply() to control of progress bar

  # create simple array of scores with laply (plyr); one score per sentence
  scores = laply(sentences,
                 function(sentence, pos.words, neg.words)
                 {
                   # remove punctuation
                   sentence = gsub("[[:punct:]]", "", sentence)
                   # remove control characters
                   sentence = gsub("[[:cntrl:]]", "", sentence)
                   # remove digits?
                   sentence = gsub('\\d+', '', sentence)

                   # define error handling function when trying tolower
                   # (tolower() can fail on malformed multibyte text in tweets)
                   tryTolower = function(x)
                   {
                     # create missing value
                     y = NA
                     # tryCatch error
                     try_error = tryCatch(tolower(x), error=function(e) e)
                     # if not an error
                     if (!inherits(try_error, "error"))
                       y = tolower(x)
                     # result
                     return(y)
                   }
                   # use tryTolower with sapply
                   sentence = sapply(sentence, tryTolower)

                   # split sentence into words with str_split (stringr package)
                   word.list = str_split(sentence, "\\s+")
                   words = unlist(word.list)

                   # compare words to the dictionaries of positive & negative terms
                   pos.matches = match(words, pos.words)
                   neg.matches = match(words, neg.words)

                   # get the position of the matched term or NA
                   # we just want a TRUE/FALSE
                   pos.matches = !is.na(pos.matches)
                   neg.matches = !is.na(neg.matches)

                   # final score: positive hits minus negative hits
                   score = sum(pos.matches) - sum(neg.matches)
                   return(score)
                 }, pos.words, neg.words, .progress=.progress )

  # data frame with scores for each sentence
  scores.df = data.frame(text=sentences, score=scores)
  return(scores.df)
}
##################################################################################
# Step 4: Twitter API Oauth process.
# (Placeholder credentials -- replace with real app keys before running.)
consumer_key <- 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
consumer_secret <- 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
access_token <- 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
access_secret <- 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
########################################################################################################
#Step 5: Let's harvest tweets talking about wine, beer, coffee, and soda
# (500 English-language tweets per drink)
# tweets with drinks
wine_tweets = searchTwitter("wine", n=500, lang="en")
beer_tweets = searchTwitter("beer", n=500, lang="en")
coffee_tweets = searchTwitter("coffee", n=500, lang="en")
soda_tweets = searchTwitter("soda", n=500, lang="en")
# get text from each status object
wine_txt = sapply(wine_tweets, function(x) x$getText())
beer_txt = sapply(beer_tweets, function(x) x$getText())
coffee_txt = sapply(coffee_tweets, function(x) x$getText())
soda_txt = sapply(soda_tweets, function(x) x$getText())
# how many tweets of each drink (used later to label scores by drink)
nd = c(length(wine_txt), length(beer_txt), length(coffee_txt), length(soda_txt))
# join texts into one list, ordered wine, beer, coffee, soda
drinks = as.list(c(wine_txt, beer_txt, coffee_txt, soda_txt) )
################################################################################
# Step 6: Apply score.sentiment and calculate more result
scores <- score.sentiment(drinks, pos, neg, .progress = 'text')
# Debug leftovers removed here: a second score.sentiment() call that
# clobbered `scores` with a single test sentence (which would break the
# per-drink labelling below), an unused lapply()-per-tweet variant
# (scr_df / scr_df2), and a stray line beginning with `=` that was a
# syntax error.
# add variables to data frame: drink label per tweet (nd gives the count
# of tweets per drink, in the same order they were concatenated)
scores$drink = factor(rep(c("wine", "beer", "coffee", "soda"), nd))
# flag strongly positive (score >= 2) and strongly negative (score <= -2)
scores$very.pos = as.numeric(scores$score >= 2)
scores$very.neg = as.numeric(scores$score <= -2)

# how many very positives and very negatives
numpos = sum(scores$very.pos)
numneg = sum(scores$very.neg)
# global score: percentage of strong opinions that are positive
global_score = round( 100 * numpos / (numpos + numneg) )
################################################################################
#Step 7(optional): Define the colors for the barplot
# colors (one per drink, matched by name)
cols = c("#7CAE00", "#00BFC4", "#F8766D", "#C77CFF")
names(cols) = c("beer", "coffee", "soda", "wine")
################################################################################
# Step 8: Make some barplots
# As you can tell, wine gets the highest sentiment score, while soda the lowest one
# barplot of average score per drink, ordered by mean score
meanscore = tapply(scores$score, scores$drink, mean)
df = data.frame(drink=names(meanscore), meanscore=meanscore)
df$drinks <- reorder(df$drink, df$meanscore)

ggplot(df, aes(y=meanscore, x=drinks, fill=drinks)) + geom_bar(stat="identity") +
  scale_fill_manual(values=cols[order(df$meanscore)])
|
bafeb539ff78f4c0e98f5e0b56142281bc299a2e
|
688185e8e8df9b6e3c4a31fc2d43064f460665f1
|
/R/spectralclass.R
|
ca0e06ed5adec229ad21e6b4311a5d7768f795a4
|
[] |
no_license
|
IPS-LMU/emuR
|
4b084971c56e4fed9032e40999eeeacfeb4896e8
|
eb703f23c8295c76952aa786d149c67a7b2df9b2
|
refs/heads/master
| 2023-06-09T03:51:37.328416
| 2023-05-26T11:17:13
| 2023-05-26T11:17:13
| 21,941,175
| 17
| 22
| null | 2023-05-29T12:35:55
| 2014-07-17T12:32:58
|
R
|
UTF-8
|
R
| false
| false
| 11,174
|
r
|
spectralclass.R
|
##' Function to test whether the object is of class "spectral"
##'
##' Returns TRUE or FALSE depending on whether the object is of class "spectral"
##'
##'
##' @param dat An R object
##' @return A single element logical vector: TRUE or FALSE
##' @author Jonathan Harrington
##' @seealso \code{\link{as.spectral}}
##' @keywords attribute
##' @examples
##'
##'
##' is.spectral(vowlax.dft.5)
##' is.spectral(fric.dft)
##' is.spectral(fric.dft$data)
##' is.spectral(vowlax.dft.5[1,])
##' is.spectral(fric.dft[1,1])
##'
##'
##'
##' @export is.spectral
"is.spectral" <- function(dat)
{
if(!is.trackdata(dat))
return(any(class(dat) %in% "spectral"))
else
return(any(class(dat$data) %in% "spectral"))
}
##' Function to convert an object into an object of class 'spectral'.
##'
##' The function converts a vector, matrix, or EMU-trackdata object into an
##' object of the same class and of class 'spectral'
##'
##' If fs is a single element numeric vector, then the frequencies of trackdata
##' are defined to extend to fs/2. If fs is missing, then the frequencies are
##' 0:(N-1) where N is the length of trackdata.
##'
##' @param trackdata A vector, matrix, or EMU-trackdata object.
##' @param fs Either a single element numeric vector, or a numeric vector of
##' the same length as the length of trackdata if trackdata is a vector, or of
##' the same number of rows as trackdata
##' @return The same object but of class 'spectral'.
##' @author Jonathan Harrington
##' @seealso \code{\link{is.spectral}} \code{\link{plot.spectral}}
##' @keywords attribute
##' @examples
##'
##' vec = 1:10
##' as.spectral(vec, 2000)
##' mat = rbind(1:10, 1:10)
##' as.spectral(mat)
##' # turn a spectral trackdata object into a trackdata object
##' tr = as.trackdata(rbind(fric.dft$data), fric.dft$index, fric.dft$ftime)
##' # turn it into a spectral trackdata object with sampling freq 16 kHz
##' tr = as.spectral(tr, 16000)
##' # list the frequencies
##' trackfreq(tr)
##' # Notice that only the $data is made into a spectral matrix,
##' # not the entire trackdata object
##' # so this is trackdata
##' class(tr)
##' # this is a spectral matrix
##' class(tr$data)
##'
##'
##'
##'
##' @export as.spectral
"as.spectral" <- function(trackdata, fs)
{
if(is.trackdata(trackdata)){
if(is.spectral(trackdata$data)) {
warning("matrix is already of class spectral")
return(trackdata)
}
N <- ncol(trackdata$data)
if(missing(fs))
fs <- 0: (ncol(trackdata$data)-1)
else{
if(length(fs)==1)
{
fs <- fs/2
fs <- seq(0, fs, length=N)
}
}
attr(trackdata$data, "fs") <- fs
class(trackdata$data) <- c(class(trackdata$data), "spectral")
}
else if (is.matrix(trackdata)){
if(is.spectral(trackdata)) {
warning("matrix is already of class spectral")
return(trackdata)
}
N <- ncol(trackdata)
if(missing(fs))
fs <- 0: (ncol(trackdata)-1)
else{
if(length(fs)==1)
{
fs <- fs/2
fs <- seq(0, fs, length=N)
}
}
attr(trackdata, "fs") <- fs
class(trackdata) <- c(class(trackdata), "spectral")
}
else
{
if(is.spectral(trackdata)){
warning("matrix is already of class spectral")
return(trackdata)
}
N <- length(trackdata)
if(missing(fs))
fs <- 0: (length(trackdata)-1)
else{
if(length(fs)==1)
{
fs <- fs/2
fs <- seq(0, fs, length=N)
}
}
attr(trackdata, "fs") <- fs
class(trackdata) <- c(class(trackdata), "spectral")
}
trackdata
}
##' Plot spectra from EMU spectral objects
##'
##' The function plots spectrum of any EMU spectral object.
##'
##' This function is implemented when a spectral trackdata object is called
##' with the 'plot' function.
##'
##' @param x An EMU object of class 'spectral'
##' @param labs An optional vector character labels. Must be the same length as
##' specdata
##' @param ylim A two-element numeric vector for the y-axis range (see 'par')
##' @param xlim A two-element numeric vector for the x-axis range (see 'par')
##' @param col Specify a color - see 'mu.colour')
##' @param lty Specify a linetype - see 'mu.colour'
##' @param lwd Specify line thickness - see 'mu.colour'
##' @param fun An R function name e.g., mean, var, sum, etc. The function is
##' applied separately to each category type specified in labs
##' @param freq A numeric vector the same length as the number of columns in
##' specdata specifying the frequencies at which the spectral data is to be
##' plotted. If not supplied, defaults to trackfreq(specdata)
##' @param type A single element character vector for the linetype
##' @param power Logical. If TRUE, then specdata (or specdata$data if specdata is
##' a trackdata object, is converted to a *
##' specdata\eqn{\mbox{\textasciicircum}}{^}b, where a and b have the values
##' given in powcoeffs. This operation is applied before b
##' @param powcoeffs A two-element numeric vector. Defaults to c(10, 10)
##' @param dbnorm Logical. If TRUE, apply dB-level normalization per spectrum as
##' defined by dbcoeffs below. Defaults to FALSE.
##' @param dbcoeffs A two element numeric vector (x, y). The spectra are
##' normalised in such a way that the values of each spectrum at a frequency of
##' y are set to a dB level of x. For example, to normalise the spectrum to 10
##' dB at 2000 Hz, set dbnorm to TRUE and dbcoeffs to c(2000, 10)
##' @param legend Parameters for defining the legend. See 'mu.legend' for
##' further details
##' @param axes A logical vector indicating whether the axes should be plotted
##' @param \dots Further graphical parameters may be supplied.
##' @note To plot spectral data from a spectral trackdata object, then call the
##' function explicitly with 'plot/spectral' rather than with just 'plot'
##' @export
##' @author Jonathan Harrington
##' @seealso \code{\link{plot}} \code{\link{plot.trackdata}}
##' \code{\link{as.spectral}}
##' @keywords dplot
##' @examples
##' \dontrun{
##'
##' plot(vowlax.dft.5[1,])
##'
##' # with label types
##' plot(vowlax.dft.5[1:20,], vowlax.l[1:20])
##'
##' # As above but averaged after converting to power ratios.
##' plot(vowlax.dft.5[1:20,], vowlax.l[1:20], fun=mean, power=TRUE)
##'
##' # All the spectra of one segment in a trackdata object
##' plot(fric.dft[1,])
##'
##' }
##'
"plot.spectral" <- function (x, labs, ylim, xlim, col, lty,
lwd, fun, freq, type = "l",
power = FALSE, powcoeffs = c(10, 10),
dbnorm = FALSE, dbcoeffs = c(0, 0),
legend = TRUE, axes=TRUE, ...)
{
oldpar = graphics::par(no.readonly=TRUE)
on.exit(graphics::par(oldpar))
specdata = x
if (is.trackdata(specdata))
specdata <- specdata$data
if (!is.spectral(specdata))
stop("specdata must be of class spectral")
if (dbnorm)
specdata <- dbnorm(specdata, dbcoeffs[1], dbcoeffs[2])
if (missing(freq))
f <- trackfreq(specdata)
else f <- freq
if (is.matrix(specdata))
N <- nrow(specdata)
else {
N <- 1
specdata <- rbind(specdata)
}
if (missing(labs))
labs <- rep(".", N)
if (!missing(fun)) {
if (power)
specdata <- dbtopower(specdata, powcoeffs[1], powcoeffs[2])
mat <- list(NULL)
for (j in unique(labs)) {
temp <- labs == j
v <- apply(specdata[temp, ], 2, fun)
mat$fn <- rbind(mat$fn, v)
mat$l <- c(mat$l, j)
}
dimnames(mat$fn) <- list(mat$l, dimnames(specdata)[[2]])
specdata <- mat$fn
if (power)
specdata <- dbtopower(specdata, powcoeffs[1], powcoeffs[2],
inv = TRUE)
if (length(unique(labs)) > 1)
labs <- dimnames(specdata)[[1]]
else {
labs <- unique(labs)
specdata <- rbind(specdata)
}
}
if (missing(ylim))
ylim <- range(specdata)
if (missing(xlim))
xlim <- range(f)
if (missing(col))
col <- TRUE
if (missing(lty))
lty <- FALSE
if (missing(lwd))
lwd <- NULL
cols <- mu.colour(labs, col, lty, lwd)
for (j in 1:nrow(specdata)) {
graphics::plot(f, specdata[j, ], type = type, col = cols$colour[j],
lty = cols$linetype[j], lwd = cols$lwd[j], xlim = xlim,
ylim = ylim, xlab = "", ylab = "", axes = FALSE)
graphics::par(new = TRUE)
}
if (is.logical(legend)) {
if (legend & length(unique(labs)) > 1) {
legend <- "topright"
legend(legend, NULL, cols$legend$lab, col = cols$legend$col,
lty = as.numeric(cols$legend$lty), lwd = as.numeric(cols$legend$lwd))
}
}
else legend(legend, NULL, cols$legend$lab, col = cols$legend$col,
lty = as.numeric(cols$legend$lty), lwd = as.numeric(cols$legend$lwd))
if(axes)
{
graphics::axis(side = 1)
graphics::axis(side = 2)
}
graphics::title(...)
graphics::box(...)
}
##' @export
"bark.spectral" <- function (f, ...)
{
specobject = f
if (!is.trackdata(specobject)) {
if (!is.matrix(specobject))
specobject <- as.spectral(rbind(specobject), attr(specobject,
"fs"))
}
f <- trackfreq(specobject)
b <- bark(f)
temp <- b < 0
if (any(temp))
specobject <- specobject[, !temp]
f <- trackfreq(specobject)
b <- bark(f)
N <- length(b)
ba <- seq(min(b), max(b), length = N)
if (is.trackdata(specobject))
spec <- specobject$data
else if (is.matrix(specobject))
spec <- specobject
else spec <- as.spectral(rbind(specobject), attr(specobject,"fs"))
res <- NULL
for (j in 1:nrow(spec)) {
v = approx(b, c(spec[j, ]), ba)
if(j == 1){ # preallocate result matrix
res = matrix(nrow = nrow(spec), ncol = length(v$y))
}
res[j, ] <- v$y
}
if (is.trackdata(specobject)) {
specobject$data <- res
if (!is.null(tracktimes(spec)))
rownames(specobject$data) <- tracktimes(spec)
specobject <- as.spectral(specobject, ba)
}
else {
specobject <- res
specobject <- as.spectral(specobject, ba)
}
specobject
}
##' @export
"mel.spectral" <- function (a)
{
specobject = a
if (!is.trackdata(specobject)) {
if (!is.matrix(specobject))
specobject <- as.spectral(rbind(specobject), attr(specobject, "fs"))
}
f <- trackfreq(specobject)
b <- mel(f)
N <- length(b)
ba <- seq(min(b), max(b), length = N)
if (is.trackdata(specobject))
spec <- specobject$data
else if (is.matrix(specobject))
spec <- specobject
else spec <- as.spectral(rbind(specobject), attr(specobject,
"fs"))
res <- NULL
for (j in 1:nrow(spec)) {
v = approx(b, c(spec[j, ]), ba)
if(j == 1){ # preallocate result matrix
res = matrix(nrow = nrow(spec), ncol = length(v$y))
}
res[j, ] <- v$y
}
if (is.trackdata(specobject)) {
specobject$data <- res
if (!is.null(tracktimes(spec)))
rownames(specobject$data) <- tracktimes(spec)
specobject <- as.spectral(specobject, ba)
}
else {
specobject <- res
specobject <- as.spectral(specobject, ba)
}
specobject
}
|
099bbe660201001b7794202c4aa273f1796f8f75
|
0f2981c025b2bdc6fa6030abd0cdbd3dbd853149
|
/run_analysis.R
|
77d9c34fb1201ec2fb9ad8ce5e2ab937739422f3
|
[] |
no_license
|
rainbowsaurus/run_analysis
|
cc5ff9186234a0e420933f0c9d7e4fd0a1329283
|
88db753cd6278ebf02723b88f528e57f6424c6fa
|
refs/heads/master
| 2020-06-04T02:52:00.732758
| 2014-07-26T23:15:27
| 2014-07-26T23:15:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,840
|
r
|
run_analysis.R
|
# UCI Har Dataset unzipped in working directory
# Read separate datasets into R (whitespace-separated, no headers).
xtestdata<-read.csv("UCI Har Dataset/test/X_test.txt", sep="", header=FALSE)
ytestdata<-read.csv("UCI Har Dataset/test/y_test.txt", sep="", header=FALSE)
xtraindata<-read.csv("UCI Har Dataset/train/X_train.txt", sep="", header=FALSE)
ytraindata<-read.csv("UCI Har Dataset/train/y_train.txt", sep="", header=FALSE)

# Read features.txt into R, preparing to add them as the headers.
# features.txt lists the variable names (column 2 holds the names).
features<-read.csv("UCI Har Dataset/features.txt", sep="", header=FALSE)

#Read in subject data for test and train. This data shows us which subject (person) each observation of the data it is from
subjecttest<-read.csv("UCI HAR Dataset/test/subject_test.txt", sep="", header=FALSE)
subjecttrain<-read.csv("UCI HAR Dataset/train/subject_train.txt", sep="", header=FALSE)

#Set headers for xtestdata and xtraindata as the variables in features.txt
colnames(xtestdata)<-features[,2]
colnames(xtraindata)<-features[,2]
colnames(ytestdata)<-c("activity")
colnames(ytraindata)<-c("activity")
colnames(subjecttest)<-c("subject")
colnames(subjecttrain)<-c("subject")

#merge x and y train data (columns: measurements + activity + subject)
traindata<-cbind(xtraindata, ytraindata, subjecttrain)
#merge x and y test data
testdata<-cbind(xtestdata, ytestdata, subjecttest)
#merge test and train data (rows: test set first, then train set)
mergeddata<-rbind(testdata, traindata)
#changes the number of the activity to the actual activity
for (i in 1:nrow(mergeddata)) {
if (mergeddata[i, "activity"]==1){
mergeddata[i, "activity"]<-"WALKING"
}
else if (mergeddata[i, "activity"]==2){
mergeddata[i, "activity"]<-"WALKING_UPSTAIRS"
}
else if (mergeddata[i, "activity"]==3){
mergeddata[i, "activity"]<-"WALKING_DOWNSTAIRS"
}
else if (mergeddata[i, "activity"]==4){
mergeddata[i, "activity"]<-"SITTING"
}
else if (mergeddata[i, "activity"]==5){
mergeddata[i, "activity"]<-"STANDING"
}
else if (mergeddata[i, "activity"]==6){
mergeddata[i, "activity"]<-"LAYING"
}
}
#produce a logical vector showing TRUE if mergeddata column names are mean or standard deviations
meanstdcols<-grepl(".*(mean\\(\\)|std\\(\\)|activity|subject).*", colnames(mergeddata))
#Data of only means and standard deviations
meanstddata<-mergeddata[, meanstdcols]
#Provides only the measurements of means and standard deviations
meandatacols<-grepl(".*(mean\\(\\)|activity|subject).*", colnames(meanstddata))
meandata<-meanstddata[,meandatacols]
#Transforms the data into the averages of each measurement for each activity and each subject
library(reshape2)
melteddata<-melt(meandata, id.vars=c("activity", "subject"))
data<-dcast(melteddata, subject + activity ~ variable, mean)
#Creates the file tidydata.txt with the new data
write.table(data, "tidydata.txt")
|
f9f1e88c96c027439a7deb0e346ad6c8a6578a63
|
6e0a10823d35c92efd3d4bc90f3ad5a4ae721782
|
/server.R
|
a408f6ff9e5e5395d2fc4b9fec9dde7ac464761e
|
[] |
no_license
|
DrRoad/btc_predic_shiny
|
d5aa9add1e57f1f1f6e73a88cfb83303dbc3d2b9
|
765f1d77e43bd650383ce020723d60d11ddfe100
|
refs/heads/master
| 2022-01-14T14:23:25.257033
| 2018-10-27T00:36:54
| 2018-10-27T00:36:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,736
|
r
|
server.R
|
# server.R
# Global setup for the BTC price-direction Shiny app: loads the historical
# blockchain indicator dataset, appends the Quandl market price, and builds
# the day-over-day difference dataset the model is trained on.
library(quantmod)
library(ggplot2)
library(Quandl)
library(dplyr)
source("func.r")
# Convert a numeric epoch value (seconds since 1970-01-01) to POSIXct.
# NOTE(review): not referenced in this file — presumably used by func.r;
# confirm before removing.
toDate <- function(x) as.POSIXct(x,origin="1970-01-01")
# Load the pre-assembled blockchain metrics (one column per BCHAIN series)
# as an xts time series.
z <- read.zoo(file = "sub_total_datset_652015",header=TRUE, sep=",")
sub_total_dataset <- as.xts(z)
no_date_dataset <- sub_total_dataset
# Fetch the daily BTC market price (BCHAIN/MKPRU) from Quandl and merge it
# onto the metrics by date.
x <- Quandl("BCHAIN/MKPRU")
x <- as.xts(read.zoo(x))
colnames(x) <- "MKPRU"
no_date_dataset <- merge(no_date_dataset,x)
# Drop rows with missing values introduced by the merge, and any leftover
# "Date" column.
no_date_dataset <- na.omit(no_date_dataset)
no_date_dataset <- no_date_dataset[, !(names(no_date_dataset)) %in% "Date"]
colnames(no_date_dataset) <- c("ATRCT","BLCHS","CPTRA","DIFF","ETRVU","HRATE","MKTCP","MIREV","NTRAN","NADDU","TOTBC","TRFEE","TRVOU","TVTVR","MKPRU")
# Model on day-over-day differences; pricechange = 1 when the market price
# rose from the previous day, 0 otherwise (the classification target).
diff_dataset <- diff(as.matrix(no_date_dataset))
diff_dataset <- data.frame(diff_dataset)
diff_dataset$pricechange <- ifelse(diff_dataset$MKPRU>0,1,0)
rownames(diff_dataset) <- 1:nrow(diff_dataset)
# Globals mutated inside the server function via <<-: the fitted model and
# the most recent price rows displayed to the user.
gl_lm <- NULL
currentPrices <- NULL
# Map a numeric model prediction to a display label:
# 1 yields "UP"; every other value yields "DOWN".
returnUporDown <- function(inp) {
  if (inp == 1) "UP" else "DOWN"
}
shinyServer(
    function(input, output) {
    # Fit the model on a random 70% split of the daily-difference data and
    # report accuracy on the held-out 30%. trainModel()/testModel() come
    # from func.r. The fitted model is stashed in the global gl_lm via <<-
    # so the prediction outputs below can reuse it.
    # NOTE(review): `train <- train<-...` is a redundant double assignment.
    output$modelAccuracy <- renderText({
      if (input$trainModel){
        train <- train<-sample_frac(diff_dataset, 0.7)
        sid<-as.numeric(rownames(train))
        test<-diff_dataset[-sid,]
        lm <- trainModel(train)
        gl_lm <<- lm
        paste("Prediction accuracy on test data:",testModel(lm,test))
      }
    })
    # Predict direction from yesterday-to-today factor differences.
    # NOTE(review): reads the globals gl_lm and currentPrices, which are
    # only set after "train model" / "show current price" have run —
    # clicking predict first will error; confirm intended flow.
    output$predicYday <- renderText({
      if (input$predictFuture){
        diff <- tail(currentPrices,n=3)[3,2] - tail(currentPrices,n=3)[1,2]
        pred <- predictFromYesterday(gl_lm,sub_total_dataset)
        paste("Predicted direction using the difference in factors from yesterday and today: ", returnUporDown(pred), "... Difference in prices", diff, sep=" ")
      }
    })
    # Predict direction from the most recent intraday factor differences.
    output$predicCurrent <- renderText({
      if (input$predictFuture){
        diff <- tail(currentPrices,n=3)[3,2] - tail(currentPrices,n=3)[2,2]
        pred <- predictMostRecent(gl_lm)
        paste("Predicted direction using the most recent difference in factors (usually less than 24 hours): ", returnUporDown(pred), "... Difference in prices", diff, sep=" ")
      }
    })
    # Preview the first rows of the raw historical dataset.
    output$data <- renderTable({
      if (input$showData){
        data = sub_total_dataset
        head(data)
      }
    })
    # Fetch the latest market prices from blockchain.info and keep the last
    # three rows in the global currentPrices (used by the predictions above).
    output$getCurrentPrice <- renderTable({
      if (input$showCurrentPrice){
        curPrice <- read.csv("https://blockchain.info/charts/market-price?format=csv")
        colnames(curPrice) <- c("Date/Time","BTC Price")
        currentPrices <<- tail(curPrice, n=3)
        tail(curPrice, n=3)
      }
    })
  })
|
531986796bc7c0772ce597900d3bf9cd300269b9
|
f60f72f511cf4c3af8ce3bf72f71d9d26c06801b
|
/capstone/app/ui.R
|
24320302205157c9cd8cf33aa9b7b83c3e7f8441
|
[] |
no_license
|
markczhang/coursera_datascience_with_r
|
3b238a3533361438c0024f072ea8b00fa752b1f0
|
7f157a9f018c431cd5a53d5f178275ccf2e79080
|
refs/heads/master
| 2022-10-20T14:09:23.333598
| 2020-06-18T19:36:55
| 2020-06-18T19:36:55
| 137,272,668
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 822
|
r
|
ui.R
|
library(shiny)
# UI for the word-predictor app: a sidebar with an explanatory note and a
# free-text input, plus a main panel showing the suggested next word
# (rendered into output$text1 by server.R).
shinyUI(fluidPage(
  titlePanel('Word Predictor'),
  sidebarLayout(
    sidebarPanel(
      # Explanatory note shown above the input box.
      em('This app uses an N-Gram model to predict/suggest for the next word given the previous content you have typed.
         By default, the model searches for the most probable next word in a 4-gram table. The Backoff technique is
         added to the model to make sure that the predictor will take one step down to use lower gram when there is
         no match in the current gram.'),
      textInput('sentence', 'Please enter a sentence:')
    ),
    mainPanel(
      h3('Next word suggestion:'),
      textOutput('text1')
    )
  )
))
|
99522acdf1e1aab1327beff89f614117e961710f
|
d746fef241f9a0e06ae48cc3b1fe72693c43d808
|
/ark_87287/d77g6c/d77g6c-004/rotated.r
|
708b47907ea41bbdf855382329020f6bbdf92237
|
[
"MIT"
] |
permissive
|
ucd-library/wine-price-extraction
|
5abed5054a6e7704dcb401d728c1be2f53e05d78
|
c346e48b5cda8377335b66e4a1f57c013aa06f1f
|
refs/heads/master
| 2021-07-06T18:24:48.311848
| 2020-10-07T01:58:32
| 2020-10-07T01:58:32
| 144,317,559
| 5
| 0
| null | 2019-10-11T18:34:32
| 2018-08-10T18:00:02
|
JavaScript
|
UTF-8
|
R
| false
| false
| 199
|
r
|
rotated.r
|
r=359.97
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d77g6c/media/images/d77g6c-004/svc:tesseract/full/full/359.97/default.jpg Accept:application/hocr+xml
|
f4695563f89c57548325da9c28317b7141223d76
|
a1ca395cd4db65d52e95c5b396a8e4687a3171a8
|
/man/kriging.quantile.grad.Rd
|
efd82e6d55534465e733e45d8b35010e848368c8
|
[] |
no_license
|
ProgramMonkey-soso/DiceOptim
|
6784806c23332eb47aa28813fdfa2c2719387e0b
|
8da55d4235754de56168e0a029ec37cae6012ece
|
refs/heads/master
| 2022-11-04T21:50:41.469095
| 2020-06-29T16:10:03
| 2020-06-29T16:10:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,314
|
rd
|
kriging.quantile.grad.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kriging.quantile.grad.R
\name{kriging.quantile.grad}
\alias{kriging.quantile.grad}
\title{Analytical gradient of the Kriging quantile of level beta}
\usage{
kriging.quantile.grad(x, model, beta = 0.1, type = "UK", envir = NULL)
}
\arguments{
\item{x}{a vector representing the input for which one wishes to calculate
kriging.quantile.grad.}
\item{model}{an object of class \code{\link[DiceKriging]{km}}.}
\item{beta}{A quantile level (between 0 and 1)}
\item{type}{Kriging type: "SK" or "UK"}
\item{envir}{environment for inheriting intermediate calculations from
\code{"kriging.quantile"}}
}
\value{
The gradient of the Kriging quantile of level beta with respect to x. %
Returns 0 at design points (where the gradient does not exist).
}
\description{
Computes the gradient of the Kriging quantile of level beta at the current
location. Only available for Universal Kriging with constant trend
(Ordinary Kriging).
}
\examples{
##########################################################################
### KRIGING QUANTILE SURFACE AND ITS GRADIENT FOR ####
### THE BRANIN FUNCTION KNOWN AT A 12-POINT LATIN HYPERCUBE DESIGN ####
##########################################################################
set.seed(421)
# Set test problem parameters
doe.size <- 12
dim <- 2
test.function <- get("branin2")
lower <- rep(0,1,dim)
upper <- rep(1,1,dim)
noise.var <- 0.2
# Generate DOE and response
doe <- as.data.frame(matrix(runif(doe.size*dim),doe.size))
y.tilde <- rep(0, 1, doe.size)
for (i in 1:doe.size) {
y.tilde[i] <- test.function(doe[i,]) + sqrt(noise.var)*rnorm(n=1)
}
y.tilde <- as.numeric(y.tilde)
# Create kriging model
model <- km(y~1, design=doe, response=data.frame(y=y.tilde),
covtype="gauss", noise.var=rep(noise.var,1,doe.size),
lower=rep(.1,dim), upper=rep(1,dim), control=list(trace=FALSE))
# Compute actual function and criterion on a grid
n.grid <- 9 # Change to 21 for a nicer picture
x.grid <- y.grid <- seq(0,1,length=n.grid)
design.grid <- expand.grid(x.grid, y.grid)
nt <- nrow(design.grid)
crit.grid <- apply(design.grid, 1, kriging.quantile, model=model, beta=.1)
crit.grad <- t(apply(design.grid, 1, kriging.quantile.grad, model=model, beta=.1))
z.grid <- matrix(crit.grid, n.grid, n.grid)
contour(x.grid,y.grid, z.grid, 30)
title("kriging.quantile and its gradient")
points(model@X[,1],model@X[,2],pch=17,col="blue")
for (i in 1:nt)
{
x <- design.grid[i,]
arrows(x$Var1,x$Var2, x$Var1+crit.grad[i,1]*.01,x$Var2+crit.grad[i,2]*.01,
length=0.04,code=2,col="orange",lwd=2)
}
}
\references{
O. Roustant, D. Ginsbourger, Y. Deville, \emph{DiceKriging, DiceOptim: Two
R packages for the analysis of computer experiments by kriging-based
metamodeling and optimization}, J. Stat. Soft., 2010.
\url{https://www.jstatsoft.org/article/view/v051i01}
D. Ginsbourger (2009), \emph{Multiples metamodeles pour l'approximation et
l'optimisation de fonctions numeriques multivariables}, Ph.D. thesis, Ecole
Nationale Superieure des Mines de Saint-Etienne, 2009.
}
\seealso{
\code{\link{EI.grad}}
}
\author{
Victor Picheny
David Ginsbourger
}
\keyword{models}
\keyword{optimize}
|
e61b4b0d1f3cf63018507d9f9c6b66900113994a
|
991d72b16c087afb9835502757fa69f38e5ce79a
|
/R/balance-statistics.R
|
d0d7768e1851f5b5407c36880e68966994e736a5
|
[] |
no_license
|
ngreifer/cobalt
|
a1862b212efb254a55a8913a814d4971aaa43ea2
|
42c1ac803a8bae3916833d669f193a7f06c4d89e
|
refs/heads/master
| 2023-08-03T18:58:45.744235
| 2023-07-28T03:44:14
| 2023-07-28T03:44:14
| 63,369,821
| 63
| 13
| null | 2022-10-13T07:20:51
| 2016-07-14T21:07:03
|
R
|
UTF-8
|
R
| false
| false
| 22,692
|
r
|
balance-statistics.R
|
#' Balance Statistics in `bal.tab` and `love.plot`
#' @name balance-statistics
#'
#' @description [bal.tab()] and [love.plot()] display balance statistics for the included covariates. The `stats` argument in each of these functions controls which balance statistics are to be displayed. The argument to `stats` should be a character vector with the names of the desired balance statistics.
#'
#' This page describes all of the available balance statistics and how to request them. Abbreviations are allowed, so you can use the first few letters of each balance statistics to request it instead of typing out its whole name. That convention is used throughout the documentation. For example, to request mean differences and variance ratios in `bal.tab()` or `love.plot()`, you could include `stats = c("m", "v")`. In addition, the `thresholds` argument uses the same naming conventions and can be used to request balance thresholds on each statistic. For example, to request a balance threshold of .1 for mean differences, you could include `thresholds = c(m = .1)`.
#'
#' Below, each allowable entry to `stats` and `thresholds` are described, along with other details or option that accompany them.
#'
#' ## Binary/Multi-Category Treatments
#' \describe{
#' \item{`"mean.diffs"`}{Mean differences as computed by [col_w_smd()]. Can be abbreviated as `"m"`. Setting the arguments `continuous` and `binary` to either `"std"` or `"raw"` will determine whether standardized mean differences or raw mean differences are calculated for continuous and categorical variables, respectively. When standardized mean differences are requested, the `s.d.denom` argument controls how the standardization occurs. When `abs = TRUE`, negative values become positive. Mean differences are requested by default when no entry to `stats` is provided.}
#'
#' \item{`"variance.ratios"`}{Variance ratios as computed by [col_w_vr()]. Can be abbreviated as `"v"`. Will not be computed for binary variables. When `abs = TRUE`, values less than 1 will have their inverse taken. When used with `love.plot`, the x-axis scaled will be logged so that, e.g., .5 is as far away from 1 as 2 is.}
#'
#' \item{`"ks.statistics"`}{Kolmogorov-Smirnov (KS) statistics as computed by [col_w_ks()].}
#'
#' \item{`"ovl.coefficients"`}{Overlapping (OVL) statistics as computed by [col_w_ovl()]. Can be abbreviated as `"ovl"`. Additional arguments passed to `col_w_ovl()`, such as `integrate` or `bw`, can be supplied to `bal.tab()` or `love.plot()`.}
#' }
#'
#' ## Continuous Treatments
#' \describe{
#' \item{`"correlations"`}{Pearson correlations as computed by [col_w_cov()]. Can be abbreviated as `"cor"`. Setting the arguments `continuous` and `binary` to either `"std"` or `"raw"` will determine whether correlations or covariances are calculated for continuous and categorical variables, respectively (they are both `"std"` by default). When correlations are requested, the `s.d.denom` argument controls how the standardization occurs. When `abs = TRUE`, negative values become positive. Pearson correlations are requested by default when no entry to `stats` is provided.}
#'
#' \item{`"spearman.correlations"`}{Spearman correlations as computed by [col_w_cov()]. Can be abbreviated as `"sp"`. All arguments are the same as those for `"correlations"`. When `abs = TRUE`, negative values become positive.}
#'
#' \item{`"mean.diffs.target"`}{Mean differences computed between the weighted and unweighted sample to ensure the weighted sample is representative of the original population. Can be abbreviated as `"m"`. Setting the arguments `continuous` and `binary` to either `"std"` or `"raw"` will determine whether standardized mean differences or raw mean differences are calculated for continuous and categorical variables, respectively. The standardization factor will be computed in the unweighted sample. When `abs = TRUE`, negative values become positive. This statistic is only computed for the adjusted samples.}
#'
#' \item{`"ks.statistics.target"`}{KS-statistics computed between the weighted and unweighted sample to ensure the weighted sample is representative of the original population. Can be abbreviated as `"ks"`. This statistic is only computed for the adjusted samples.}
#' }
#'
#' If a statistic is requested in `thresholds`, it will automatically be placed in `stats`. For example, `bal.tab(..., stats = "m", thresholds = c(v = 2))` will display both mean differences and variance ratios, and the variance ratios will have a balance threshold set to 2.
#'
#' @examples
#' data(lalonde)
#'
#' #Binary treatments
#' bal.tab(treat ~ age + educ + married + re74, data = lalonde,
#' stats = c("m", "v", "ks"))
#' love.plot(treat ~ age + educ + married + re74, data = lalonde,
#' stats = c("m", "v", "ks"), binary = "std",
#' thresholds = c(m = .1, v = 2))
#'
#' #Continuous treatments
#' bal.tab(re75 ~ age + educ + married + re74, data = lalonde,
#' stats = c("cor", "sp"))
#' love.plot(re75 ~ age + educ + married + re74, data = lalonde,
#' thresholds = c(cor = .1, sp = .1))
#'
NULL
# Extract the field `what` from every entry of the STATS registry,
# simplifying to a vector where possible and naming the result after the
# statistics it came from.
get_from_STATS <- function(what) {
    extracted <- lapply(STATS, function(s) s[[what]])
    setNames(simplify2array(extracted, higher = FALSE), names(STATS))
}
# Registry of all balance statistics known to bal.tab()/love.plot(). Each
# entry is a list of metadata (threshold argument names, display prefixes,
# allowed threshold range, love.plot labelling helpers) plus `fun`, the
# closure that computes the statistic column-wise.
STATS <- list()
# (Standardized) mean differences for binary/multi-category treatments;
# computed by col_w_smd().
STATS[["mean.diffs"]] <- {list(
    type = "bin",
    threshold = "m.threshold",
    Threshold = "M.Threshold",
    disp_stat = "disp.diff",
    adj_only = FALSE,
    # How to take the absolute value of this statistic (plain abs).
    abs = function(x) abs_(x),
    bal.tab_column_prefix = "Diff", #Also which.stat in love.plot
    threshold_range = c(0, Inf),
    balance_tally_for = "mean differences",
    variable_with_the_greatest = "mean difference", #also which.stat2 in love.plot
    # Build the love.plot x-axis label. Wording depends on whether binary
    # and/or continuous covariates are standardized ("std") or raw, and on
    # the `stars` setting when the two are mixed.
    love.plot_xlab = function(...) {
        A <- list(...)
        binary <- A$binary #attr(x, "print.options")$binary
        continuous <- A$continuous #attr(x, "print.options")$continuous
        abs <- A$abs
        var_type <- A$var_type #B[["type"]]
        stars <- A$stars
        #All std, no std, some std
        if ((binary == "std" || !any(var_type == "Binary")) &&
            (continuous == "std" || !any(var_type == "Contin."))) {
            xlab.diff <- "Standardized Mean Differences"
        }
        else if ((binary == "raw" || !any(var_type == "Binary")) &&
                 (continuous == "raw" || !any(var_type == "Contin."))) {
            xlab.diff <- "Mean Differences"
        }
        else {
            stars <- match_arg(stars, c("none", "std", "raw"))
            if (stars == "none") {
                xlab.diff <- "Mean Differences"
            }
            else if (stars == "std") {
                xlab.diff <- "Mean Differences"
            }
            else if (stars == "raw") {
                xlab.diff <- "Standardized Mean Differences"
            }
        }
        if (abs) paste("Absolute", xlab.diff) else xlab.diff
    },
    # Append a star character to variable names whose statistic is on the
    # scale NOT named on the axis, when std/raw are mixed in one plot.
    love.plot_add_stars = function(SS.var, variable.names, ...) {
        A <- list(...)
        binary <- A$binary #attr(x, "print.options")$binary
        continuous <- A$continuous #attr(x, "print.options")$continuous
        var_type <- A$var_type #B[["Type"]]
        stars <- A$stars
        star_char = A$star_char #args$star_char
        #All std, no std, some std
        if (!((binary == "std" || sum(var_type == "Binary") == 0) &&
              (continuous == "std" || sum(var_type != "Binary") == 0))
            &&
            !((binary == "raw" || sum(var_type == "Binary") == 0) &&
              (continuous == "raw" || sum(var_type != "Binary") == 0))) {
            stars <- match_arg(stars, c("none", "std", "raw"))
            if (stars == "none") {
                .wrn("standardized mean differences and raw mean differences are present in the same plot. \nUse the `stars` argument to distinguish between them and appropriately label the x-axis")
            }
            else {
                if (!chk::vld_string(star_char)) star_char <- "*"
                vars_to_star <- setNames(rep(FALSE, length(variable.names)), variable.names)
                if (stars == "std") {
                    if (binary == "std") vars_to_star[variable.names[var_type == "Binary"]] <- TRUE
                    if (continuous == "std") vars_to_star[variable.names[var_type != "Binary"]] <- TRUE
                }
                else if (stars == "raw") {
                    if (binary == "raw") vars_to_star[variable.names[var_type == "Binary"]] <- TRUE
                    if (continuous == "raw") vars_to_star[variable.names[var_type != "Binary"]] <- TRUE
                }
                new.variable.names <- setNames(variable.names, variable.names)
                names(new.variable.names)[vars_to_star[variable.names]] <- paste0(variable.names[vars_to_star[variable.names]], star_char)
                SS.var <- do.call(f.recode, c(list(SS.var), new.variable.names))
            }
        }
        SS.var
    },
    # Reference line at 0; threshold lines mirrored around 0 unless
    # absolute values are plotted.
    baseline.xintercept = 0,
    threshold.xintercepts = function(threshold, abs) {
        if (abs) c(lower = base::abs(threshold))
        else c(lower = -base::abs(threshold), upper = base::abs(threshold))
    },
    love.plot_axis_scale = ggplot2::scale_x_continuous,
    # Column-wise computation; delegates to col_w_smd().
    fun = function(C, treat, weights, std, s.d.denom, abs, s.weights, bin.vars, weighted.weights = weights, subset = NULL, ...) {
        col_w_smd(C, treat = treat, weights = weights,
                  std = std, s.d.denom = s.d.denom,
                  abs = abs, s.weights = s.weights, bin.vars = bin.vars,
                  weighted.weights = weighted.weights,
                  subset = subset)
    }
)}
STATS[["variance.ratios"]] <- {list(
type = "bin",
threshold = "v.threshold",
Threshold = "V.Threshold",
disp_stat = "disp.v.ratio",
adj_only = FALSE,
abs = function(x) abs_(x, ratio = TRUE),
bal.tab_column_prefix = "V.Ratio", #Also which.stat in love.plot
threshold_range = c(1, Inf),
balance_tally_for = "variance ratios",
variable_with_the_greatest = "variance ratio", #also which.stat2 in love.plot
love.plot_xlab = function(...) {
"Variance Ratios"
},
love.plot_add_stars = function(SS.var, variable.names, ...) {
SS.var
},
baseline.xintercept = 1,
threshold.xintercepts = function(threshold, abs) {
if (abs) c(lower = abs_(threshold, ratio = TRUE))
else c(lower = abs_(threshold, ratio = TRUE)^-1, upper = abs_(threshold, ratio = TRUE))
},
love.plot_axis_scale = ggplot2::scale_x_log10,
fun = function(C, treat, weights, abs, s.weights, bin.vars, subset = NULL, ...) {
vrs <- rep(NA_real_, ncol(C))
if (any(!bin.vars)) {
vrs[!bin.vars] <- col_w_vr(C[, !bin.vars, drop = FALSE], treat = treat,
weights = weights, abs = abs,
s.weights = s.weights, bin.vars = bin.vars[!bin.vars],
subset = subset)
}
vrs
}
)}
STATS[["ks.statistics"]] <- {list(
type = "bin",
threshold = "ks.threshold",
Threshold = "KS.Threshold",
disp_stat = "disp.ks",
adj_only = FALSE,
abs = function(x) abs_(x),
bal.tab_column_prefix = "KS", #Also which.stat in love.plot
threshold_range = c(0, 1),
balance_tally_for = "KS statistics",
variable_with_the_greatest = "KS statistic", #also which.stat2 in love.plot
love.plot_xlab = function(...) {
"Kolmogorov-Smirnov Statistics"
},
love.plot_add_stars = function(SS.var, variable.names, ...) {
SS.var
},
baseline.xintercept = 0,
threshold.xintercepts = function(threshold, abs) {
c(lower = base::abs(threshold))
},
love.plot_axis_scale = ggplot2::scale_x_continuous,
fun = function(C, treat, weights, s.weights, bin.vars, subset = NULL, ...) {
A <- list(...)
do.call("col_w_ks", c(list(C, treat = treat, weights = weights, s.weights = s.weights, bin.vars = bin.vars,
subset = subset), A))
}
)}
STATS[["ovl.coefficients"]] <- {list(
type = "bin",
threshold = "ovl.threshold",
Threshold = "OVL.Threshold",
disp_stat = "disp.ovl",
adj_only = FALSE,
abs = function(x) abs_(x),
bal.tab_column_prefix = "OVL", #Also which.stat in love.plot
threshold_range = c(0, 1),
balance_tally_for = "overlapping coefficients",
variable_with_the_greatest = "overlapping coefficient", #also which.stat2 in love.plot
love.plot_xlab = function(...) {
"Overlapping Coefficients"
},
love.plot_add_stars = function(SS.var, variable.names, ...) {
SS.var
},
baseline.xintercept = 0,
threshold.xintercepts = function(threshold, abs) {
c(lower = base::abs(threshold))
},
love.plot_axis_scale = ggplot2::scale_x_continuous,
fun = function(C, treat, weights, s.weights, bin.vars, integrate = FALSE, subset = NULL, ...) {
A <- list(...)
do.call("col_w_ovl", c(list(C, treat = treat, weights = weights, s.weights = s.weights, bin.vars = bin.vars,
subset = subset, integrate = integrate), A))
}
)}
STATS[["correlations"]] <- {list(
type = "cont",
threshold = "r.threshold",
Threshold = "R.Threshold",
disp_stat = "disp.corr",
adj_only = FALSE,
abs = function(x) abs_(x),
bal.tab_column_prefix = "Corr", #Also which.stat in love.plot
threshold_range = c(0, 1),
balance_tally_for = "treatment correlations",
variable_with_the_greatest = "treatment correlation", #also which.stat2 in love.plot
love.plot_xlab = function(...) {
A <- list(...)
if (isTRUE(A$abs)) "Absolute Treatment-Covariate Correlations"
else "Treatment-Covariate Correlations"
},
love.plot_add_stars = function(SS.var, variable.names, ...) {
SS.var
},
baseline.xintercept = 0,
threshold.xintercepts = function(threshold, abs) {
if (abs) c(lower = base::abs(threshold))
else c(lower = -base::abs(threshold), upper = base::abs(threshold))
},
love.plot_axis_scale = ggplot2::scale_x_continuous,
fun = function(C, treat, weights, abs, s.weights, std, s.d.denom, bin.vars, weighted.weights = weights, subset = NULL, ...) {
col_w_cov(C, treat = treat, weights = weights, abs = abs, s.weights = s.weights,
std = std, type = "pearson",
s.d.denom = s.d.denom,
bin.vars = bin.vars, weighted.weights = weighted.weights, na.rm = TRUE,
subset = subset)
}
)}
STATS[["spearman.correlations"]] <- {list(
type = "cont",
threshold = "s.threshold",
Threshold = "S.Threshold",
disp_stat = "disp.spear",
adj_only = FALSE,
abs = function(x) abs_(x),
bal.tab_column_prefix = "S.Corr", #Also which.stat in love.plot
threshold_range = c(0, 1),
balance_tally_for = "treatment Spearman correlations",
variable_with_the_greatest = "treatment Spearman correlation", #also which.stat2 in love.plot
love.plot_xlab = function(...) {
A <- list(...)
if (A$abs) "Absolute Treatment-Covariate Spearman Correlations"
else "Treatment-Covariate Spearman Correlations"
},
love.plot_add_stars = function(SS.var, variable.names, ...) {
SS.var
},
baseline.xintercept = 0,
threshold.xintercepts = function(threshold, abs) {
if (abs) c(lower = base::abs(threshold))
else c(lower = -base::abs(threshold), upper = base::abs(threshold))
},
love.plot_axis_scale = ggplot2::scale_x_continuous,
fun = function(C, treat, weights, abs, s.weights, std, s.d.denom, bin.vars, weighted.weights = weights, subset = NULL, ...) {
col_w_cov(C, treat = treat, weights = weights, abs = abs, s.weights = s.weights,
std = std, type = "spearman",
s.d.denom = s.d.denom,
bin.vars = bin.vars, weighted.weights = weighted.weights, na.rm = TRUE,
subset = subset)
}
)}
STATS[["mean.diffs.target"]] <- {list(
type = "cont",
threshold = "m.threshold",
Threshold = "M.Threshold",
disp_stat = "disp.diff",
adj_only = TRUE,
abs = function(x) abs_(x),
bal.tab_column_prefix = "Diff", #Also which.stat in love.plot
threshold_range = c(0, Inf),
balance_tally_for = "target mean differences",
variable_with_the_greatest = "target mean difference", #also which.stat2 in love.plot
love.plot_xlab = function(...) {
A <- list(...)
binary <- A$binary #attr(x, "print.options")$binary
continuous <- A$continuous #attr(x, "print.options")$continuous
abs <- A$abs
var_type <- A$var_type #B[["type"]]
stars <- A$stars
#All std, no std, some std
if ((binary == "std" || !any(var_type == "Binary")) &&
(continuous == "std" || !any(var_type == "Contin."))) {
xlab.diff <- "Standardized Target Mean Differences"
}
else if ((binary == "raw" || !any(var_type == "Binary")) &&
(continuous == "raw" || !any(var_type == "Contin."))) {
xlab.diff <- "Target Mean Differences"
}
else {
stars <- match_arg(stars, c("none", "std", "raw"))
if (stars == "none") {
xlab.diff <- "Target Mean Differences"
}
else if (stars == "std") {
xlab.diff <- "Target Mean Differences"
}
else if (stars == "raw") {
xlab.diff <- "Standardized Target Mean Differences"
}
}
if (abs) paste("Absolute", xlab.diff) else xlab.diff
},
love.plot_add_stars = function(SS.var, variable.names, ...) {
A <- list(...)
binary <- A$binary #attr(x, "print.options")$binary
continuous <- A$continuous #attr(x, "print.options")$continuous
var_type <- A$var_type #B[["Type"]]
stars <- A$stars
star_char = A$star_char #args$star_char
#All std, no std, some std
if (!((binary == "std" || sum(var_type == "Binary") == 0) &&
(continuous == "std" || sum(var_type != "Binary") == 0))
&&
!((binary == "raw" || sum(var_type == "Binary") == 0) &&
(continuous == "raw" || sum(var_type != "Binary") == 0))) {
stars <- match_arg(stars, c("none", "std", "raw"))
if (stars == "none") {
.wrn("standardized mean differences and raw mean differences are present in the same plot. \nUse the `stars` argument to distinguish between them and appropriately label the x-axis")
}
else {
if (!chk::vld_string(star_char)) star_char <- "*"
vars_to_star <- setNames(rep(FALSE, length(variable.names)), variable.names)
if (stars == "std") {
if (binary == "std") vars_to_star[variable.names[var_type == "Binary"]] <- TRUE
if (continuous == "std") vars_to_star[variable.names[var_type != "Binary"]] <- TRUE
}
else if (stars == "raw") {
if (binary == "raw") vars_to_star[variable.names[var_type == "Binary"]] <- TRUE
if (continuous == "raw") vars_to_star[variable.names[var_type != "Binary"]] <- TRUE
}
new.variable.names <- setNames(variable.names, variable.names)
names(new.variable.names)[vars_to_star[variable.names]] <- paste0(variable.names[vars_to_star[variable.names]], star_char)
SS.var <- do.call(f.recode, c(list(SS.var), new.variable.names))
}
}
SS.var
},
baseline.xintercept = 0,
threshold.xintercepts = function(threshold, abs) {
if (abs) c(lower = base::abs(threshold))
else c(lower = -base::abs(threshold), upper = base::abs(threshold))
},
love.plot_axis_scale = ggplot2::scale_x_continuous,
fun = function(C, treat, weights, std, s.d.denom, abs, s.weights, bin.vars, weighted.weights = weights, subset = NULL, ...) {
n <- nrow(C)
C <- rbind(C, C)
treat <- rep(c(0,1), each = n)
if (is_not_null(weights)) weights <- c(weights, rep(1, n))
if (is_not_null(s.weights)) s.weights <- c(s.weights, s.weights)
if (is_not_null(subset)) subset <- c(subset, subset)
s.d.denom <- "1"
if (is_not_null(weights)) {
col_w_smd(C, treat = treat, weights = weights,
std = std, s.d.denom = s.d.denom,
abs = abs, s.weights = s.weights, bin.vars = bin.vars,
weighted.weights = weighted.weights,
subset = subset)
}
else rep(NA_real_, ncol(C))
}
)}
STATS[["ks.statistics.target"]] <- {list(
type = "cont",
threshold = "ks.threshold",
Threshold = "KS.Threshold",
disp_stat = "disp.ks",
adj_only = TRUE,
abs = function(x) abs_(x),
bal.tab_column_prefix = "KS", #Also which.stat in love.plot
threshold_range = c(0, 1),
balance_tally_for = "target KS statistics",
variable_with_the_greatest = "target KS statistic", #also which.stat2 in love.plot
love.plot_xlab = function(...) {
"Target Kolmogorov-Smirnov Statistics"
},
love.plot_add_stars = function(SS.var, variable.names, ...) {
SS.var
},
baseline.xintercept = 0,
threshold.xintercepts = function(threshold, abs) {
c(lower = base::abs(threshold))
},
love.plot_axis_scale = ggplot2::scale_x_continuous,
fun = function(C, treat, weights, s.weights, bin.vars, subset = NULL, ...) {
A <- list(...)
n <- nrow(C)
C <- rbind(C, C)
treat <- rep(c(0,1), each = n)
if (is_not_null(weights)) weights <- c(weights, rep(1, n))
if (is_not_null(s.weights)) s.weights <- c(s.weights, s.weights)
if (is_not_null(subset)) subset <- c(subset, subset)
do.call("col_w_ks", c(list(C, treat = treat, weights = weights, s.weights = s.weights, bin.vars = bin.vars,
subset = subset), A))
}
)}
# List the names of registered balance statistics. With no `type`, every
# statistic is returned; with type = "bin" or "cont", only statistics
# registered for that kind of treatment are returned.
all_STATS <- function(type) {
    if (missing(type)) {
        return(unique(names(STATS)))
    }
    type <- match_arg(type, c("bin", "cont"))
    unique(names(STATS)[get_from_STATS("type") == type])
}
|
a5892bc1ac3bc06e4990177197d64f2ec685c5c3
|
eff907031716ec2fe0870c6bca31f35df0ad5b62
|
/analysis_scripts/libraries.R
|
18d9d77c42ebb6a6e0ce45fdcd02f5bef86ee131
|
[] |
no_license
|
rhi-batstone/outdoor_event_survey
|
a7ad06033884bfae564499eb17a11d9a1661f967
|
a62933e5b93c5350f0617a2f4b4a7aae60226ff6
|
refs/heads/master
| 2022-11-18T13:59:48.642755
| 2020-07-15T18:02:07
| 2020-07-15T18:02:07
| 271,045,142
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 221
|
r
|
libraries.R
|
# Shared library loader: attach every package the downstream analysis
# scripts depend on. Source this once at the top of a session.
# NOTE(review): attachment order matters for function masking (tidyverse,
# reshape2 and data-wrangling packages share generic names) -- confirm
# before reordering.
library(tidyverse)
library(janitor)
library(xlsx)       # NOTE(review): presumably requires a Java runtime via rJava -- confirm
library(tidytext)
library(viridis)
library(leaflet)
library(extrafont)
library(sf)
library(mapview)
library(wordcloud)
library(reshape2)
library(lubridate)
library(knitr)
|
694425b8eaae5fbee2d381b176684dadb9aaee14
|
de5c92cdd473daa28707ca840c97fe5ffd3b4135
|
/script2_0.R
|
f9c4f40e1f1258344ef5a8be9acb17c194d5fdd4
|
[] |
no_license
|
stamnosslin/stamnosslin.github.io
|
4829ed78050ebf91a0877dd7b762ab9703f56c1d
|
d17d11a3aec2fd385fa9d380371be859a9e03bce
|
refs/heads/master
| 2021-01-10T23:23:12.223275
| 2019-11-19T09:06:46
| 2019-11-19T09:06:46
| 69,728,420
| 1
| 0
| null | null | null | null |
ISO-8859-2
|
R
| false
| false
| 657
|
r
|
script2_0.R
|
# Script2. Analyzing data from a psychoacoustic experiment (under construction)
#
# The data is from a listening experiment (ALEXIS 106) involving blind and sighted
# listeners, conducted at the Gösta Ekman Laboratory in 2016. The experiment measured
# auditory thresholds for abilities potentially important for human echolocation.
# The data is stored in two files. The first contain threshold data,
# the second background data (age, sex, ...). Please see the file codebooks.txt
# for details on the variables stored in the data files.
#
# Mats Nilsson (mats.nilsson@psychology.su.se), revised: 2016-12-01
# To be started at REX101 ...
|
695f5f98d28f0bbf356a018ef47cb07e569cb412
|
b694b44e564a59cb034cff1930d8611fc68e39c2
|
/stocks_r.R
|
4b769060d644bde87de179717f8364669a390c5b
|
[] |
no_license
|
DheepthaB/StockMarketAnalysis
|
4479f91cf389320bc3bcc8e6992e9f7b4c1e06dc
|
46b43d955cdbe83e8fc6427917c3950c29e822be
|
refs/heads/master
| 2021-08-08T23:31:19.601213
| 2017-11-11T16:38:08
| 2017-11-11T16:38:08
| 104,391,766
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,686
|
r
|
stocks_r.R
|
# stocks_r.R -- exploratory technical analysis of daily closing prices
# (Quandl WIKI AAPL 6-month extract) using the TTR and quantmod packages.
#
# NOTE(review): this file was originally an interactive console transcript
# (libraries loaded after first use, stray arithmetic, off-scale plot
# annotations); it has been reordered into a runnable script. Install the
# dependencies once beforehand:
#   install.packages(c("TTR", "quantmod"))
library(TTR)        # SMA, EMA, BBands, RSI, MACD

r <- read.csv("WIKI-AAPL 6M.csv")
head(r)
colnames(r)
nrow(r)

# Summary statistics of the closing-price series.
mean(r$Close)
sd(r$Close)
min(r$Close)
max(r$Close)

# Moving averages over the Close column.
sm10 <- SMA(r[c("Close")], n = 10)
sm50 <- SMA(r[c("Close")], n = 50)
sm100 <- SMA(r[c("Close")], n = 100)
em10 <- EMA(r[c("Close")], n = 50)   # NOTE(review): named em10 but uses n = 50, as in the original

# Inspect a window of raw closes against the 50-day averages.
r$Close[50:70]
sm50[50:70]
em10[70:80]

# Plot closes with the 50-day SMA and mark example buy/sell points.
plot(r$Close[70:90], col = "blue", type = "l",
     xlab = "Days", ylab = "Closing prices and SMA50 values")
lines(sm50[70:90], col = "red")
legend(x = "bottomleft", legend = c("Buy", "sell"), col = c("red", "red"),
       lwd = 1, lty = c(NA, NA), pch = c(21, 25), merge = FALSE)
points(2, 98.46, col = "red", pch = 21, cex = 2)
points(6, 99.62, col = "red", pch = 25, cex = 2)
points(10, 94.2, col = "red", pch = 21)

# Further indicators: Bollinger bands, RSI and MACD.
bmm <- BBands(r[c("Close")], sd = 2)
# BUG FIX: original referenced column "CLose" (typo), which would error.
bbEMA <- BBands(r[c("Close")], sd = 2, n = 14, mavg = EMA)
rsi14 <- RSI(r[c("Close")], n = 14)
macd <- MACD(r[c("Close")], nFast = 12, nSlow = 26, nSig = 9, maType = SMA)

# Combine indicators with the raw data.
# BUG FIX: original used the undefined object `emm`; the EMA series is em10.
allData <- data.frame(r, sm10, em10, bmm, rsi14, macd)
colnames(allData)

# Plot the Bollinger bands around the closing price.
ff <- data.frame(r, bmm)
plot(ff$Close, col = "red")
lines(ff$up, col = "black")
lines(ff$dn, col = "green")
lines(ff$mavg, col = "blue")

# Candlestick charts via quantmod.
# BUG FIX: original called getSymbols("APPL") (non-existent ticker) but then
# charted GS; the accompanying comment says "Goldman OHLC from yahoo".
library(quantmod)
getSymbols("GS")    # creates object GS in the global environment
chartSeries(GS, TA = NULL)
chartSeries(GS, theme = "white", TA = "addVo();addBBands();addCCI()")
188af7e1a064d7e652729d586d1e8b100f4bb665
|
0d2eea91a487c186c05112bc77bba162833562b0
|
/NAMIBIA_trawl_data_analysis.R
|
7f6df2ddf500147fed7d763848231a423b73eca5
|
[] |
no_license
|
steffenoppel/Namibia
|
c63ead223969c6c2577f3631b6f42d3a10a38c76
|
b8560dbc6b2653c7cb72b08ba2e339bd43d131b9
|
refs/heads/master
| 2021-07-02T18:45:52.168227
| 2021-01-08T11:00:57
| 2021-01-08T11:00:57
| 209,339,239
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,582
|
r
|
NAMIBIA_trawl_data_analysis.R
|
### ##########################################
###
### Namibia trawl bycatch analysis
### DATA PREPARATION AND ANALYSIS
### based on scripts by Tim Reid, March 2018
###
### ##########################################
### cleaned up by Steffen Oppel 23 August 2019
### only retained data cleaning/preparation - data analysis in separate script
## each line is the interaction of one species during a given trawl set - hence the effort and interactions need to be SUMMED over the different lines of data
## FILE RECOVERED ON 13 Sept 2019 after request from Nina da Rocha
## catastrophic file overwrite occurred on 27 Aug, and some portion of the script may have been lost
## REVISION included new data and ensured output in MS matches with these data and results
## ADDED BSL operation - some BSLs were deployed incorrectly
## MINOR ADJUSTMENTS BY Nina da Rocha on 18th Sep 2019
## Data from experimental trials 2009-2010 excluded
## re-run by Steffen Oppel on 19 Sept 2019 with revised dataset
## REVISION of manuscript on 24 March 2020 - included separate analysis for setting, trawling, and haul
## added observer coverage proportion
##############################################################
#### load ALL NECESSARY libraries
##############################################################
library(boot)
library(tidyverse)
library(readxl)
library(lubridate)
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(janitor)
## pin filter/select to the dplyr versions to avoid masking conflicts
## (e.g. stats::filter, MASS::select)
filter<-dplyr::filter
select<-dplyr::select
##############################################################
#### LOAD ALL NECESSARY DATASETS
##############################################################
# NOTE(review): absolute setwd() makes the script machine-specific;
# consider relative paths or here::here().
setwd("C:\\STEFFEN\\RSPB\\Marine\\Bycatch\\Namibia\\Data")
# read in data
# first 19 columns are data, others are reference/lookup
# (keeping only columns 1:19 also makes dat1/dat2 align for rbind below)
# Trawls with torilines in 2009-2010 are experimental and not representative of standard fishing practices
dat1 <- read_excel("2009-2010 demersal trawl data Namibia new.xlsx",
                   sheet="6_Interactions")[,1:19]
dat1<- dat1 %>% filter(Tori_line_deployed != "Yes")
dat2 <- read_excel("Trawl data Aug 2019.xlsx",
                   sheet="6_Interactions")[,1:19]
# exclude trip BLEB2 (reason not documented here -- see project notes)
dat2<- dat2 %>% filter(OBSTRIP_ID != "BLEB2")
names(dat2)
dat<-rbind(dat1,dat2)
##############################################################
#### REMOVE ALL NOCTURNAL TRAWLS WHERE OBSERVATION EFFORT WAS ZERO
##############################################################
## Identify trawls whose summed observation time is zero (unobserved,
## typically nocturnal trawls) and drop them from the interaction data.
## BUG FIX: the original computed effort with minute() alone, which ignores
## the hour component of Minutes_observed (e.g. a 1:00:00 watch scored as
## 0 minutes), so fully observed trawls could be misclassified as
## unobserved. The hour+minute conversion below mirrors the h.lub
## calculation used later in this script.
NIGHTTRAWLS<-dat %>% mutate(Minutes_observed=hour(Minutes_observed)*60 + minute(Minutes_observed)) %>%
  group_by(OBSTRIP_ID,OBSTRAWL_ID) %>%
  summarise(obs_eff=sum(Minutes_observed))%>%
  filter(obs_eff==0)
dim(dat)   # size before exclusion (printed as a sanity check)
## Drop unobserved trawls and records with no tori-line information.
dat <- dat %>% filter(!OBSTRAWL_ID %in% NIGHTTRAWLS$OBSTRAWL_ID) %>%
  filter(!is.na(Tori_line_deployed))
dim(dat)   # size after exclusion
### INFO ON CORRECT DEPLOYMENT OF BSL
## Bird-scaring line (BSL) deployment details per trawl; "Immediately After
## Doors" is treated as the only correct deployment. Trawls excluded above
## are removed here too.
BSL <- read_excel("Trawl data Aug 2019.xlsx",
                  sheet="3_Trawl")
BSL<- BSL %>% select(OBSTRIP_ID,OBSTRAWL_ID,BSL_Deployment) %>%
  mutate(BSLcorrect=if_else(BSL_Deployment=="Immediately After Doors","Yes","No")) %>%
  filter(!OBSTRAWL_ID %in% NIGHTTRAWLS$OBSTRAWL_ID)
##############################################################
#### MANIPULATE THE DATA
##############################################################
# remove blank rows
dat <- subset(dat, OBSPERIOD_ID != "_")
head(dat)
names(dat)
# convert minutes observed from POSIXct to numeric & express in hours
dat$t.lub <- ymd_hms(dat$Minutes_observed)
dat$h.lub <- lubridate::hour(dat$t.lub) + lubridate::minute(dat$t.lub)/60
# assimilate descriptions of Outcome (free-text spellings collapsed to
# Dead / Unharmed / Unknown)
dat$Outcome[dat$Outcome=="possibly dead"]<- "Dead"
dat$Outcome[dat$Outcome=="Possibly Dead"]<- "Dead"
dat$Outcome[dat$Outcome=="Possibly dead"]<- "Dead"
dat$Outcome[dat$Outcome=="Not injured"]<- "Unharmed"
dat$Outcome[dat$Outcome=="N/A"]<- "Unknown"
dat$Outcome[dat$Outcome=="n/a"]<- "Unknown"
dat$Outcome[is.na(dat$Outcome)]<- "Unknown"
# define seasons: April-October (months 4-10) = Winter, rest = Summer
# (southern-hemisphere seasons)
dat$Month <- lubridate::month(dat$Date_Start_Observation)
dat$Season <- "Summer"
dat$Season[dat$Month>3 & dat$Month<11]<- "Winter"
#define presence of regulation: REG = 1 for observations from 2016 onwards
dat$REG<-ifelse(lubridate::year(dat$Date_Start_Observation)>2015,1,0)
# add information on correct BSL deployment; trawls absent from the BSL
# sheet default to "No"
dat<- dat %>% left_join(BSL, by=c('OBSTRIP_ID','OBSTRAWL_ID')) %>%
  mutate(BSLcorrect=if_else(is.na(BSLcorrect),"No",BSLcorrect))
dim(dat)
## assess whether it is worth to split outcomes and interactions or whether too much data are missing:
unique(dat$Outcome)
## The n/a are associated with observation periods in which no interactions took place
## 910 entries listed as interaction species NONE with number of interactions = 0
dat$Species[dat$Species=="none"]<- "NONE"
dat$Species[dat$Species=="None"]<- "NONE"
table (dat$Species, dat$Outcome)
table(dat$Outcome)
## All but 5 observed interactions have an associated outcome
sum(dat$Number_interactions)
5/854 ## Under 1 % of observed interactions have an unknown outcome
##############################################################
#### SUMMARISE THE DATA
##############################################################
# SUMMARISE INTERACTION DATA PER TRAWL: observation hours and total
# interactions per observation period, then rate = interactions / hour
obs.effort <- dat %>%
  group_by(OBSTRAWL_ID,OBSPERIOD_ID,
           Offal_during_observation,
           Tori_line_deployed,BSLcorrect,
           Vessel_Activity,
           Date_Start_Observation,REG) %>%
  summarize(obs.effort = sum(h.lub), tot.interactions=sum(Number_interactions)) %>%
  filter(obs.effort>0) %>%
  mutate(Interaction_rate = tot.interactions / obs.effort)
tail(obs.effort)
# SUMMARISE ALL FATAL INTERACTIONS
# NOTE(review): "Possibly Dead" was recoded to "Dead" above, so the second
# element of this filter is redundant (harmless).
dat.mort <- dat %>%
  filter(Outcome %in% c("Dead","Possibly Dead")) %>%
  group_by(OBSPERIOD_ID,Species) %>%
  summarize(fatal.interactions = sum(Number_interactions))
dat.mort
# SUMMARISE ALL HEAVY INTERACTIONS (or heavy and medium)
dat.heavy <- dat %>%
  #filter(Interaction_type=="Heavy") %>%
  filter(Interaction_type %in% c("Heavy","Medium")) %>% ### alternatively use %in% c("Heavy","Medium")
  group_by(OBSPERIOD_ID) %>%
  summarize(heavy.interactions = sum(Number_interactions))
dat.heavy
sum(dat.heavy$heavy.interactions)
# COMBINE DATA TO SUMMARISE BY SETTING OPERATION ETC
# NOTE(review): left_join() has no `fill` argument -- it is silently
# swallowed by `...`; the ifelse() calls below do the NA -> 0 replacement.
# NOTE(review): dat.mort has one row per OBSPERIOD_ID x Species, so joining
# by OBSPERIOD_ID alone can duplicate effort rows when several species died
# in one period -- confirm this is intended.
NAM.trawl<- left_join(obs.effort,dat.mort, by="OBSPERIOD_ID", fill=0) %>%
  left_join(dat.heavy, by="OBSPERIOD_ID", fill=0) %>%
  filter(!is.na(Tori_line_deployed)) %>%
  mutate(fatal.interactions=ifelse(is.na(fatal.interactions),0,fatal.interactions)) %>%
  mutate(fatal.rate = fatal.interactions/obs.effort) %>%
  mutate(heavy.interactions=ifelse(is.na(heavy.interactions),0,heavy.interactions)) %>%
  mutate(heavy.rate = heavy.interactions/obs.effort)
dim(NAM.trawl)
##fwrite(NAM.trawl,"Namibia.trawl.interactions.csv")
# SUMMARISE ALL FATAL INTERACTIONS BY SPECIES FOR RESULTS
dat %>%
  filter(Outcome %in% c("Dead","Possibly Dead")) %>%
  group_by(Species) %>%
  summarize(fatal.interactions = sum(Number_interactions))
# SUMMARISE ALL HEAVY INTERACTIONS (or heavy and medium) per species and
# regulation period, with the percentage of each period's total
deathtally<-dat %>%
  #filter(Interaction_type=="Heavy") %>%
  filter(Interaction_type %in% c("Heavy","Medium")) %>% ### alternatively use %in% c("Heavy","Medium")
  group_by(REG,Species) %>%
  summarize(heavy.interactions = sum(Number_interactions)) %>%
  mutate(prop=(heavy.interactions/sum(heavy.interactions))*100)
fwrite(deathtally,"Namibia.trawl.heavy.interactions_bySpecies.csv")
##############################################################
#### BASIC SUMMARY OF DATA FOR REPORTING IN MANUSCRIPT
##############################################################
## Interactive tallies; the bare arithmetic lines reproduce the proportions
## quoted in the manuscript from the printed tables above them.
# DEPLOYMENT OF BSL AFTER REGULATION
obs.effort %>% #filter(REG=="YES") %>%
  group_by(Tori_line_deployed) %>%
  summarise(n=count(OBSTRAWL_ID))
## Compliance levels in the post-reg period
unique(obs.effort$Tori_line_deployed)
table(obs.effort$Tori_line_deployed)
## No BSL deployment data available from 44 trawls
table(obs.effort$Tori_line_deployed, obs.effort$REG)
254/(254+23)
table(obs.effort$BSLcorrect[obs.effort$Tori_line_deployed=='Yes'])
132/(132+122) #when used, BSLs deployed correctly on 52% of trawls
table(obs.effort$BSLcorrect[obs.effort$REG==1])
132/(132+145) #BSLs deployed correctly on 48% of post-regulation trawls
# BASIC SUMMARY OF DATA: mean/sd of all rate metrics per regulation period,
# BSL use and vessel activity
rawsummary<-NAM.trawl %>% mutate(count=1) %>% group_by(REG,Tori_line_deployed,Vessel_Activity) %>%
  summarise(nsets=sum(count),mean.tot.rate=mean(Interaction_rate), sd.tot.rate=sd(Interaction_rate),mean.fatal.rate=mean(fatal.rate), sd.fatal.rate=sd(fatal.rate),mean.heavy.rate=mean(heavy.rate), sd.heavy.rate=sd(heavy.rate))
rawsummary
fwrite(rawsummary,"Namibia.trawl.interactions_summary.csv")
## n trips and obs effort
dat %>%
  filter(!is.na(Tori_line_deployed)) %>%
  group_by(REG) %>%
  summarise(ntrips=length(unique(OBSTRIP_ID)),nsets=length(unique(OBSTRAWL_ID)),eff=sum(h.lub,na.rm=T))
### proportion of heavy and fatal interactions
dat %>% filter(Outcome!="Unknown") %>%
  mutate(count=1) %>%
  group_by(Outcome) %>%
  summarise(n=sum(count))
14/(14+439)
## reviewer requested proportion of fatal in heavy interactions
dat %>% filter(Outcome!="Unknown") %>%
  filter(Interaction_type %in% c("Heavy","Medium")) %>%
  mutate(count=1) %>%
  group_by(Outcome) %>%
  summarise(n=sum(count))
14/(14+175)
dat %>% filter(Interaction_type!="n/a") %>%
  mutate(count=1) %>%
  group_by(Interaction_type) %>%
  summarise(n=sum(count))
(75+115)/(75+268+115) ## proportion of heavy and medium interactions
268/(75+268+115) ## proportion of light interactions
##############################################################
#### PRODUCE BOOTSTRAPPED CONFIDENCE INTERVALS FOR TORI LINES
##############################################################
## stratify the samples: bootstrap resampling is stratified by vessel
## activity (Set / Trawl / Haul) so each activity keeps its sample share
NAM.trawl<- NAM.trawl %>%
  mutate(group=paste(Vessel_Activity,Tori_line_deployed, sep="_"))
# integer stratum index (1..n activities) required by boot(strata=)
NAM.trawl$stratum1<-match(NAM.trawl$Vessel_Activity,unique(NAM.trawl$Vessel_Activity))
head(NAM.trawl)
# Bootstrap statistic for boot(): the mean of the resampled elements of `x`,
# where `d` is the index vector boot() supplies on each replicate.
samplemean <- function(x, d) {
  mean(x[d])
}
#### SUMMARISE MEAN AND CI FOR PRE-REG SAMPLES #######
## Stratified nonparametric bootstrap (10,000 replicates) of the mean rate
## for trawls WITHOUT a tori line, for total, fatal and heavy interactions.
prereg <- boot(NAM.trawl$Interaction_rate[NAM.trawl$Tori_line_deployed=="No"], samplemean, R=10000, strata=NAM.trawl$stratum1[NAM.trawl$Tori_line_deployed=="No"])
prereg.ci<-boot.ci(prereg,conf=0.95)
prereg.fat <- boot(NAM.trawl$fatal.rate[NAM.trawl$Tori_line_deployed=="No"], samplemean, R=10000, strata=NAM.trawl$stratum1[NAM.trawl$Tori_line_deployed=="No"])
prereg.fat.ci<-boot.ci(prereg.fat,conf=0.95)
prereg.heavy <- boot(NAM.trawl$heavy.rate[NAM.trawl$Tori_line_deployed=="No"], samplemean, R=10000, strata=NAM.trawl$stratum1[NAM.trawl$Tori_line_deployed=="No"])
prereg.heavy.ci<-boot.ci(prereg.heavy,conf=0.95)
#### SUMMARISE MEAN AND CI FOR POST-REG SAMPLES #######
## Same bootstrap for trawls WITH a tori line.
postreg <- boot(NAM.trawl$Interaction_rate[NAM.trawl$Tori_line_deployed=="Yes"], samplemean, R=10000, strata=NAM.trawl$stratum1[NAM.trawl$Tori_line_deployed=="Yes"])
postreg.ci<-boot.ci(postreg,conf=0.95)
postreg.fat <- boot(NAM.trawl$fatal.rate[NAM.trawl$Tori_line_deployed=="Yes"], samplemean, R=10000, strata=NAM.trawl$stratum1[NAM.trawl$Tori_line_deployed=="Yes"])
postreg.fat.ci<-boot.ci(postreg.fat,conf=0.95)
postreg.heavy <- boot(NAM.trawl$heavy.rate[NAM.trawl$Tori_line_deployed=="Yes"], samplemean, R=10000, strata=NAM.trawl$stratum1[NAM.trawl$Tori_line_deployed=="Yes"])
postreg.heavy.ci<-boot.ci(postreg.heavy,conf=0.95)
#### COMPILE OUTPUT INTO A SINGLE TABLE #######
## >80% reduction of seabird-cable interactions when tori lines are deployed
## boot.ci() index convention: $normal is (conf, lower, upper), so columns
## 2:3 are the CI; $percent stores lower/upper in columns 4:5 (see ?boot.ci)
bootsummary<-NAM.trawl %>% mutate(count=1) %>% group_by(Tori_line_deployed) %>%
  summarise(nsets=sum(count))
bootsummary$boot.mean<-c(prereg$t0,postreg$t0)
bootsummary$boot.lcl<-c(prereg.ci$normal[1,2],postreg.ci$normal[1,2])
bootsummary$boot.ucl<-c(prereg.ci$normal[1,3],postreg.ci$normal[1,3])
bootsummary$boot.mean.fatal<-c(prereg.fat$t0,postreg.fat$t0)
bootsummary$boot.lcl.fatal<-c(prereg.fat.ci$percent[1,4],postreg.fat.ci$percent[1,4])
bootsummary$boot.ucl.fatal<-c(prereg.fat.ci$percent[1,5],postreg.fat.ci$percent[1,5])
bootsummary$boot.mean.heavy<-c(prereg.heavy$t0,postreg.heavy$t0)
bootsummary$boot.lcl.heavy<-c(prereg.heavy.ci$percent[1,4],postreg.heavy.ci$percent[1,4])
bootsummary$boot.ucl.heavy<-c(prereg.heavy.ci$percent[1,5],postreg.heavy.ci$percent[1,5])
#### CALCULATE THE CHANGE IN INTERACTION RATE ####
# Percent change from the first to the second element of `x`, expressed as
# a percentage of the first element (positive = a reduction).
percchange <- function(x) {
  rate_diff <- x[1] - x[2]
  (rate_diff / x[1]) * 100
}
## adorn_totals() appends a third "Total" row, which is then overwritten in
## place with the percent change between the No/Yes tori-line rows
bootsummary<- as.data.frame(bootsummary) %>% adorn_totals("row")
bootsummary[3,3:11]<-apply(as.matrix(bootsummary[,3:11]),2,percchange)
bootsummary[3,1]<-"CHANGE(%)"
bootsummary
fwrite(bootsummary,"Namibia_trawl_interactions_BSL_comparison.csv")
#### PLOT OUTPUT ####
## mean heavy/medium interaction rate with bootstrap CI, by tori-line use
## (rows 1:2 only -- the CHANGE(%) row is excluded from the plot)
ggplot(bootsummary[1:2,], aes(x=Tori_line_deployed, y=boot.mean.heavy)) +
  geom_point(size=2) +
  geom_errorbar(aes(ymin=boot.lcl.heavy, ymax=boot.ucl.heavy), width=.1)+
  ## format axis ticks
  scale_y_continuous(name="Number of medium and heavy interactions/hour", limits=c(0,1.5), breaks=seq(0,1.5,0.3), labels=seq(0,1.5,0.3))+
  xlab("Tori lines deployed") +
  ## beautification of the axes
  theme(panel.background=element_rect(fill="white", colour="black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        axis.text.y=element_text(size=18, color="black"),
        axis.text.x=element_text(size=18, color="black"),
        axis.title=element_text(size=18),
        strip.text.x=element_text(size=18, color="black"),
        strip.text.y=element_text(size=18, color="black"),
        axis.title.y=element_text(margin=margin(0,20,0,0)),
        strip.background=element_rect(fill="white", colour="black"))
# ##############################################################
# #### PRODUCE BOOTSTRAPPED CONFIDENCE INTERVALS FOR CORRECTLY DEPLOYED TORI LINES
# ##############################################################
# THIS ANALYSIS IS NOT PARTICULARLY CONVINCING OR STRONG BECAUSE MANY LINES WERE UNOBSERVED AND HAVE NO INFO ABOUT CORRECTNESS
###For some reason this isn't calculating the right number of sets when I run it??
## NOTE that this should only be looking at post-regulation data in any case, preferably comparing no BSL use to correct BSL use
## updated on 18 Sept 2019 by Steffen after Nina's request
## no fatal interactions post-reg, so that aspect has been removed
#
# ## stratify the samples: exclude incorrectly deployed BSLs so the
# ## comparison is no BSL vs correctly deployed BSL
NAM.correct<- NAM.trawl %>% #filter(REG==1) %>%
  filter(!(Tori_line_deployed=="Yes" & BSLcorrect=="No")) %>%
  mutate(group=paste(Vessel_Activity,BSLcorrect, sep="_"))
NAM.correct$stratum1<-match(NAM.correct$Vessel_Activity,unique(NAM.correct$Vessel_Activity))
head(NAM.correct)
## this should be the same but isn't - there are 4 trawls where no tori line was 'correctly' deployed!!??
table(NAM.correct$Tori_line_deployed)
table(NAM.correct$BSLcorrect)
NAM.correct %>% filter(Tori_line_deployed=="No") %>% filter(BSLcorrect=="Yes")
#### SUMMARISE MEAN AND CI FOR SAMPLES WITHOUT BSL #######
## Stratified bootstrap (10,000 reps) as in the tori-line section above.
prereg <- boot(NAM.correct$Interaction_rate[NAM.correct$BSLcorrect=="No"], samplemean, R=10000, strata=NAM.correct$stratum1[NAM.correct$BSLcorrect=="No"])
prereg.ci<-boot.ci(prereg,conf=0.95)
prereg.heavy <- boot(NAM.correct$heavy.rate[NAM.correct$BSLcorrect=="No"], samplemean, R=10000, strata=NAM.correct$stratum1[NAM.correct$BSLcorrect=="No"])
prereg.heavy.ci<-boot.ci(prereg.heavy,conf=0.95)
#### SUMMARISE MEAN AND CI FOR SAMPLES WITH CORRECTLY DEPLOYED BSL #######
postreg <- boot(NAM.correct$Interaction_rate[NAM.correct$BSLcorrect=="Yes"], samplemean, R=10000, strata=NAM.correct$stratum1[NAM.correct$BSLcorrect=="Yes"])
postreg.ci<-boot.ci(postreg,conf=0.95)
postreg.heavy <- boot(NAM.correct$heavy.rate[NAM.correct$BSLcorrect=="Yes"], samplemean, R=10000, strata=NAM.correct$stratum1[NAM.correct$BSLcorrect=="Yes"])
postreg.heavy.ci<-boot.ci(postreg.heavy,conf=0.95)
#### COMPILE OUTPUT INTO A SINGLE TABLE #######
## $normal columns 2:3 = CI bounds; $percent columns 4:5 = CI bounds
bootsummary2<-NAM.correct %>% mutate(count=1) %>% group_by(BSLcorrect) %>%
  summarise(nsets=sum(count))
bootsummary2$boot.mean<-c(prereg$t0,postreg$t0)
bootsummary2$boot.lcl<-c(prereg.ci$normal[1,2],postreg.ci$normal[1,2])
bootsummary2$boot.ucl<-c(prereg.ci$normal[1,3],postreg.ci$normal[1,3])
bootsummary2$boot.mean.heavy<-c(prereg.heavy$t0,postreg.heavy$t0)
bootsummary2$boot.lcl.heavy<-c(prereg.heavy.ci$percent[1,4],postreg.heavy.ci$percent[1,4])
bootsummary2$boot.ucl.heavy<-c(prereg.heavy.ci$percent[1,5],postreg.heavy.ci$percent[1,5])
bootsummary2
#### CALCULATE THE CHANGE IN INTERACTION RATE ####
## Percent change from the no-BSL row to the correct-BSL row.
percchange<-function(x){((x[1]-x[2])/x[1])*100}
## BUG FIX: the original applied adorn_totals() to `bootsummary` (the table
## from the previous section) instead of `bootsummary2`, so the third row
## assigned below never existed on `bootsummary2` and the earlier table was
## clobbered with an extra totals row.
bootsummary2<- as.data.frame(bootsummary2) %>% adorn_totals("row")
## overwrite the appended "Total" row in place with the percent change
bootsummary2[3,3:8]<-apply(as.matrix(bootsummary2[,3:8]),2,percchange)
bootsummary2[3,1]<-"CHANGE(%)"
bootsummary2
fwrite(bootsummary2,"Namibia_trawl_interactions_correctBSL_comparison.csv")
#### PLOT OUTPUT ####
## mean total interaction rate with bootstrap CI, by correct BSL deployment
## (rows 1:2 only -- the change row is excluded)
ggplot(bootsummary2[1:2,], aes(x=BSLcorrect, y=boot.mean)) +
  geom_point(size=2) +
  geom_errorbar(aes(ymin=boot.lcl, ymax=boot.ucl), width=.1)+
  ## format axis ticks
  scale_y_continuous(name="Interactions per hour", limits=c(0,5), breaks=seq(0,5,1), labels=seq(0,5,1))+
  xlab("Tori lines correctly deployed") +
  ## beautification of the axes
  theme(panel.background=element_rect(fill="white", colour="black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        axis.text.y=element_text(size=18, color="black"),
        axis.text.x=element_text(size=18, color="black"),
        axis.title=element_text(size=18),
        strip.text.x=element_text(size=18, color="black"),
        strip.text.y=element_text(size=18, color="black"),
        axis.title.y=element_text(margin=margin(0,20,0,0)),
        strip.background=element_rect(fill="white", colour="black"))
##############################################################
#### PRODUCE BOOTSTRAPPED CONFIDENCE INTERVALS FOR REGULATION
##############################################################
## Same stratified bootstrap as the tori-line comparison above, but the two
## groups are now pre- (REG=0) vs post-regulation (REG=1).
## stratify the samples
NAM.trawl<- NAM.trawl %>%
  mutate(group=paste(Vessel_Activity,REG, sep="_"))
NAM.trawl$stratum1<-match(NAM.trawl$Vessel_Activity,unique(NAM.trawl$Vessel_Activity))
head(NAM.trawl)
#### SUMMARISE MEAN AND CI FOR PRE-REG SAMPLES #######
prereg <- boot(NAM.trawl$Interaction_rate[NAM.trawl$REG==0], samplemean, R=10000, strata=NAM.trawl$stratum1[NAM.trawl$REG==0])
prereg.ci<-boot.ci(prereg,conf=0.95)
prereg.fat <- boot(NAM.trawl$fatal.rate[NAM.trawl$REG==0], samplemean, R=10000, strata=NAM.trawl$stratum1[NAM.trawl$REG==0])
prereg.fat.ci<-boot.ci(prereg.fat,conf=0.95)
prereg.heavy <- boot(NAM.trawl$heavy.rate[NAM.trawl$REG==0], samplemean, R=10000, strata=NAM.trawl$stratum1[NAM.trawl$REG==0])
prereg.heavy.ci<-boot.ci(prereg.heavy,conf=0.95)
#### SUMMARISE MEAN AND CI FOR POST-REG SAMPLES #######
postreg <- boot(NAM.trawl$Interaction_rate[NAM.trawl$REG==1], samplemean, R=10000, strata=NAM.trawl$stratum1[NAM.trawl$REG==1])
postreg.ci<-boot.ci(postreg,conf=0.95)
postreg.fat <- boot(NAM.trawl$fatal.rate[NAM.trawl$REG==1], samplemean, R=10000, strata=NAM.trawl$stratum1[NAM.trawl$REG==1])
postreg.fat.ci<-boot.ci(postreg.fat,conf=0.95)
postreg.heavy <- boot(NAM.trawl$heavy.rate[NAM.trawl$REG==1], samplemean, R=10000, strata=NAM.trawl$stratum1[NAM.trawl$REG==1])
postreg.heavy.ci<-boot.ci(postreg.heavy,conf=0.95)
#### COMPILE OUTPUT INTO A SINGLE TABLE #######
## >80% reduction of seabird-cable interactions when tori lines are deployed
## NOTE: this redefines `bootsummary` (now grouped by REG); the fleet-wide
## extrapolation section below depends on this version.
bootsummary<-NAM.trawl %>% mutate(count=1) %>% group_by(REG) %>%
  summarise(nsets=sum(count))
bootsummary$boot.mean<-c(prereg$t0,postreg$t0)
bootsummary$boot.lcl<-c(prereg.ci$normal[1,2],postreg.ci$normal[1,2])
bootsummary$boot.ucl<-c(prereg.ci$normal[1,3],postreg.ci$normal[1,3])
bootsummary$boot.mean.fatal<-c(prereg.fat$t0,postreg.fat$t0)
bootsummary$boot.lcl.fatal<-c(prereg.fat.ci$percent[1,4],postreg.fat.ci$percent[1,4])
bootsummary$boot.ucl.fatal<-c(prereg.fat.ci$percent[1,5],postreg.fat.ci$percent[1,5])
bootsummary$boot.mean.heavy<-c(prereg.heavy$t0,postreg.heavy$t0)
bootsummary$boot.lcl.heavy<-c(prereg.heavy.ci$percent[1,4],postreg.heavy.ci$percent[1,4])
bootsummary$boot.ucl.heavy<-c(prereg.heavy.ci$percent[1,5],postreg.heavy.ci$percent[1,5])
#### CALCULATE THE CHANGE IN INTERACTION RATE ####
percchange<-function(x){((x[1]-x[2])/x[1])*100}
bootsummary<- as.data.frame(bootsummary) %>% adorn_totals("row")
bootsummary[3,3:11]<-apply(as.matrix(bootsummary[,3:11]),2,percchange)
bootsummary[3,1]<-"CHANGE(%)"
bootsummary
fwrite(bootsummary,"Namibia_trawl_interactions_REG_comparison.csv")
#### PLOT OUTPUT ####
## mean total interaction rate with bootstrap CI, pre vs post regulation
ggplot(bootsummary[1:2,], aes(x=REG, y=boot.mean)) +
  geom_point(size=2) +
  geom_errorbar(aes(ymin=boot.lcl, ymax=boot.ucl), width=.1)+
  ## format axis ticks
  scale_y_continuous(name="N seabird-cable interactions per hour", limits=c(0,8), breaks=seq(0,8,2), labels=seq(0,8,2))+
  xlab("Fisheries regulation in effect") +
  ## beautification of the axes
  theme(panel.background=element_rect(fill="white", colour="black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        axis.text.y=element_text(size=18, color="black"),
        axis.text.x=element_text(size=18, color="black"),
        axis.title=element_text(size=18),
        strip.text.x=element_text(size=18, color="black"),
        strip.text.y=element_text(size=18, color="black"),
        axis.title.y=element_text(margin=margin(0,20,0,0)),
        strip.background=element_rect(fill="white", colour="black"))
##############################################################
#### EXTRAPOLATION OF FLEET-WIDE MORTALITY
##############################################################
## Multiply fleet-wide trawl hours by the bootstrapped per-hour rates.
## NOTE(review): this section matches against `bootsummary$REG` and so
## depends on the REG-grouped `bootsummary` built in the section directly
## above -- earlier sections redefine `bootsummary` grouped by tori-line
## use, so run the sections in order.
## read in fleet-wide effort data
##cannot read in these files using this script for some reason
FReff <- read_excel("NAM_fleet_effort_freeze_trawl.xlsx",
                    sheet="ALL")
head(FReff)
WETeff <- read_excel("NAM_fleet_effort_wetfish_trawl.xlsx",
                     sheet="ALL")
head(WETeff)
## manipulate and summarise fleet-wide seabird mortality based on fatal interactions
FReffsummary<- FReff %>% mutate(Year=year(DATE)) %>%
  rename(effort=`DURATION (HOURS)`) %>%
  mutate(REG=if_else(Year>2015,1,0)) %>%
  group_by(REG,Year) %>%
  summarise(tot_effort=sum(effort,na.rm=T)) %>%
  mutate(bycatch=tot_effort*bootsummary$boot.mean.fatal[match(REG,bootsummary$REG)],
         bycatch.lcl=tot_effort*bootsummary$boot.lcl.fatal[match(REG,bootsummary$REG)],
         bycatch.ucl=tot_effort*bootsummary$boot.ucl.fatal[match(REG,bootsummary$REG)])
WETeffsummary<- WETeff %>% mutate(Year=year(DATE)) %>%
  rename(effort=`DURATION(HOURS)`) %>%
  mutate(REG=if_else(Year>2015,1,0)) %>%
  group_by(REG,Year) %>%
  summarise(tot_effort=sum(effort,na.rm=T)) %>%
  mutate(bycatch=tot_effort*bootsummary$boot.mean.fatal[match(REG,bootsummary$REG)],
         bycatch.lcl=tot_effort*bootsummary$boot.lcl.fatal[match(REG,bootsummary$REG)],
         bycatch.ucl=tot_effort*bootsummary$boot.ucl.fatal[match(REG,bootsummary$REG)])
fwrite(FReffsummary,"Namibia_freeze_trawl_extrapolations.csv")
fwrite(WETeffsummary,"Namibia_wetfish_trawl_extrapolations.csv")
## manipulate and summarise fleet-wide seabird mortality based on heavy interactions
FReff %>% mutate(Year=year(DATE)) %>%
  rename(effort=`DURATION (HOURS)`) %>%
  mutate(REG=if_else(Year>2015,1,0)) %>%
  group_by(REG,Year) %>%
  summarise(tot_effort=sum(effort,na.rm=T)) %>%
  mutate(bycatch=tot_effort*bootsummary$boot.mean.heavy[match(REG,bootsummary$REG)],
         bycatch.lcl=tot_effort*bootsummary$boot.lcl.heavy[match(REG,bootsummary$REG)],
         bycatch.ucl=tot_effort*bootsummary$boot.ucl.heavy[match(REG,bootsummary$REG)])
WETeff %>% mutate(Year=year(DATE)) %>%
  rename(effort=`DURATION(HOURS)`) %>%
  mutate(REG=if_else(Year>2015,1,0)) %>%
  group_by(REG,Year) %>%
  summarise(tot_effort=sum(effort,na.rm=T)) %>%
  mutate(bycatch=tot_effort*bootsummary$boot.mean.heavy[match(REG,bootsummary$REG)],
         bycatch.lcl=tot_effort*bootsummary$boot.lcl.heavy[match(REG,bootsummary$REG)],
         bycatch.ucl=tot_effort*bootsummary$boot.ucl.heavy[match(REG,bootsummary$REG)])
##############################################################
#### CALCULATION OF OBSERVER COVERAGE
##############################################################
## Observer coverage = observed hours as a percentage of fleet-wide
## wetfish-trawl fishing hours, by regulation period.
### summarise total observation hours ###
EFFORT<-obs.effort %>% mutate(Year=year(Date_Start_Observation)) %>%
  group_by(Year,REG) %>%
  summarize(obs.effort = sum(obs.effort))
### summarise total fishing hours ###
WETeff %>% mutate(Year=year(DATE)) %>%
  rename(effort=`DURATION(HOURS)`) %>%
  mutate(REG=if_else(Year>2015,1,0)) %>%
  group_by(REG,Year) %>%
  summarise(tot_effort=sum(effort,na.rm=T)) %>%
  inner_join(EFFORT, by=c("Year","REG")) %>%
  ungroup() %>%
  group_by(REG) %>%
  summarise(tot_effort=sum(tot_effort), obs.effort=sum(obs.effort)) %>%
  mutate(obs_rate=(obs.effort/tot_effort)*100)
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## REQUESTED UPON REVISION - ADDED ON 24 March 2020
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## PHASE-SPECIFIC COMPARISON
##############################################################
#### PRODUCE BOOTSTRAPPED CONFIDENCE INTERVALS FOR REGULATION FOR SETTING
##############################################################
## Heavy/medium interaction rates during net SETTING only, pre vs post
## regulation; unstratified bootstrap since only one activity remains.
## subset the data
NAM.set<- NAM.trawl %>% #filter(!(REG==0 & Tori_line_deployed=="Yes")) %>% ### Nina requested that on 31 May for rebuttal letter
  filter(Vessel_Activity=="Set")
#### SUMMARISE MEAN AND CI FOR PRE-REG SAMPLES #######
prereg.heavy <- boot(NAM.set$heavy.rate[NAM.set$REG==0], samplemean, R=10000)
prereg.heavy.ci<-boot.ci(prereg.heavy,conf=0.95)
#### SUMMARISE MEAN AND CI FOR POST-REG SAMPLES #######
postreg.heavy <- boot(NAM.set$heavy.rate[NAM.set$REG==1], samplemean, R=10000)
postreg.heavy.ci<-boot.ci(postreg.heavy,conf=0.95)
#### COMPILE OUTPUT INTO A SINGLE TABLE #######
## build summary table: n sets and heavy-rate CI per regulation period
bootsummary<-NAM.set %>% mutate(count=1) %>% group_by(REG) %>%
  summarise(nsets=sum(count))
bootsummary$boot.mean.heavy<-c(prereg.heavy$t0,postreg.heavy$t0)
bootsummary$boot.lcl.heavy<-c(prereg.heavy.ci$percent[1,4],postreg.heavy.ci$percent[1,4])
bootsummary$boot.ucl.heavy<-c(prereg.heavy.ci$percent[1,5],postreg.heavy.ci$percent[1,5])
#### CALCULATE THE CHANGE IN INTERACTION RATE ####
percchange<-function(x){((x[1]-x[2])/x[1])*100}
bootsummary<- as.data.frame(bootsummary) %>% adorn_totals("row")
bootsummary[3,3:5]<-apply(as.matrix(bootsummary[,3:5]),2,percchange)
bootsummary[3,1]<-"CHANGE(%)"
bootsummary
##############################################################
#### PRODUCE BOOTSTRAPPED CONFIDENCE INTERVALS FOR REGULATION FOR TRAWLING
##############################################################
## Same phase-specific comparison as above, for active TRAWLING only.
## subset the data
NAM.trawling<- NAM.trawl %>%
  filter(Vessel_Activity=="Trawl")
#### SUMMARISE MEAN AND CI FOR PRE-REG SAMPLES #######
prereg.heavy <- boot(NAM.trawling$heavy.rate[NAM.trawling$REG==0], samplemean, R=10000)
prereg.heavy.ci<-boot.ci(prereg.heavy,conf=0.95)
#### SUMMARISE MEAN AND CI FOR POST-REG SAMPLES #######
postreg.heavy <- boot(NAM.trawling$heavy.rate[NAM.trawling$REG==1], samplemean, R=10000)
postreg.heavy.ci<-boot.ci(postreg.heavy,conf=0.95)
#### COMPILE OUTPUT INTO A SINGLE TABLE #######
## build summary table: n sets and heavy-rate CI per regulation period
bootsummary<-NAM.trawling %>% mutate(count=1) %>% group_by(REG) %>%
  summarise(nsets=sum(count))
bootsummary$boot.mean.heavy<-c(prereg.heavy$t0,postreg.heavy$t0)
bootsummary$boot.lcl.heavy<-c(prereg.heavy.ci$percent[1,4],postreg.heavy.ci$percent[1,4])
bootsummary$boot.ucl.heavy<-c(prereg.heavy.ci$percent[1,5],postreg.heavy.ci$percent[1,5])
#### CALCULATE THE CHANGE IN INTERACTION RATE ####
percchange<-function(x){((x[1]-x[2])/x[1])*100}
bootsummary<- as.data.frame(bootsummary) %>% adorn_totals("row")
bootsummary[3,3:5]<-apply(as.matrix(bootsummary[,3:5]),2,percchange)
bootsummary[3,1]<-"CHANGE(%)"
bootsummary
##############################################################
#### PRODUCE BOOTSTRAPPED CONFIDENCE INTERVALS FOR REGULATION FOR HAULING
##############################################################
## Same phase-specific comparison as above, for HAULING only.
## subset the data
NAM.haul<- NAM.trawl %>%
  filter(Vessel_Activity=="Haul")
#### SUMMARISE MEAN AND CI FOR PRE-REG SAMPLES #######
prereg.heavy <- boot(NAM.haul$heavy.rate[NAM.haul$REG==0], samplemean, R=10000)
prereg.heavy.ci<-boot.ci(prereg.heavy,conf=0.95)
#### SUMMARISE MEAN AND CI FOR POST-REG SAMPLES #######
postreg.heavy <- boot(NAM.haul$heavy.rate[NAM.haul$REG==1], samplemean, R=10000)
postreg.heavy.ci<-boot.ci(postreg.heavy,conf=0.95)
#### COMPILE OUTPUT INTO A SINGLE TABLE #######
## build summary table: n sets and heavy-rate CI per regulation period
bootsummary<-NAM.haul %>% mutate(count=1) %>% group_by(REG) %>%
  summarise(nsets=sum(count))
bootsummary$boot.mean.heavy<-c(prereg.heavy$t0,postreg.heavy$t0)
bootsummary$boot.lcl.heavy<-c(prereg.heavy.ci$percent[1,4],postreg.heavy.ci$percent[1,4])
bootsummary$boot.ucl.heavy<-c(prereg.heavy.ci$percent[1,5],postreg.heavy.ci$percent[1,5])
#### CALCULATE THE CHANGE IN INTERACTION RATE ####
percchange<-function(x){((x[1]-x[2])/x[1])*100}
bootsummary<- as.data.frame(bootsummary) %>% adorn_totals("row")
bootsummary[3,3:5]<-apply(as.matrix(bootsummary[,3:5]),2,percchange)
bootsummary[3,1]<-"CHANGE(%)"
bootsummary
##############################################################
#### PRODUCE BOOTSTRAPPED CONFIDENCE INTERVALS FOR BSL FOR SETTING
##############################################################
## subset the data
NAM.set<- NAM.trawl %>%
filter(Vessel_Activity=="Set")
#### SUMMARISE MEAN AND CI FOR PRE-REG SAMPLES #######
prereg.heavy <- boot(NAM.set$heavy.rate[NAM.set$Tori_line_deployed=="No"], samplemean, R=10000)
prereg.heavy.ci<-boot.ci(prereg.heavy,conf=0.95)
#### SUMMARISE MEAN AND CI FOR POST-REG SAMPLES #######
postreg.heavy <- boot(NAM.set$heavy.rate[NAM.set$Tori_line_deployed=="Yes"], samplemean, R=10000)
postreg.heavy.ci<-boot.ci(postreg.heavy,conf=0.95)
#### COMPILE OUTPUT INTO A SINGLE TABLE #######
## >80% reduction of seabird-cable interactions when tori lines are deployed
bootsummary<-NAM.set %>% mutate(count=1) %>% group_by(Tori_line_deployed) %>%
summarise(nsets=sum(count))
bootsummary$boot.mean.heavy<-c(prereg.heavy$t0,postreg.heavy$t0)
bootsummary$boot.lcl.heavy<-c(prereg.heavy.ci$percent[1,4],postreg.heavy.ci$percent[1,4])
bootsummary$boot.ucl.heavy<-c(prereg.heavy.ci$percent[1,5],postreg.heavy.ci$percent[1,5])
#### CALCULATE THE CHANGE IN INTERACTION RATE ####
percchange<-function(x){((x[1]-x[2])/x[1])*100}
bootsummary<- as.data.frame(bootsummary) %>% adorn_totals("row")
bootsummary[3,3:5]<-apply(as.matrix(bootsummary[,3:5]),2,percchange)
bootsummary[3,1]<-"CHANGE(%)"
bootsummary
|
ccf7534a024950d530d22653ad4849bfb2fe7860
|
e5fa76d7fadb2eeb7d7407155f39b98df19425bd
|
/bookdown-master/codes/tempo_simu.R
|
d8fb573d5275b396ed7c4123ae2647763f6f5d49
|
[
"CC0-1.0"
] |
permissive
|
andreamirandagz/random_demography
|
5afb710d4f3868229283a75bb9b4605d2c366001
|
26d7915c4b360c149a39b23a75e7dd0cff18d424
|
refs/heads/master
| 2022-12-10T03:24:04.858146
| 2020-08-25T16:58:40
| 2020-08-25T16:58:40
| 268,879,687
| 0
| 0
| null | 2020-08-25T16:58:41
| 2020-06-02T18:35:26
| null |
UTF-8
|
R
| false
| false
| 5,482
|
r
|
tempo_simu.R
|
## Shift simulator
## We simulate births according to their original timing and then
## shift them by age and time according to a continuous shift function.
## We then show that changes in the mean age of the shifted births can
## be used to recover the original birth counts.
## Finally, we consider the case when the population consists of two
## ("hidden") sub-populations. The first population is identical to
## the population above. The second population are "early"
## births. Here we assume there is no change in timing, but there is a
## decline in the number of early births.
## We see what happens to the mean ages in this situation and whether
## we can recover the orignal births.
library(data.table)
source("/hdir/0/fmenares/Book/bookdown-master/codes/utility_functions.R")
source("/hdir/0/fmenares/Book/bookdown-master/codes//tempo_functions.R")
million = 10^6
thousand = 10^3
N <- 1 * million
year.vec <- 1990:2020
#######################################
## simulate originally timed births ##
#######################################
## we'll assume normal with age and uniform with time
x <- rnorm(N, mean = 30, sd = 4)
t <- runif(N, min = min(year.vec), max(year.vec)+1)
dt <- data.table(x, t)
par(mfrow = c(1,1))
if (N == 1000)
{
dt[, plot(t, x, cex = .6)]
}
#########################
## shifting the births ##
#########################
## we'll assume a continuous S-shaped cumulative shift
## shift.t <- 2*plogis(seq(-5, 5, length.out = length(year.vec)))
shift.t <- sin((year.vec - 1990)/2.5)
plot(year.vec, shift.t, type = "o",
main = "Cumulative shifts")
## include shifts as a column
Rt <- approx(x = year.vec, y = shift.t, xout = t)$y
dt[, Rt := approx(x = year.vec, y = shift.t, xout = t)$y]
## calculate shifted times and ages of births
dt[, t.obs := t + Rt]
dt[, x.obs := x + Rt]
## retain only the original time window (for convenience)
dt <- dt[floor(t.obs) %in% year.vec]
if (N == 1000)
{
dt[, plot(t, x, cex = .6, col = "grey")]
dt[, points(t.obs, x.obs, cex = .6, col = "orange", pch = 19)]
}
##########################################
## observed births counts and mean ages ##
##########################################
out <- dt[, .(Bt = .N, ## count the events
mut = mean(x.obs)), ## get mean age
by = .(year = floor(t.obs))] ## by whole years
out <- out[order(year)]
############################################
## change in mean age and adjusted counts ##
############################################
out[, rt.hat := center.diff(mut, end.fill = T)]
out[, Rt.hat := cumsum(rt.hat)]
out[, Bt.adj := Bt / (1 - rt.hat)]
######################
## plot the results ##
######################
par(mfrow = c(2,2))
out[, plot(year, Bt, ylim = c(.8, 1.2) * range(Bt),
main = "Observed Births")]
out[, plot(year, mut,
main = "Mean age of birth")]
out[, plot(year, center.diff(mut),
main = "Change in mean age of birth")]
abline(h = 0)
## observed, adjusted, and original births
Bt.orig.vec <- dt[, table(floor(t))]
out[, plot(year, Bt, ylim = c(.8, 1.5) * range(Bt),
main = "Observed and Adjusted Births")]
out[, lines(year, Bt.adj, col = "red")]
points(names(Bt.orig.vec), Bt.orig.vec, col = "grey")
legend("top", c("observed", "adjusted", "original"),
pch = c(1,-1,1), lty = c(-1, 1,-1),
col = c("black", "red", "grey"),
bty = "n")
## function version of tabulating and plotting
tempo_simu_plot_fun <- function(dt)
{
## requires x.obs and t.obs and
## (optionally) t, the original unshifted birth times
##########################################
## observed births counts and mean ages ##
##########################################
out <- dt[, .(Bt = .N, ## count the events
mut = mean(x.obs)), ## get mean age
by = .(year = floor(t.obs))] ## by whole years
out <- out[order(year)]
############################################
## change in mean age and adjusted counts ##
############################################
out[, rt.hat := center.diff(mut, end.fill = T)]
out[, Rt.hat := cumsum(rt.hat)]
out[, Bt.adj := Bt / (1 - rt.hat)]
######################
## plot the results ##
######################
par(mfrow = c(2,2))
out[, plot(year, Bt, ylim = c(.8, 1.2) * range(Bt),
main = "Observed Births")]
out[, plot(year, mut,
main = "Mean age of birth")]
out[, plot(year, center.diff(mut),
main = "Change in mean age of birth")]
## observed, adjusted, and original births
Bt.orig.vec <- dt[, table(floor(t))]
out[, plot(year, Bt, ylim = c(.8, 1.5) * range(Bt),
main = "Observed and Adjusted Births")]
out[, lines(year, Bt.adj, col = "red")]
points(names(Bt.orig.vec), Bt.orig.vec, col = "grey")
legend("top", c("observed", "adjusted", "original"),
pch = c(1,-1,1), lty = c(-1, 1,-1),
col = c("black", "red", "grey"),
bty = "n")
}
tempo_simu_plot_fun(dt)
## In-class exercises:
## (1) try with N of 4 million -- does it still work? What happens?
## (2) try with a shift function that goes up and down
shift.t <- sin((year.vec - 1990)/2.5)
plot(year.vec, shift.t, type = "o")
## Are the adjusted counts ever LESS than the observed counts? If so, when?
## (3) If the cumulative shift was Rt = a + 0.1*t, what would be a
## formula for tempo-adjusted counts of births? Sketch the 4 panels
## without the computer and then check to see if you're right.
|
7697abd121b6c727cbd389022bca4edce11be53d
|
5966fe8c6d639bb3c92f6e5de2c55644a081d702
|
/man/vision.Rd
|
bfe45fa2319c116f8748b767132c23ab366229ec
|
[] |
no_license
|
lwjohnst86/seer
|
6e4af7300557559443ca3ce7abc8813c0e645f69
|
7df2352bac17473b7a6ca2eff49352e931479a13
|
refs/heads/master
| 2021-01-18T23:43:29.388807
| 2018-02-26T22:27:27
| 2018-02-26T22:27:27
| 45,485,157
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 694
|
rd
|
vision.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vision.R
\name{vision}
\alias{vision}
\alias{vision_simple}
\alias{vision_sparse}
\title{Vision experience (aka plot theme).}
\usage{
vision_sparse(base_plot, base_size = 12, base_family = "Helvetica")
vision_simple(base_plot, base_size = 12, base_family = "Helvetica",
legend_position = c("right", "left", "bottom", "top"))
}
\arguments{
\item{base_plot}{The base_plot sent from the \code{\link{visualize}} chain}
\item{base_size}{Font size}
\item{base_family}{Font family}
}
\value{
Customizes the appearance of the plot.
}
\description{
Describe the vision, also known as setting the theme for your plot.
}
|
cf9b401eaeb8ea93ebfcad5d361584c32bb1a794
|
cb0fc27028b0ce887292a99dbad269abda3b9c82
|
/man/permutations.Rd
|
3fa76183aced8155f4b2de1bed2c2e27bcd58b94
|
[] |
no_license
|
cran/windfarmGA
|
ce1a14c585cf9170f6b54c166cf6c6e8f55ecf36
|
d8ec83780dbd26f50e6ea79a5d376f87507dc33c
|
refs/heads/master
| 2021-06-01T13:51:52.840809
| 2021-05-05T14:20:03
| 2021-05-05T14:20:03
| 95,279,120
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,281
|
rd
|
permutations.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crossover.R
\name{permutations}
\alias{permutations}
\title{Enumerate the Combinations or Permutations of the Elements of a
Vector}
\usage{
permutations(n, r, v = 1:n)
}
\arguments{
\item{n}{Size of the source vector}
\item{r}{Size of the target vectors}
\item{v}{Source vector. Defaults to 1:n}
}
\value{
Returns a matrix where each row contains a vector of length r.
}
\description{
permutations enumerates the possible permutations. The
function is forked and minified from gtools::permutations
}
\references{
Venables, Bill. "Programmers Note", R-News, Vol 1/1, Jan. 2001.
\url{https://cran.r-project.org/doc/Rnews/}
}
\seealso{
Other Helper Functions:
\code{\link{dup_coords}()},
\code{\link{getDEM}()},
\code{\link{getISO3}()},
\code{\link{get_grids}()},
\code{\link{grid_area}()},
\code{\link{hexa_area}()},
\code{\link{isSpatial}()},
\code{\link{readintegerSel}()},
\code{\link{readinteger}()},
\code{\link{splitAt}()},
\code{\link{windata_format}()}
}
\author{
Original versions by Bill Venables
\email{Bill.Venables@cmis.csiro.au.} Extended to handle repeats.allowed
by Gregory R. Warnes \email{greg@warnes.net.}
}
\concept{Helper Functions}
|
00eaee718f5be926b967cfd64562b96504a14d4b
|
2b595692f51b784b49bf7a67351e6013065e3116
|
/plot1.R
|
fd74230650d43c3e83abe9dd6d15b64b6063401c
|
[] |
no_license
|
0xts/ExData_Plotting1
|
39be914c53cdf603879c8a74224959fdc6588a0e
|
8a2410e5e3fd9fbe4d787843732c46f3efb5c2c6
|
refs/heads/master
| 2023-06-12T14:04:38.762286
| 2021-07-04T08:23:11
| 2021-07-04T08:23:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,178
|
r
|
plot1.R
|
file <- "./household_power_consumption.txt"
if(!file.exists(file)){
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "./data.zip")
unzip("./data.zip")
}else{
if(dir.exists("exdata_data_household_power_consumption")){
data <- read.table("./exdata_data_household_power_consumption/household_power_consumption.txt",
header = TRUE, sep = ";", na.strings = "?")
}else{
data <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
}
}
data$DT <- paste(data$Date, data$Time)
data$DT <- as.Date(data$DT, format = "%d/%m/%Y %H:%M:%S")
data <- data[,-c(1,2)]
data <- data[,c(8,c(1:7))]
data <- data[complete.cases(data),]
condition <- data$DT>as.Date("2007-01-31", format = "%Y-%m-%d")&data$DT<as.Date("2007-02-03", format = "%Y-%m-%d")
data <- data[condition,]
rm(condition)
histogram <- hist(data$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red")
dev2bitmap("plot1.png", type = "png16m", width = 480, height = 480, units = "px")
|
86909d86f526ad2136c53f3fb2f92f50f1ec5ae1
|
80c9b1baa262883d69f9f155b662142ec5b1888b
|
/scripts/ensembl_details.R
|
409b307c691d0e5581411907a59ce51deebe96d5
|
[] |
no_license
|
russHyde/bfx_201909
|
b054eaf72073b29fc3d4bd2f71f291445cb6c486
|
58923e9eb3e83e2c64d647a1647a71f0d5212b33
|
refs/heads/master
| 2020-07-04T16:10:44.143091
| 2019-10-01T13:59:54
| 2019-10-01T13:59:54
| 202,333,892
| 3
| 0
| null | 2019-10-01T13:53:45
| 2019-08-14T11:07:37
|
Python
|
UTF-8
|
R
| false
| false
| 1,525
|
r
|
ensembl_details.R
|
suppressPackageStartupMessages({
library(magrittr)
library(readr)
library(rtracklayer)
library(argparse)
})
###############################################################################
# copied from snakemake r script: `get_ensembl_gene_details.smk.R` in
# `rnaseq_workflow`
parse_gtf <- function(gtf_path){
stopifnot(file.exists(gtf_path))
gtf <- rtracklayer::import(gtf_path)
}
get_genes_from_gtf <- function(gtf){
gtf[which(gtf$type == "gene"), ]
}
get_gene_df_from_gtf <- function(gtf_path, reqd_columns){
gene_df <- gtf_path %>%
parse_gtf() %>%
get_genes_from_gtf() %>%
as.data.frame()
gene_df[, reqd_columns]
}
###############################################################################
main <- function(gtf_path, out_path) {
gtf_columns <- paste(
"gene", c("id", "version", "name", "source", "biotype"), sep = "_"
)
results <- get_gene_df_from_gtf(gtf_path, gtf_columns)
readr::write_tsv(results, path = out_path)
}
###############################################################################
define_parser <- function() {
parser <- argparse::ArgumentParser()
parser$add_argument("--gtf", dest = "gtf_path")
parser$add_argument("-o", "--out", dest = "out_path")
parser
}
###############################################################################
parser <- define_parser()
args <- parser$parse_args()
main(gtf_path = args$gtf_path, out_path = args$out_path)
###############################################################################
|
5955e962e4b729e0bce541e89d26db095f718506
|
bda47498922e92041f38c335f25aa695b8f117ee
|
/plot_agb_drivers_restor_TS.R
|
59e13923552a2ce32a854b60ad676e5ab9f65f80
|
[] |
no_license
|
Kochal/permanence
|
c6eb565fc37bd9bf3a13393df79b5807bfec7ffc
|
7d14c69c8b8902945f9ab6df72c7d99c3dae82af
|
refs/heads/master
| 2023-03-27T22:17:59.241134
| 2021-03-12T01:05:20
| 2021-03-12T01:05:20
| 346,888,716
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,698
|
r
|
plot_agb_drivers_restor_TS.R
|
library(ncdf4); library(ggplot2); library(reshape)
source('/home/akoch/scripts/R_code/fun_Utility.R')
# source('~/Documents/R_code/fun_Utility.R')
# wdir <- '/home/ucfaako/Documents/future_forests/data/'
carbon <- 't3C'
carbon_dir <- 'totc'
carbon_name <- 'Total carbon'
wdir <- paste0('/home/terraces/projects/LPJ_futuretropics/', carbon_dir, '_restor/')
co2_1850_restor.file <- list.files(wdir, pattern = glob2rx(paste0(carbon, '_restor_fixco2_1850_fire_ssp*_ensmean.nc')), full.names = TRUE)
co2_2014_restor.file <- list.files(wdir, pattern = glob2rx(paste0(carbon, '_restor_fixco2_2014_fire_ssp*_ensmean.nc')), full.names = TRUE)
restor.file <- list.files(wdir, pattern = glob2rx(paste0(carbon, '_restor_fire_ssp*_ensmean.nc')), full.names = TRUE)
ctl.file <- list.files(wdir, pattern = glob2rx(paste0(carbon, '_ctl_fire_ssp*_ensmean.nc')), full.names = TRUE)
co2_1850_ctl.file <- list.files(wdir, pattern = glob2rx(paste0(carbon, '_ctl_fixco2_1850_fire_ssp*_ensmean.nc')), full.names = TRUE)
co2_2014_ctl.file <- list.files(wdir, pattern = glob2rx(paste0(carbon, '_ctl_fixco2_2014_fire_ssp*_ensmean.nc')), full.names = TRUE)
ssp <- c('ssp126', 'ssp245', 'ssp370', 'ssp585')
ssp_labs <- c('SSP1-26', 'SSP2-45', 'SSP3-70', 'SSP5-85')
names(ssp_labs) <- ssp
scenario <- c('ctl', 'restor', 'restor_co2_2014', 'restor_co2_1850', 'ctl_co2_2014', 'ctl_co2_1850')
coln <- paste(rep(scenario, each = length(ssp)), ssp, sep = '_')
tx <- 2014:2100
sim.names <- c('SSP landuse (2014)',
'Restoration',
expression('SSP landuse (2014) + fixed'~CO[2]~'(2014)'),
expression('Restoration + fixed'~CO[2]~'(2014)'),
expression('SSP landuse (2014) + fixed'~CO[2]~'(1850)'),
expression('Restoration + fixed'~CO[2]~'(1850)'))
NC2Df <- function(varname){
co2_1850_restor.a <- lapply(co2_1850_restor.file, nc_read, varname)
co2_2014_restor.a <- lapply(co2_2014_restor.file, nc_read, varname)
restor.a <- lapply(restor.file, nc_read, varname)
ctl.a <- lapply(ctl.file, nc_read, varname)
co2_1850_ctl.a <- lapply(co2_1850_ctl.file, nc_read, varname)
co2_2014_ctl.a <- lapply(co2_2014_ctl.file, nc_read, varname)
df <- data.frame(ctl=do.call('cbind', ctl.a),
restor=do.call('cbind', restor.a),
restor_co2_2014=do.call('cbind', co2_2014_restor.a),
restor_co2_1850=do.call('cbind', co2_1850_restor.a),
ctl_co2_2014=do.call('cbind', co2_2014_ctl.a),
ctl_co2_1850=do.call('cbind', co2_1850_ctl.a))
names(df) <- coln
# if (carbon=='totc'){
# df[-1,c(5:ncol(df))] <- df[-1,c(5:ncol(df))] + 8.5e15
# } else {
# df[-1,c(5:ncol(df))] <- df[-1,c(5:ncol(df))] - 3e15
# }
df$date <- tx
df <- melt.data.frame(df, id.vars = 'date')
df$variable <- as.character(df$variable)
df$value <- df$value * 1e-15
df$ssp <- substring(df$variable, nchar(df$variable)-5, nchar(df$variable))
df$scenario <- substring(df$variable, 1, nchar(df$variable)-7)
return(df)
}
mean.df <- NC2Df('mean')
conf1.df <- NC2Df('conf1')
conf2.df <- NC2Df('conf2')
mean.df$lower <- conf1.df$value
mean.df$upper <- conf2.df$value
mean.df$scenario <- factor(mean.df$scenario, levels = c('ctl', 'restor',
'ctl_co2_2014', 'restor_co2_2014',
'ctl_co2_1850', 'restor_co2_1850'))
if (carbon=='totc'){
agb_1970 <- 138.4146 / 100 # to Pg C percent
} else {
agb_1970 <- 152.2093 / 100 # to Pg C percent
}
mean.df <- mean.df[!mean.df$scenario %in% c('ctl_co2_1850', 'restor_co2_1850'),]
## plots #######################################################################
clr <- c('#8c2d04','#084594', '#f16913', '#4292c6', '#fdae6b', '#9ecae1')
p <- ggplot(mean.df, aes(date, value, color = scenario)) + geom_line(stat='identity')
p <- p + geom_ribbon(aes(ymin = lower, ymax = upper, fill = scenario), colour = NA, alpha = 0.2) + guides(color = guide_legend(ncol=2), fill=guide_legend(ncol=2))
p <- p + scale_fill_discrete(type = clr, name = '', labels = sim.names) + scale_color_discrete(type = clr, name = '', labels = sim.names)
# p <- p + scale_fill_manual(values = clr) + scale_color_manual(values = clr)
p <- p + scale_x_continuous(breaks = seq(2015, 2100, 15))
#p <- p + scale_y_continuous(sec.axis = sec_axis(trans=~./agb_1970, name = paste(carbon_name, 'change w.r.t. 1971-2000 (%)'), breaks = seq(0, 150, 25)))
p <- p + labs(title = 'Total carbon change', x = 'Year', y = paste(carbon_name, 'change w.r.t. 2014 (Pg C)')) + theme_bw(base_size = 12) + theme(legend.position = 'top')
p <- p + facet_wrap(ssp~., labeller = labeller(ssp=ssp_labs))
ggsave(paste0(wdir, 'd',carbon, '_2014-2100_ts.pdf'),
plot = p, device = 'pdf', width = 210, height = 170, units = 'mm', dpi = 300)
##
diffmean.df <- rbind(
data.frame(date=mean.df$date[mean.df$scenario=='restor'],
value=mean.df$value[mean.df$scenario=='restor'] - mean.df$value[mean.df$scenario=='ctl'],
lower=mean.df$lower[mean.df$scenario=='restor'] - mean.df$lower[mean.df$scenario=='ctl'],
upper=mean.df$upper[mean.df$scenario=='restor'] - mean.df$upper[mean.df$scenario=='ctl'],
ssp=mean.df$ssp[mean.df$scenario=='restor'], scenario='transient CO2'),
data.frame(date=mean.df$date[mean.df$scenario=='restor_co2_2014'],
value=mean.df$value[mean.df$scenario=='restor_co2_2014'] - mean.df$value[mean.df$scenario=='ctl_co2_2014'],
lower=mean.df$lower[mean.df$scenario=='restor_co2_2014'] - mean.df$lower[mean.df$scenario=='ctl_co2_2014'],
upper=mean.df$upper[mean.df$scenario=='restor_co2_2014'] - mean.df$upper[mean.df$scenario=='ctl_co2_2014'],
ssp=mean.df$ssp[mean.df$scenario=='restor_co2_2014'], scenario='fixed CO2 (2014)')
# data.frame(date=mean.df$date[mean.df$scenario=='restor_co2_1850'],
# value=mean.df$value[mean.df$scenario=='restor_co2_1850'] - mean.df$value[mean.df$scenario=='ctl_co2_1850'],
# lower=mean.df$lower[mean.df$scenario=='restor_co2_1850'] - mean.df$lower[mean.df$scenario=='ctl_co2_1850'],
# upper=mean.df$upper[mean.df$scenario=='restor_co2_1850'] - mean.df$upper[mean.df$scenario=='ctl_co2_1850'],
# ssp=mean.df$ssp[mean.df$scenario=='restor_co2_1850'], scenario='fixed CO2 (1850)')
)
ssp_colours <- c('#66A36C', '#3DACC6', '#D28E54', '#C3474E')
diffmean.df$scenario <- factor(diffmean.df$scenario, levels = c('transient CO2', 'fixed CO2 (2014)', 'fixed CO2 (1850)'),
labels = c(bquote(transient~CO[2]), bquote(fixed~CO[2]~'(2014)'), bquote(fixed~CO[2]~'(1850)')))
## expression in labels
p2 <- ggplot(diffmean.df, aes(date, value, color = ssp)) + geom_line(stat='identity')
p2 <- p2 + geom_ribbon(aes(ymin = lower, ymax = upper, fill = ssp), colour = NA, alpha = 0.2)
p2 <- p2 + scale_fill_manual(values = ssp_colours, name = '', labels = ssp_labs) + scale_color_manual(values = ssp_colours, name = '', labels = ssp_labs)
p2 <- p2 + scale_x_continuous(breaks = seq(2015, 2100, 25))
p2 <- p2 + labs(title = 'Total carbon gain from restoration', x = 'Year', y = paste('Total biomass carbon (Pg C)'))
p2 <- p2 + theme_bw(base_size = 12) + theme(legend.position = 'top', legend.title = element_blank())
p2 <- p2 + facet_wrap(scenario~., labeller = label_parsed)
p2 <- p2 + coord_trans(y = squash_axis(0, 15, 10))
ggsave(paste0(wdir, 'diff',carbon, '_2014-2100_ts.pdf'),
plot = p2, device = 'pdf', width = 210, height = 120, units = 'mm', dpi = 300)
## FIRST DERIVATIVE
for (s in ssp){
for (sc in unique(diffmean.df$scenario)){
diffmean.df$diff[diffmean.df$ssp==s & diffmean.df$scenario==sc] <- c(0, diff(diffmean.df$value[diffmean.df$ssp==s & diffmean.df$scenario==sc]))
diffmean.df$lower[diffmean.df$ssp==s & diffmean.df$scenario==sc] <- c(0, diff(diffmean.df$lower[diffmean.df$ssp==s & diffmean.df$scenario==sc]))
diffmean.df$upper[diffmean.df$ssp==s & diffmean.df$scenario==sc] <- c(0, diff(diffmean.df$upper[diffmean.df$ssp==s & diffmean.df$scenario==sc]))
}
}
## uptake rates and trends ###################################################
# uptake in 2030
diffmean.df[diffmean.df$date==2031 & diffmean.df$scenario=='transient ~ CO[2]',]
diffmean.df$diff[diffmean.df$date==2031 & diffmean.df$scenario=='transient ~ CO[2]'] -
diffmean.df$lower[diffmean.df$date==2031 & diffmean.df$scenario=='transient ~ CO[2]']
diffmean.df[diffmean.df$date==2032 & diffmean.df$scenario=='transient ~ CO[2]',]
diffmean.df$diff[diffmean.df$date==2032 & diffmean.df$scenario=='transient ~ CO[2]'] -
diffmean.df$lower[diffmean.df$date==2032 & diffmean.df$scenario=='transient ~ CO[2]']
diffmean.df[diffmean.df$date==2050 & diffmean.df$scenario=='transient ~ CO[2]',]
diffmean.df$diff[diffmean.df$date==2050 & diffmean.df$scenario=='transient ~ CO[2]'] -
diffmean.df$lower[diffmean.df$date==2050 & diffmean.df$scenario=='transient ~ CO[2]']
diffmean.df[diffmean.df$date==2100 & diffmean.df$scenario=='transient ~ CO[2]',]
diffmean.df$diff[diffmean.df$date==2100 & diffmean.df$scenario=='transient ~ CO[2]'] -
diffmean.df$lower[diffmean.df$date==2100 & diffmean.df$scenario=='transient ~ CO[2]']
# trend
for (SSP in unique(diffmean.df$ssp)){
tnd.df <- diffmean.df[diffmean.df$date >= 2032 & diffmean.df$ssp==SSP & diffmean.df$scenario=='transient ~ CO[2]',]
tnd <- lm(diff ~ date, tnd.df)
tnd <- summary(tnd)
print(paste0('Trend ', tnd$coefficients[2], ' p ', tnd$coefficients[8]))
}
diffmean.df[diffmean.df$date==2031 & diffmean.df$scenario=='fixed ~ CO[2] ~ "(2014)"',]
diffmean.df$diff[diffmean.df$date==2031 & diffmean.df$scenario=='fixed ~ CO[2] ~ "(2014)"'] -
diffmean.df$lower[diffmean.df$date==2031 & diffmean.df$scenario=='fixed ~ CO[2] ~ "(2014)"']
diffmean.df[diffmean.df$date==2032 & diffmean.df$scenario=='fixed ~ CO[2] ~ "(2014)"',]
diffmean.df$diff[diffmean.df$date==2032 & diffmean.df$scenario=='fixed ~ CO[2] ~ "(2014)"'] -
diffmean.df$lower[diffmean.df$date==2032 & diffmean.df$scenario=='fixed ~ CO[2] ~ "(2014)"']
diffmean.df[diffmean.df$date==2050 & diffmean.df$scenario=='fixed ~ CO[2] ~ "(2014)"',]
diffmean.df$diff[diffmean.df$date==2050 & diffmean.df$scenario=='fixed ~ CO[2] ~ "(2014)"'] -
diffmean.df$lower[diffmean.df$date==2050 & diffmean.df$scenario=='fixed ~ CO[2] ~ "(2014)"']
diffmean.df[diffmean.df$date==2100 & diffmean.df$scenario=='fixed ~ CO[2] ~ "(2014)"',]
diffmean.df$diff[diffmean.df$date==2100 & diffmean.df$scenario=='fixed ~ CO[2] ~ "(2014)"'] -
diffmean.df$lower[diffmean.df$date==2100 & diffmean.df$scenario=='fixed ~ CO[2] ~ "(2014)"']
for (SSP in unique(diffmean.df$ssp)){
tnd.df <- diffmean.df[diffmean.df$date >= 2032 & diffmean.df$ssp==SSP & diffmean.df$scenario=='fixed ~ CO[2] ~ "(2014)"',]
tnd <- lm(diff ~ date, tnd.df)
tnd <- summary(tnd)
print(paste0('Trend ', tnd$coefficients[2], ' p ', tnd$coefficients[8]))
}
#############################################################################
p3 <- ggplot(diffmean.df, aes(date, diff, color = ssp)) + geom_line(stat='identity')
p3 <- p3 + geom_ribbon(aes(ymin = lower, ymax = upper, fill = ssp), colour = NA, alpha = 0.2)
p3 <- p3 + scale_fill_manual(values = ssp_colours, name = '', labels = ssp_labs) + scale_color_manual(values = ssp_colours, name = '', labels = ssp_labs)
p3 <- p3 + scale_x_continuous(breaks = seq(2015, 2100, 25))# + scale_y_continuous(limits=c(-0.25, 0.75))
p3 <- p3 + labs(title = 'Annual change in total carbon in restration area w.r.t. control', x = 'Year',
y = expression('Total carbon gain from restoration (Pg C year'^{-1}*')'))
p3 <- p3 + theme_bw(base_size = 12) + theme(legend.position = 'top')
p3 <- p3 + facet_wrap(scenario~., labeller = label_parsed) + coord_trans(y = squash_axis(0.3, 1.4, 10))
ggsave(paste0(wdir, 'derivative_',carbon, '_2014-2100_ts.pdf'),
plot = p3, device = 'pdf', width = 210, height = 120, units = 'mm', dpi = 300)
library(zoo)
for (s in ssp){
for (sc in unique(diffmean.df$scenario)){
diffmean.df$diff[diffmean.df$ssp==s & diffmean.df$scenario==sc] <- rollmean(diffmean.df$diff[diffmean.df$ssp==s & diffmean.df$scenario==sc], 10, align = 'right', na.pad = TRUE)
diffmean.df$upper[diffmean.df$ssp==s & diffmean.df$scenario==sc] <- rollmean(diffmean.df$upper[diffmean.df$ssp==s & diffmean.df$scenario==sc], 10, align = 'right', na.pad = TRUE)
diffmean.df$lower[diffmean.df$ssp==s & diffmean.df$scenario==sc] <- rollmean(diffmean.df$lower[diffmean.df$ssp==s & diffmean.df$scenario==sc], 10, align = 'right', na.pad = TRUE)
}
}
p4 <- ggplot(diffmean.df, aes(date, diff, color = ssp)) + geom_line(stat='identity')
p4 <- p4 + geom_ribbon(aes(ymin = lower, ymax = upper, fill = ssp), colour = NA, alpha = 0.2)
p4 <- p4 + scale_fill_manual(values = ssp_colours, name = '', labels = ssp_labs) + scale_color_manual(values = ssp_colours, name = '', labels = ssp_labs)
p4 <- p4 + scale_x_continuous(breaks = seq(2015, 2100, 25))# + scale_y_continuous(limits=c(-0.25, 0.75))
p4 <- p4 + labs(title = 'Annual change in total carbon in restration area w.r.t. control', x = 'Year',
y = expression('Total carbon gain from restoration (Pg C year'^{-1}*')'))
p4 <- p4 + theme_bw(base_size = 12) + theme(legend.position = 'top')
p4 <- p4 + facet_wrap(scenario~., labeller = label_parsed) + coord_trans(y = squash_axis(0.3, 1.4, 10))
ggsave(paste0(wdir, 'derivative_',carbon, '_2014-2100_ts_smooth.pdf'),
plot = p4, device = 'pdf', width = 190, height = 120, units = 'mm', dpi = 300)
ggarrange(list(p2, p3), ncol = 1, labels = 'AUTO', common.legend = TRUE)
|
b1dad782d86cc67ead8cd74f953251a9a4d401c7
|
3aee1d334563dd323bd7dd16f19036234948b7c5
|
/R/count_taxa.R
|
42a32e5cc9cc428d0879f22ee4ffc61da4163c0f
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
helixcn/plantlist
|
78f020be3cecca367b28314d974e8c8920fecf20
|
7362677dbb4d304c311699c76c1ca75e741c362b
|
refs/heads/master
| 2022-08-17T02:53:53.556080
| 2022-08-02T05:38:05
| 2022-08-02T05:38:05
| 50,150,703
| 21
| 15
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,212
|
r
|
count_taxa.R
|
#' Get the number of taxa under each taxonomic level for a dataframe generated
#' by the function CTPL
#'
#' Compute the number of species under each family, each genus, and each group.
#' See the value secion.
#'
#' Simple function summarizing the number of taxa under each taxonomic
#' level.
#'
#' @param checklist_dat A data.frame generated by CTPL or in the same format.
#' @return \item{NO_OF_FAMILIES}{Number of families in the data.frame}
#'
#' \item{NO_OF_GENERA}{Number of genera in the data.frame}
#'
#' \item{NO_OF_SPECIES}{Number of species in the data.frame}
#'
#' \item{NO_OF_FAMILIES_BY_GROUP}{Number of families under each group of higher
#' plants, namely, Moss, Ferns and Lycophytes, Gymnosperms, Angiosperms}
#'
#' \item{NO_OF_GENERA_BY_GROUP }{Number of genera under each group, namely,
#' Moss, Ferns and lycophytes, Gymnosperms, Angiosperms}
#'
#' \item{NO_OF_SPECIES_BY_GROUP }{Number of species under each group, namely,
#' Moss, Ferns and lycophytes, Gymnosperms, Angiosperms}
#'
#' \item{NO_OF_GENERA_BY_FAMILY }{The name is self-explanatory}
#'
#' \item{NO_OF_SPECIES_BY_FAMILY}{The name is self-explanatory}
#'
#' \item{NO_OF_SPECIES_BY_GENUS }{The name is self-explanatory}
#' @author Jinlong Zhang
#' @seealso \code{\link{make_checklist}}, \code{\link{CTPL}}
#' @references None
#' @examples
#'
#' ## Do not run
#' ## See the vignette
#'
#' @export count_taxa
count_taxa <-
function(checklist_dat) {
checklist_dat <-
subset(
checklist_dat,
select = c(
"YOUR_SEARCH",
"GROUP",
"SPECIES_FULL",
"FAMILY_NUMBER",
"FAMILY",
"GENUS",
"SPECIES"
)
)
if (any(is.na(checklist_dat))) {
warning(paste(
"Taxa: '",
paste(checklist_dat$YOUR_SEARCH[is.na(checklist_dat$SPECIES)],
collapse = ", "),
"' do(es) not have full scientific name, ignored\n",
sep = ""
))
}
checklist_dat <- stats::na.omit(checklist_dat)
# Add a number to each group
checklist_dat$GROUP <-
ifelse(
checklist_dat$GROUP == "Bryophytes",
paste("1", checklist_dat$GROUP),
checklist_dat$GROUP
)
checklist_dat$GROUP <-
ifelse(
checklist_dat$GROUP == "Ferns and lycophytes",
paste("2", checklist_dat$GROUP),
checklist_dat$GROUP
)
checklist_dat$GROUP <- ifelse(is.na(checklist_dat$GROUP),
"", checklist_dat$GROUP)
checklist_dat$GROUP <-
ifelse(
checklist_dat$GROUP == "Gymnosperms",
paste("3", checklist_dat$GROUP),
checklist_dat$GROUP
)
checklist_dat$GROUP <-
ifelse(
checklist_dat$GROUP == "Angiosperms",
paste("4", checklist_dat$GROUP),
checklist_dat$GROUP
)
checklist_dat$FAMILY_NUMBER <-
gsub("[^0-9]", "", checklist_dat$FAMILY_NUMBER)
# Sorting by GROUP, FAMILY_NUMBER and SPECIES_FULL
checklist_dat[is.na(checklist_dat)] <- ""
checklist_dat3 <- checklist_dat[order(checklist_dat$GROUP,
checklist_dat$FAMILY_NUMBER,
checklist_dat$SPECIES_FULL),]
no_genera_by_family_dat <-
unique(data.frame(GENUS = checklist_dat3$GENUS,
FAMILY = checklist_dat3$FAMILY))
no_genera_by_family <-
stats::aggregate(no_genera_by_family_dat$GENUS,
list(no_genera_by_family_dat$FAMILY),
"length")
colnames(no_genera_by_family) <- c("family", "no_of_genera")
no_species_by_family_dat <-
unique(data.frame(SPECIES = checklist_dat3$SPECIES,
FAMILY = checklist_dat3$FAMILY))
no_species_by_family <-
stats::aggregate(
no_species_by_family_dat$SPECIES,
list(no_species_by_family_dat$FAMILY),
"length"
)
colnames(no_species_by_family) <-
c("family", "no_of_species")
no_genera_by_group_dat <-
unique(data.frame(GENUS = checklist_dat3$GENUS,
GROUP = checklist_dat3$GROUP))
no_genera_by_group <-
stats::aggregate(no_genera_by_group_dat$GENUS,
list(no_genera_by_group_dat$GROUP),
"length")
colnames(no_genera_by_group) <- c("group", "no_of_genera")
no_species_by_group_dat <-
unique(data.frame(SPECIES = checklist_dat3$SPECIES,
GROUP = checklist_dat3$GROUP))
no_species_by_group <-
stats::aggregate(no_species_by_group_dat$SPECIES,
list(no_species_by_group_dat$GROUP),
"length")
colnames(no_species_by_group) <- c("group", "no_of_species")
no_families_by_group_dat <-
unique(data.frame(FAMILY = checklist_dat3$FAMILY,
GROUP = checklist_dat3$GROUP))
no_families_by_group <-
stats::aggregate(
no_families_by_group_dat$FAMILY,
list(no_families_by_group_dat$GROUP),
"length"
)
colnames(no_families_by_group) <-
c("group", "no_of_families")
no_species_by_genus_dat <-
unique(data.frame(SPECIES = checklist_dat3$SPECIES,
GENUS = checklist_dat3$GENUS))
no_species_by_genus <-
stats::aggregate(no_species_by_genus_dat$SPECIES,
list(no_species_by_genus_dat$GENUS),
"length")
colnames(no_species_by_genus) <- c("genus", "no_of_species")
no_families <- length(unique(checklist_dat3$FAMILY))
no_genera <- length(unique(checklist_dat3$GENUS))
no_species <- length(unique(checklist_dat3$SPECIES))
return(
list(
NO_OF_FAMILIES = no_families,
NO_OF_GENERA = no_genera,
NO_OF_SPECIES = no_species,
NO_OF_FAMILIES_BY_GROUP = no_families_by_group,
NO_OF_GENERA_BY_GROUP = no_genera_by_group,
NO_OF_SPECIES_BY_GROUP = no_species_by_group,
NO_OF_GENERA_BY_FAMILY = no_genera_by_family,
NO_OF_SPECIES_BY_FAMILY = no_species_by_family,
NO_OF_SPECIES_BY_GENUS = no_species_by_genus
)
)
}
|
fed674a900d91199fdd8780f4166bbe6abefc27e
|
b77d7957713e2219cda85b7d0ebcfe0e605bc49c
|
/rna_seq_home_page/view/report_more_view.R
|
ab3970b5c4fcf0f5ab5af48de4ec82971061b2b6
|
[] |
no_license
|
fxy1018/RNA_Seq_Report
|
a696ef3db28c3a566fb571c4b24d13be5d96b48c
|
2f25e8eddf521427b2f29bbea2c851f8ee8fcea0
|
refs/heads/master
| 2021-08-23T00:08:33.776380
| 2017-12-01T20:50:04
| 2017-12-01T20:50:04
| 104,396,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 444
|
r
|
report_more_view.R
|
# Build the "More" dropdown menu for the RNA-seq report navbar (shiny UI).
#
# Args:
#   expNum: experiment identifier. NOTE(review): currently unused -- the menu
#     has no active tabs; presumably it will parameterize the Download /
#     Documents tabs below once they are re-enabled. TODO confirm with caller.
#
# Returns: a shiny::navbarMenu titled "More" containing no tabs (the
#   Download and Documents tabPanels are kept below as commented-out stubs).
report_more_view <- function(expNum) {
  navbarMenu("More"
             # Placeholder tabs, disabled for now:
             # ,
             # tabPanel("Download",
             #          mainPanel(
             #            DT::dataTableOutput("downloadFileTable"),
             #            downloadButton("downloadFiles", label="Download Selected Files")
             #            
             #          )),
             # tabPanel("Documents",
             #          mainPanel())
  )
}
|
7b9420be8a1e9bd2aec2463aad1ddb3a3e8322de
|
202775810402a48a177b2c7f5b20f3bf45809342
|
/R/FUNCOES/FN_LIMPA_XPROD_V3.R
|
bc06944e1657dfd323ed8ef490b215bcfbe5238e
|
[] |
no_license
|
emirsmaka/CLASSIFICA_PRODUTOS
|
a4cd8db15fdc64666e169ebb4a6bf4c0256a2188
|
d96810d502bc5ffad07ad454f2879b6c8b989d40
|
refs/heads/master
| 2023-04-16T12:42:56.126200
| 2021-04-26T11:31:32
| 2021-04-26T11:31:32
| 297,635,969
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,509
|
r
|
FN_LIMPA_XPROD_V3.R
|
# Clean free-text product descriptions (xProd) from invoice data.
#
# Adds a factor column PROD_XPROD_LIMPO derived from PROD_XPROD:
# digits/punctuation are replaced by spaces, packaging/unit stop-words are
# removed in three regex passes, whitespace is collapsed, the result is
# lower-cased, and strings that end up empty become the placeholder "nulo".
#
# @param x data.frame with a character column PROD_XPROD.
# @return `x` with the additional factor column PROD_XPROD_LIMPO.
fn_limpa_xprod <- function(x){
  # Replace digits and punctuation with spaces.
  limpo <- gsub("[[:digit:]]|[[:punct:]]", " ", x$PROD_XPROD)
  # Left-trim before the stop-word passes: remove_words3 contains the
  # start-anchored alternative "^i\\s", which depends on it.
  limpo <- str_trim(limpo, side = "left")
  # Pass 1: unit / packaging abbreviations.
  remove_words <- "cx|ml|cartao|six\\s?pack|cxa|\\ssh\\s|l?gfa|\\sl\\s|\\slt(\\s)?|vd|\\sx\\s|ln|\\s(c)\\s|npal|un(id)?|\\scom\\s|ttc|pct?|\\sc\\s"
  # Pass 2: container words.
  remove_words2 <- "lata(s|o)?|pack|alcool|\\sc\\s|sleek|\\scom\\s|\\sp\\s|\\<lta\\>"
  # Pass 3: misc residue. BUG FIX: the original literal was wrapped across
  # two source lines, embedding a literal newline plus indentation inside
  # the alternation, so the "garrfa" alternative could never match. Build
  # the pattern from a vector of alternatives instead.
  remove_words3 <- paste(
    c("\\scom(\\s)?", "\\sfi(\\s)?", "\\sf\\s", "pe\\sec\\srd", "\\sprec\\s",
      "\\sret\\s", "\\spbr(\\s)?", "\\su\\s", "\\sgfs(\\s)?", "^i\\s",
      "\\sn$", "\\sow", "\\sd(\\s|$)", "fora de linha", "garrfa", "garrafa",
      "long neck", "caixa", "cart\\s", "\\spap", "fridge"),
    collapse = "|")
  limpo <- gsub(remove_words, " ", limpo, ignore.case = TRUE)
  limpo <- gsub(remove_words2, " ", limpo, ignore.case = TRUE)
  limpo <- gsub(remove_words3, " ", limpo, ignore.case = TRUE)
  # Collapse internal runs of whitespace and trim both ends.
  limpo <- str_squish(limpo)
  # Strings reduced to nothing become an explicit placeholder
  # (lower-cased below, matching the original behavior: "nulo").
  limpo <- ifelse(limpo == "", "NULO", limpo)
  x$PROD_XPROD_LIMPO <- as.factor(tolower(limpo))
  #x$CPROD_CERVEJA_SEFAZ <- as.factor(x$CPROD_CERVEJA_SEFAZ)
  return(x)
}
|
71bb614dcd2a3a4e7efb63f42ca16b711e99e793
|
85cbc4bd64a653bd3acc21a994620968a9ef659e
|
/notebooks/exploratory/DESeq2Analysis copy.R
|
4de4ecd5c52dbf675e2e69cdd83a9f4baea8c602
|
[] |
no_license
|
TaniaJes/module-4
|
12687a3a1b5e8a82b4af7d36cbc7a6f35d149dd2
|
10bb6c88a9d696339d42654a471a6d5852d2adda
|
refs/heads/main
| 2023-01-05T21:00:59.253392
| 2020-11-01T20:09:05
| 2020-11-01T20:09:05
| 308,375,911
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,381
|
r
|
DESeq2Analysis copy.R
|
##########################################################################################
# This script performs differential gene expression analysis with DESeq2                 #
##########################################################################################
### LOAD REQUIRED LIBRARIES
library("DESeq2")
library("org.Hs.eg.db")
library("AnnotationDbi")
library("ggplot2")
library("ashr") # library for type = "ashr" object used to generate DEseq results object
library("gridExtra")
library("plotly")
library("pheatmap")
library("EnhancedVolcano")
library("RColorBrewer")
library("biomaRt")
### SET WORKING DIRECTORY
# note: this directory should be populated with the raw counts file
# NOTE(review): hard-coded setwd() plus absolute paths make this script
# machine-specific; consider project-relative paths (e.g. the here package).
setwd("~/NYU-classes/bioinformatics/module-4/data")
### Import count table and details on experimental design
# NB: Make sure column names in the sample(table) file and counts file are exactly the same and in the same order
samples <- read.table("~/NYU-classes/bioinformatics/module-4/data/sample_info.txt", header = TRUE)
featCounts <- read.table("~/NYU-classes/bioinformatics/module-4/data/featureCounts_output/geneCounts-output.txt", header = T, row.names = 1)
featCounts <- featCounts[, rownames(samples)] # column reordering to match samples order
# Build the DESeq2 dataset; batch is modeled as a covariate.
Dataset <- DESeqDataSetFromMatrix(countData = featCounts, colData = samples, design = ~batch + condition)
# Note: Always end with conditions for 'design' variable
### PRELIMINARY ANALYSES ###
# The first steps in your analysis should focus on better understanding the relationship of the datasets being studied.
# This can be simply achieved by generating a PCA plot showing the relationship of your samples.
# First we transform our raw count data using a variance stabilizing transformation (VST) that roughly mirrors how DeSeq2 models the data.
vsd1 <- varianceStabilizingTransformation(Dataset, blind=FALSE)
# Then we plot a PCA, grouping and coloring our datasets according to batch
plotPCA(vsd1, "condition")
### note that you can attach additional information based on the column headers in your sample table
plotPCA(vsd1, c("condition","batch"))
# we can also attempt to replicate the batch effect correction performed by DeSeq2 using the limma::removeBatchEffect function
# NOTE(review): vsd2 recomputes the exact same VST as vsd1 before the batch
# correction is applied; vsd2 could be initialized from vsd1 to save time.
vsd2 <- varianceStabilizingTransformation(Dataset, blind=FALSE)
assay(vsd2) <- limma::removeBatchEffect(assay(vsd2), vsd2$batch)
plotPCA(vsd2, "condition")
|
9975b5014bb574c4cfbd3c8f93a77c6b30c7be07
|
4c51fdcafa3b1a3d044face76ce0c51683ddf6b0
|
/R/simulation_multi_genes.R
|
e526a0a2f9aaf5ffdabea42aee37b4500e56fda9
|
[
"MIT"
] |
permissive
|
JiayuSuPKU/JointModelQTL
|
ae8fe5a1cd20f56c92eff8b807e204bae5e26434
|
0deb158845ae4add8cdecccfcbc34b8861915d77
|
refs/heads/main
| 2023-04-21T09:35:46.640387
| 2021-05-26T14:11:09
| 2021-05-26T14:11:09
| 357,774,407
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,921
|
r
|
simulation_multi_genes.R
|
#' Simulate read counts for multiple genes
#'
#' This function simulates read counts data from negative binomial (total
#' read counts) and beta-binomial (allele-specific read counts) distributions
#' for multiple genes
#'
#' J>=1, K=1, L=1
#'
#' \code{T ~ NB(mu, phi)}
#' \code{A_alt ~ BB(T * prob_as := N_as, prob_alt, theta)}
#'
#' @param n_i Numbet of samples
#' @param n_j Number of genes
#' @param gene_pars A list of length \code{n_j} or \code{1} specifying
#' gene-level statistics. Each element should be a list containing parameters
#' used in \code{\link{simulateReadCounts.s}}.
#' If \code{length(gene_pars) == 1} then all \code{n_j} genes share the same set
#' of parameters.
#'
#' @return A list with \code{n_i} x \code{n_j} matrices
#' \code{TotalReadCounts}, \code{RefCounts} and \code{AltCounts}.
#'
#' @importFrom extraDistr rbbinom
#' @importFrom stats rbinom rnbinom runif
#'
#' @export
simulateReadCounts.m <- function(n_i, n_j, gene_pars) {
  # Recycle a single parameter set across all genes.
  if (length(gene_pars) == 1) {
    gene_pars <- rep(gene_pars, n_j)
  }
  # Total read counts: n_i x n_j matrix, one NB column per gene.
  # BUG FIX: vapply (instead of sapply) guarantees a matrix even when
  # n_i == 1; sapply would simplify to a plain vector, making
  # t(t(trc) * prob_as) below transpose the wrong way and breaking the
  # column indexing n_as[, j].
  trc <- vapply(gene_pars, function(l) {
    rnbinom(n = n_i, mu = l[["mu"]], size = l[["phi"]])
  }, numeric(n_i))
  # Per-gene expected fraction of reads that are allele-specific.
  prob_as <- vapply(gene_pars, function(l) l[["prob_as"]], numeric(1))
  # Allele-specific totals: scale each gene's column by its prob_as.
  n_as <- round(t(t(trc) * prob_as))
  # Alternative-allele counts: one beta-binomial column per gene, using the
  # (prob, overdispersion theta) -> (alpha, beta) reparameterization.
  a_alt <- vapply(seq_len(n_j), function(j) {
    prob_alt <- gene_pars[[j]][["prob_alt"]]
    theta <- gene_pars[[j]][["theta"]]
    rbbinom(
      n = n_i, size = n_as[, j],
      alpha = prob_alt * (theta - 1),
      beta = (1 - prob_alt) * (theta - 1)
    )
  }, numeric(n_i))
  return(list(TotalReadCounts = trc, RefCounts = n_as - a_alt, AltCounts = a_alt))
}
#' Simulate genotype for multi-to-one gene-snp pairs
#'
#' Simulates a biallelic genotype G (0/1/2) for one test snp and, for each
#' of \code{n_j} genes, a phasing indicator P (-1/0/1: which gene allele the
#' heterozygous snp sits on; 0 for homozygotes).
#'
#' J>=1, K=1, L=1
#'
#' @param n_i Number of samples
#' @param n_j Number of genes
#' @param maf Minor allele frequency of the test snp
#' @param prob_ref A vector specifying the probability of the test snp on the
#' ref allele of each gene if the test snp is heterogeneous.
#' If \code{length(prob_ref) == 1} then all genes share the same \code{prob_ref}
#'
#' @export
simulateGenotype.m2s <- function(n_i, n_j, maf, prob_ref) {
  # Recycle a single prob_ref across all genes.
  if (length(prob_ref) == 1) {
    prob_ref <- rep(prob_ref, n_j)
  }
  # Fold the allele frequency onto [0, 0.5].
  maf <- pmin(maf, 1 - maf)
  # Two independent haplotypes per sample; the genotype is their sum,
  # i.e. the number of minor alleles (0, 1 or 2).
  hap_a <- rbinom(n = n_i, size = 1, prob = maf)
  hap_b <- rbinom(n = n_i, size = 1, prob = maf)
  geno <- hap_a + hap_b
  # Phasing is only informative for heterozygotes (geno == 1).
  het <- ifelse(geno == 1, 1, 0)
  # P: n_i by n_j phasing matrix; +1/-1 encodes which allele carries the snp.
  phasing <- sapply(prob_ref, function(p_ref) {
    cis_sign <- ifelse(runif(n_i) <= p_ref, 1, -1)
    het * cis_sign
  })
  return(list(Genotype = geno, Phasing = phasing))
}
#' Simulate cis-regulatory effect for multi-to-one gene-snp pairs
#'
#' This function simulates genotype G, phasing P, and expression
#' (total and allele-specific) profiles for multi-to-one gene-snp pairs
#'
#' J>=1, K=1, L=1
#'
#' @inheritParams simulateReadCounts.m
#' @inheritParams simulateGenotype.m2s
#' @param gene_pars A list of length \code{n_j} or \code{1} specifying
#' gene-level statistics. Each element should be a list containing parameters
#' used in \code{\link{simulateCisEffect.s2s}}, including phi, prob_as, theta,
#' baseline, and r. If \code{length(gene_pars) == 1} then all \code{n_j} genes
#' share the same set of parameters.
#'
#' @export
simulateCisEffect.m2s <- function(n_i, n_j, maf, prob_ref, gene_pars) {
  # Recycle a single parameter set across all genes.
  if (length(gene_pars) == 1) {
    gene_pars <- rep(gene_pars, n_j)
  }
  # simulate genotype
  meta <- simulateGenotype.m2s(n_i = n_i, n_j = n_j, maf = maf, prob_ref = prob_ref)
  # Translate the genetic effect size r of each gene into per-sample
  # NB/BB parameters consumed by simulateReadCounts.m below.
  gene_pars_mu <- list()
  for (j in 1:n_j) {
    phi <- gene_pars[[j]][["phi"]]
    theta <- gene_pars[[j]][["theta"]]
    prob_as <- gene_pars[[j]][["prob_as"]]
    baseline <- gene_pars[[j]][["baseline"]]
    r <- gene_pars[[j]][["r"]]
    # simulate genetic effects on total read counts:
    # heterozygotes get the log of the average of the two allelic means
    # (log((1 + e^r)/2)); alt-homozygotes get the full effect r.
    log_mu <- baseline + ifelse(meta$Genotype == 1, log(1 + exp(r)) - log(2), 0) +
      ifelse(meta$Genotype == 2, r, 0)
    # simulate genetic effects on allelic imbalance: the phasing sign
    # (+1/-1, 0 for homozygotes) orients the allelic shift.
    logit_prob_alt <- meta$Phasing[, j] * r
    # NOTE(review): softmax() is a package-internal helper not shown here;
    # presumably it maps the logit to a probability (logistic) -- TODO confirm.
    gene_pars_mu <- append(gene_pars_mu, list(list(
      mu = exp(log_mu), phi = phi, prob_as = prob_as,
      prob_alt = softmax(logit_prob_alt), theta = theta
    )))
  }
  # simulate expression profiles
  Y <- simulateReadCounts.m(n_i = n_i, n_j = n_j, gene_pars = gene_pars_mu)
  # Bundle simulation parameters and data (Stan-style data list).
  sim <- list(
    genotype_pars = list(
      n_i = n_i,
      maf = maf,
      prob_ref = prob_ref
    ),
    gene_pars = gene_pars,
    data = list(
      I = n_i,
      J = n_j,
      P = meta$Phasing,
      G = meta$Genotype,
      T = Y$TotalReadCounts,
      log1p_T = log1p(Y$TotalReadCounts),
      A_ref = Y$RefCounts,
      A_alt = Y$AltCounts,
      # 1 where both alleles have nonzero reads (informative for ASE).
      Is_ase_het = as.matrix(Y$RefCounts * Y$AltCounts != 0) * 1
    )
  )
  # Observed allelic log-ratio; 0 where the sample is not ASE-informative.
  sim$data$logit_pi_alt <- ifelse(
    !sim$data$Is_ase_het, 0, log(sim$data$A_alt / sim$data$A_ref)
  )
  return(sim)
}
|
50df26289c6f0e1abc3bede98f4629896d679aee
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/inbreedR/examples/simulate_r2_hf.Rd.R
|
0ba43c8f9eae280702381364f85ee867b89214ec
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 463
|
r
|
simulate_r2_hf.Rd.R
|
# Auto-generated example script for the 'inbreedR' package, extracted from
# the examples section of the simulate_r2_hf help page.
library(inbreedR)
### Name: simulate_r2_hf
### Title: Calculates the expected squared correlation between
###   heteorzygosity and inbreeding for simulated marker sets
### Aliases: simulate_r2_hf
### ** Examples
# Load the bundled mouse microsatellite data and convert the raw two-column
# allele format into inbreedR's 0/1 heterozygosity matrix.
data(mouse_msats)
genotypes <- convert_raw(mouse_msats)
# Simulate expected r2(h, f) for marker subsets of size 4-10 (100 replicates
# each), assuming microsatellite-type markers.
sim_r2 <- simulate_r2_hf(n_ind = 10, H_nonInb = 0.5, meanF = 0.2, varF = 0.03,
                      subsets = c(4,6,8,10), reps = 100, 
                      type = "msats")
plot(sim_r2)
102890852ea10bdf3d50bf5375aa8e683ae660e2
|
b9b32fcdfd3f2387cc85e2690ee610010dfdf930
|
/man/clear.labels.Rd
|
7fb64ab32738b4870420047ada87df6e69b99988
|
[] |
no_license
|
jfontestad/danMisc
|
cc49fb89240f128c6a055c8fe65c238af68d0cfc
|
f3dccbe0a8f9415c268b633e8985aa5a3b1c45df
|
refs/heads/master
| 2022-03-06T07:28:55.044264
| 2019-11-23T10:23:59
| 2019-11-23T10:23:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 358
|
rd
|
clear.labels.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.r
\name{clear.labels}
\alias{clear.labels}
\title{supprime tous les labels de HMisc pour eviter les problemes avec dplyr}
\usage{
clear.labels(x)
}
\arguments{
\item{x}{data.frame}
}
\description{
supprime tous les labels de HMisc pour eviter les problemes avec dplyr
}
|
66d1d48ef954b863878cd672563f99378968cec4
|
1dc0ab4e2b05001a5c9b81efde2487f161f800b0
|
/experiments/keel/noisy/cn/cn_sonar.R
|
ac4f3b7cf0387910c7dfb4dcb9839215641a6f10
|
[] |
no_license
|
noeliarico/knnrr
|
efd09c779a53e72fc87dc8c0f222c0679b028964
|
9f6592d1bbc1626b2ea152fbd539acfe9f9a5ab3
|
refs/heads/master
| 2020-06-01T02:44:34.201881
| 2020-03-13T13:30:52
| 2020-03-13T13:30:52
| 190,601,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,204
|
r
|
cn_sonar.R
|
# Sonar -------------------------------------------------------------------
# Five-fold experiment on the sonar dataset with 10% class noise:
# load each fold's train/test split, coerce feature columns to numeric,
# train per fold, then average the per-fold metrics.
path_stem <- "data/keel/noisy/cn/sonar-5cn10-nc/sonar-5cn10-nc-5-"
# Read the 5 folds (train + test) into the same global names as before.
for (fold in 1:5) {
  for (part in c("tra", "tst")) {
    assign(paste0("sonar_5cn10_nc_5_", fold, part),
           read.keel(paste0(path_stem, fold, part, ".dat")))
  }
}
# Every column except the class label ("Type") is converted to numeric.
change <- colnames(sonar_5cn10_nc_5_1tra)[colnames(sonar_5cn10_nc_5_1tra) != "Type"]
for (fold in 1:5) {
  for (part in c("tra", "tst")) {
    nm <- paste0("sonar_5cn10_nc_5_", fold, part)
    assign(nm, get(nm) %>% mutate_at(change, function(x) {as.numeric(as.character(x))}))
  }
}
class_index <- 61
# Train one model per fold.
for (fold in 1:5) {
  assign(paste0("sonar_5cn10_nc_5_", fold),
         noelia_train(get(paste0("sonar_5cn10_nc_5_", fold, "tra")),
                      get(paste0("sonar_5cn10_nc_5_", fold, "tst")),
                      class_index))
}
sonar_5cn10_nc <- bind_rows(sonar_5cn10_nc_5_1,
                            sonar_5cn10_nc_5_2,
                            sonar_5cn10_nc_5_3,
                            sonar_5cn10_nc_5_4,
                            sonar_5cn10_nc_5_5) %>%
  # evaluate following calls for each value in the rowname column
  group_by(k, method, type) %>%
  # add all non-grouping variables
  summarise_if(is.numeric, mean, na.rm = FALSE) %>%
  ungroup()
compare_metric_noisy("sonar_5cn10_nc", metric = "F1", input = "numerical")
|
add79c93218f58ddfa090c4361eb2bac40ab8544
|
1e81deb64a22c92d6cd53842ae7d8be0c3e9d49b
|
/2021-08-24_lemurs/01-get-data.R
|
3cbdef318344d5e296233ac1bf110f520198b702
|
[
"BSD-2-Clause"
] |
permissive
|
jmcastagnetto/tidytuesday-kludges
|
5bfea5ccd4640df4e9e5367794dbb09594ac32a3
|
13dcb24694acff3839a7e1322d725e80bb146ae0
|
refs/heads/main
| 2023-04-07T04:33:13.715619
| 2023-03-29T03:46:12
| 2023-03-29T03:46:12
| 193,815,379
| 9
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 322
|
r
|
01-get-data.R
|
# Download the TidyTuesday 2021-08-24 "lemurs" datasets and cache them
# locally as a single .Rdata file for downstream analysis.
# NOTE(review): both read_csv() calls fetch from GitHub, so this script
# needs network access on every run.
library(readr)
# Species taxonomy lookup table.
taxons <- read_csv("https://github.com/rfordatascience/tidytuesday/raw/master/data/2021/2021-08-24/taxonomy.csv")
# Duke Lemur Center animal records.
lemurs <- read_csv("https://github.com/rfordatascience/tidytuesday/raw/master/data/2021/2021-08-24/lemur_data.csv")
save(
  taxons,
  lemurs,
  file = "2021-08-24_lemurs/lemurs-data.Rdata"
)
|
d05cc7672816b013d5c518957091c185c4240f71
|
17cabbd6156cc0ab06c3970b06bbad61e984f698
|
/R/ca.R
|
786d3d2c9a865189465aeb587d0c3a8769b98b4b
|
[] |
no_license
|
cran/visae
|
ff51ea4ecedf9ce63f11101277882574017db6c2
|
7dc5b7997c4e76c7a5905b7ed63fec924382412c
|
refs/heads/master
| 2023-08-29T15:01:06.887431
| 2021-11-10T22:40:02
| 2021-11-10T22:40:02
| 334,227,668
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,210
|
r
|
ca.R
|
#'Correspondence Analysis of Adverse Events
#'@param data data.frame or tibble object.
#'@param id unquoted expression indicating the
#'variable name in \code{data} that corresponds to the id variable.
#'@param group unquoted expression indicating the
#'variable name in \code{data} that corresponds to the group variable.
#'@param ae_class unquoted expression indicating the
#'variable name in \code{data} that corresponds to AE class.
#'@param label character value indicating the
#'column name of AE class in resulting tables.
#'@param contr_indicator logical value indicating the
#'use of color intensity to represent the maximum contribution of each \code{ae_class}.
#'@param mass_indicator logical value indicating the
#'use of dot size to represent the overall relative frequency of each \code{ae_class}.
#'@param contr_threshold numerical value between 0 an 1 filtering
#'\code{ae_class} with contribution greater than \code{contr_threshold}.
#'@param mass_threshold numerical value between 0 an 1 filtering
#'\code{ae_class} with mass greater than \code{mass_threshold}.
#'
#'@return a list of
#'\item{tab_abs}{a tibble showing absolute frequency of \code{ae_class} by \code{group};}
#'\item{tab_rel}{a tibble showing percent of \code{ae_class} by \code{group};}
#'\item{total_inertia}{a numerical value indicating the total inertia;}
#'\item{tab_inertia}{a tibble showing inertia broken down by dimension and the percent relative to the total inertia;}
#'\item{asymmetric_plot}{a contribution biplot.}
#'
#'@references Levine RA, Sampson E, Lee TC. Journal of Computational and Graphical Statistics. Wiley Interdisciplinary Reviews: Computational Statistics. 2014 Jul;6(4):233-9.
#'
#'@examples
#'library(magrittr)
#'library(dplyr)
#'
#'id <- rep(1:50, each = 2)
#'group <- c(rep("A", 50), rep("B", 50))
#'ae_grade <- sample(1:5, size = 100, replace = TRUE)
#'ae_domain <- sample(c("D", "E"), size = 100, replace = TRUE)
#'ae_term <- sample(c("F", "G", "H", "I"), size = 100, replace = TRUE)
#'df <- tibble(id = id, trt = group,
#' ae_g = ae_grade, ae_d = ae_domain, ae_t = ae_term)
#'test <- df %>% ca_ae(., id = id, group = trt, ae = ae_g, label = "AE",
#' contr_indicator = TRUE, mass_indicator = TRUE,
#' contr_threshold = 0.01, mass_threshold = 0.01)
#'
#'@import magrittr
#'@import ggplot2
#'@import dplyr
#'@importFrom rlang .data enquos :=
#'@importFrom tidyr pivot_wider separate
#'@importFrom ca ca
#'@importFrom stats addmargins
#'@importFrom ggrepel geom_label_repel
#'
#'@export
# Correspondence analysis (CA) of adverse-event incidence by treatment group.
# See the roxygen block above for the full parameter/return contract.
#
# Implementation notes:
#  * Each AE class row is "doubled" with its complement (suffix "_C") so the
#    CA runs on complete presence/absence profiles rather than raw rates.
#  * Two branches below: with exactly 2 groups the CA solution is
#    one-dimensional (plotted on a single axis); with 3+ groups a 2-D
#    contribution biplot is built. The branches share most of their logic.
ca_ae <- function(data, id, group, ae_class, label = "AE",
                  contr_indicator = TRUE, mass_indicator = TRUE,
                  contr_threshold = NULL, mass_threshold = NULL) {
  # Capture the three column expressions for tidy-eval selection below;
  # after select(!!!temp) the columns are named group/ae/id.
  temp <- enquos(group = group,
                 ae = ae_class, id = id,
                 .ignore_empty = "all")
  # One row per (subject, AE class): each subject counts an AE at most once.
  aux <- data %>% select(!!!temp) %>% na.exclude() %>%
    distinct(id, .data$ae, .keep_all = TRUE)
  # Group sizes (distinct subjects per group) -- denominators for rates.
  total <- data %>% select(!!!temp) %>%
    distinct(id, .keep_all = TRUE) %>% count(group)
  # AE-by-group incidence counts and per-group proportions.
  tab <- table(aux$ae, aux$group)
  p <- t(t(tab)/as.numeric(total$n))
  # Doubling: complement rows, suffixed "_C", so each profile sums to 1.
  q <- 1 - p
  rownames(q) <- paste0(rownames(q), "_C")
  tab.ca <- rbind(p, q)
  res.ca <- ca(tab.ca)
  names(dimnames(p)) <- c("ae", "group")
  # Percent tables for the report output.
  average <- round(100*rowMeans(p), 3)
  tab_rel <- round(100*p, 3) %>% as_tibble() %>%
    pivot_wider(names_from = .data$group, values_from = .data$n) %>%
    mutate(Average = average)
  # Default thresholds: the "expected" value 1/(number of AE classes).
  if (is.null(contr_threshold))
    contr_threshold <- 1/nrow(tab)
  if (is.null(mass_threshold))
    mass_threshold <- 1/nrow(tab)
  # NOTE(review): expected_threshold is computed but never used below.
  expected_threshold <- 1/nrow(tab)
  names(dimnames(tab)) <- c("ae", "group")
  tab_abs <- tab %>% as_tibble() %>%
    pivot_wider(names_from = .data$group, values_from = .data$n)
  # Inertia (squared singular values) and explained variance per dimension.
  inertia <- res.ca$sv^2
  total_inertia <- sum(inertia)
  explained_var <- 100*inertia/total_inertia
  tab_inertia = tibble(Dimension = 1:length(inertia),
                       Inertia = inertia,
                       'Explained Variance' = explained_var)
  if (ncol(tab_abs) < 4){
    # ---- Exactly two groups: one-dimensional CA solution. ----
    # Standard row coordinates scaled by sqrt(mass); squared = contributions.
    aux <- res.ca$rowcoord*sqrt(res.ca$rowmass)
    contr <- round(100*(res.ca$rowcoord*sqrt(res.ca$rowmass))^2, 2)
    # Sum the contributions of each class and its "_C" complement.
    tab_contr <- as_tibble(contr, rownames = "labels") %>%
      separate(labels, into = c("ae", "delete"),
               sep = "_", fill = "right") %>%
      group_by(.data$ae) %>%
      summarise(across(starts_with("Dim"), sum, .names = "{col}"),
                .groups = "drop_last")
    colnames(tab_contr)[-1] <- paste0("Dim ", 1:ncol(aux))
    # Keep only the original (non-complement) rows passing both filters.
    standard.coordinates.row <-
      as_tibble(aux, rownames = "labels") %>%
      separate(labels, into = c("labels", "delete"),
               sep = "_", fill = "right") %>%
      filter(is.na(.data$delete)) %>% select(-.data$delete) %>%
      mutate(type = "row",
             contr = tab_contr[[2]]/100,
             mass = average/100) %>%
      filter(.data$contr > contr_threshold & .data$mass > mass_threshold)
    colnames(standard.coordinates.row)[2] <- "dim_1"
    # Dot size for the group points: midpoint of the retained row masses,
    # with fallbacks when the filtered set is empty (min/max are +-Inf).
    group_mass <- ifelse(is.finite(min(standard.coordinates.row$mass, na.rm = TRUE)) &
                           is.finite(max(standard.coordinates.row$mass, na.rm = TRUE)),
                         (min(standard.coordinates.row$mass, na.rm = TRUE) +
                            max(standard.coordinates.row$mass, na.rm = TRUE))/2,
                         ifelse(is.finite(max(standard.coordinates.row$mass, na.rm = TRUE)),
                                0.5*max(standard.coordinates.row$mass, na.rm = TRUE),
                                ifelse(is.finite(min(standard.coordinates.row$mass, na.rm = TRUE)),
                                       1.5*min(standard.coordinates.row$mass, na.rm = TRUE), 0.5)))
    # Principal coordinates of the group (column) points.
    principal.coordinates.col <-
      tibble(dim_1 = as.numeric(res.ca$colcoord*res.ca$sv)) %>% #
      mutate(labels = rownames(res.ca$colcoord),
             type = "col", contr = 1, mass = group_mass)
    selected_classes <- as.character(standard.coordinates.row$labels)
    # Rescale contributions to [0, 1] for the alpha aesthetic.
    if (nrow(standard.coordinates.row) > 0)
      standard.coordinates.row <- standard.coordinates.row %>%
      mutate(contr = .data$contr/max(.data$contr))
    dp <- bind_rows(principal.coordinates.col, standard.coordinates.row)
    # Map the optional alpha (contribution) / size (mass) aesthetics.
    if (mass_indicator & contr_indicator){
      asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1, y = NA,
                                        color = .data$type,
                                        alpha = .data$contr,
                                        size = .data$mass))
    } else if (mass_indicator & !contr_indicator){
      asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1,
                                        y = NA,
                                        color = .data$type,
                                        size = .data$mass))
    } else if (!mass_indicator & contr_indicator) {
      asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1,
                                        y = NA,
                                        color = .data$type,
                                        alpha = .data$contr))
    } else {
      asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1,
                                        y = NA,
                                        color = .data$type))
    }
    asymmetric_plot <- asymmetric_plot +
      geom_vline(xintercept = 0, linetype = 2) +
      geom_point() +
      geom_label_repel(aes(label = .data$labels),
                       xlim = c(-Inf, Inf), ylim = c(-Inf, Inf),
                       min.segment.length = 0) +
      scale_colour_manual(values = c("red", "blue")) +
      labs(x = paste0("Dim 1 ", "(", round(explained_var[1], 2), "%)")) +
      theme_minimal() +
      theme(
        legend.position = "none",
        axis.text.y = element_blank(),
        axis.ticks.y = element_blank(),
        axis.title.y = element_blank(),
        text = element_text(size = 20)) +
      scale_size_continuous(range = c(3, 6)) +
      scale_alpha_continuous(range = c(0.3, 1))
    # Rebuild tab_contr with "ae" as key column for the returned table.
    # (NOTE(review): `temp` is reused here, shadowing the enquos above.)
    temp <- round(100*(res.ca$rowcoord*sqrt(res.ca$rowmass))^2, 2)
    tab_contr <- as_tibble(temp, rownames = "ae") %>%
      separate(.data$ae, into = c("ae", "delete"),
               sep = "_", fill = "right") %>%
      group_by(.data$ae) %>%
      summarize(across(starts_with("Dim"), sum, .names = "{col}"))
    colnames(tab_contr)[-1] <- "Dim 1"
  } else {
    # ---- Three or more groups: two-dimensional contribution biplot. ----
    aux <- res.ca$rowcoord*sqrt(res.ca$rowmass)
    colnames(aux) <- paste0("dim_", 1:ncol(aux))
    contr <- round(100*(res.ca$rowcoord*sqrt(res.ca$rowmass))^2, 2)
    tab_contr <- as_tibble(contr, rownames = "labels") %>%
      separate(labels, into = c("ae", "delete"),
               sep = "_", fill = "right") %>%
      group_by(.data$ae) %>%
      summarise(across(starts_with("Dim"), sum, .names = "{col}"),
                .groups = "drop_last")
    colnames(tab_contr)[-1] <- paste0("Dim ", 1:ncol(aux))
    # Filter on the larger of the Dim-1/Dim-2 contributions.
    standard.coordinates.row <-
      as_tibble(aux, rownames = "labels") %>%
      separate(labels, into = c("labels", "delete"),
               sep = "_", fill = "right") %>%
      filter(is.na(.data$delete)) %>% select(-.data$delete) %>%
      mutate(type = "row",
             contr = pmax(tab_contr[[2]]/100, tab_contr[[3]]/100),
             mass = average/100) %>%
      filter(.data$contr > contr_threshold & .data$mass > mass_threshold)
    selected_classes <- as.character(standard.coordinates.row$labels)
    # Same group-dot-size heuristic as in the 2-group branch.
    group_mass <- ifelse(is.finite(min(standard.coordinates.row$mass, na.rm = TRUE)) &
                           is.finite(max(standard.coordinates.row$mass, na.rm = TRUE)),
                         (min(standard.coordinates.row$mass, na.rm = TRUE) +
                            max(standard.coordinates.row$mass, na.rm = TRUE))/2,
                         ifelse(is.finite(max(standard.coordinates.row$mass, na.rm = TRUE)),
                                0.5*max(standard.coordinates.row$mass, na.rm = TRUE),
                                ifelse(is.finite(min(standard.coordinates.row$mass, na.rm = TRUE)),
                                       1.5*min(standard.coordinates.row$mass, na.rm = TRUE), 0.5)))
    # Principal coordinates of the group points (colcoord scaled by sv).
    aux <- res.ca$colcoord%*%diag(res.ca$sv)
    colnames(aux) <- paste0("dim_", 1:ncol(aux))
    principal.coordinates.col <-
      as_tibble(aux) %>%
      mutate(labels = rownames(res.ca$colcoord),
             type = "col",
             contr = 1, mass = group_mass)
    if (nrow(standard.coordinates.row) > 0)
      standard.coordinates.row <- standard.coordinates.row %>%
      mutate(contr = .data$contr/max(.data$contr))
    dp <- bind_rows(principal.coordinates.col, standard.coordinates.row)
    if (mass_indicator & contr_indicator){
      asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1,
                                        y = .data$dim_2,
                                        color = .data$type,
                                        alpha = .data$contr,
                                        size = .data$mass))
    } else if (mass_indicator & !contr_indicator){
      asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1, y = .data$dim_2,
                                        color = .data$type,
                                        size = .data$mass))
    } else if (!mass_indicator & contr_indicator){
      asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1,
                                        y = .data$dim_2,
                                        color = .data$type,
                                        alpha = .data$contr))
    } else {
      asymmetric_plot <- ggplot(dp, aes(x = .data$dim_1,
                                        y = .data$dim_2,
                                        color = .data$type))
    }
    asymmetric_plot <- asymmetric_plot +
      geom_hline(yintercept = 0, linetype = 2) +
      geom_vline(xintercept = 0, linetype = 2) +
      geom_point() +
      geom_label_repel(aes(label = .data$labels),
                       xlim = c(-Inf, Inf), ylim = c(-Inf, Inf),
                       min.segment.length = 0) +
      scale_colour_manual(values = c("red", "blue")) +
      labs(x = paste0("Dim 1 ", "(", round(explained_var[1], 2), "%)"),
           y = paste0("Dim 2 ", "(", round(explained_var[2], 2), "%)")) +
      theme_minimal() +
      theme(legend.position = "none",
            text = element_text(size = 20))+
      scale_size_continuous(range = c(3, 6)) +
      scale_alpha_continuous(range = c(0.3, 1))
  }
  # Restrict the output tables to the retained classes and relabel the key
  # column with the user-facing `label`; HTML <br> tags target the report.
  tab_rel <- tab_rel %>% filter(.data$ae %in% selected_classes) %>%
    rename(!!label := .data$ae) %>%
    mutate(across(where(is.numeric), ~ format(.x, digits = 2, nsmall = 2)))
  colnames(tab_rel)[-c(1, ncol(tab_rel))] <-
    paste0(colnames(tab_rel)[-c(1, ncol(tab_rel))], "<br> (n = ", total$n, ")")
  tab_contr <- tab_contr %>% filter(.data$ae %in% selected_classes) %>%
    rename(!!label := .data$ae)
  out <- list(tab_abs = tab_abs, tab_rel = tab_rel,
              total_inertia = total_inertia,
              tab_inertia = tab_inertia,
              tab_contr = tab_contr,
              asymmetric_plot = asymmetric_plot)
  return(out)
}
|
b61ef76db6562fe6b744073b943aa41e410aa623
|
e9fc4d886ca490bc8c0537ca7ef6ede2ef7c7f78
|
/man/ateRobust.Rd
|
abbd435595b6cab88c4079c0042ed5d006f20775
|
[] |
no_license
|
bozenne/riskRegressionLight
|
b6f06a2f1d4af13a5a1e4b1ce5a68ea539ce2faa
|
0d3fff3062876935e478a84b1e361614a9d851e3
|
refs/heads/master
| 2020-05-19T19:56:26.426169
| 2019-05-06T13:50:59
| 2019-05-06T13:50:59
| 185,192,171
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,679
|
rd
|
ateRobust.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ateRobust.R
\name{ateRobust}
\alias{ateRobust}
\title{Average Treatment Effects (ATE) for survival outcome (with competing risks) using doubly robust estimating equations}
\usage{
ateRobust(data, times, cause, type, formula.event, formula.censor,
formula.treatment, fitter = "coxph", product.limit = NULL,
se = TRUE, augment.cens = TRUE, na.rm = FALSE)
}
\arguments{
\item{data}{[data.frame or data.table] Data set in which to evaluate the ATE.}
\item{times}{[numeric] Time point at which to evaluate average treatment effects.}
\item{cause}{[numeric/character] The cause of interest. Defaults to the first cause.}
\item{type}{[character] When set to \code{"survival"} uses a cox model for modeling the survival,
otherwise when set to \code{"competing.risks"} uses a Cause Specific Cox model for modeling the absolute risk of the event.}
\item{formula.event}{[formula] Cox model for the event of interest (outcome model).
Typically \code{Surv(time,event)~treatment}.}
\item{formula.censor}{[formula] Cox model for the censoring (censoring model).
Typically \code{Surv(time,event==0)~treatment}.}
\item{formula.treatment}{[formula] Logistic regression for the treatment (propensity score model).
Typically \code{treatment~1}.}
\item{fitter}{[character] Routine to fit the Cox regression models.
If \code{coxph} use \code{survival::coxph} else use \code{rms::cph}.}
\item{product.limit}{[logical] If \code{TRUE} the survival is computed using the product limit method.
Otherwise the exponential approximation is used (i.e. exp(-cumulative hazard)).}
\item{se}{[logical] If \code{TRUE} compute and add the standard errors relative to the G-formula and IPTW method to the output.}
\item{augment.cens}{[logical] If \code{TRUE} add an censoring model augmentation term to the estimating equation}
\item{na.rm}{[logical] If \code{TRUE} ignore observations whose influence function is NA.}
}
\description{
Compute the average treatment effect using different methods:
G-formula based on (cause-specific) Cox regression, inverse probability of treatment weighting (IPTW)
combined with inverse probability of censoring weighting (IPCW), augmented inverse probability weighting (AIPTW, AIPCW).
}
\details{
The standard errors/confidence intervals/p-values output by ateRobust
do not account for the uncertainty related to the estimation of the parameters of the censoring model (only relevant for IPCW/AIPCW estimators).
Note that for the AIPTW, this uncertainty is negligible (i.e. o_p(n^{-1/2})) in correctly specified models.
}
\examples{
library(survival)
library(lava)
library(data.table)
library(prodlim)
set.seed(10)
# survival outcome, binary treatment X1
ds <- sampleData(101,outcome="survival")
out <- ateRobust(data = ds, type = "survival",
formula.event = Surv(time, event) ~ X1+X6,
formula.censor = Surv(time, event==0) ~ X6,
formula.treatment = X1 ~ X6+X2+X7, times = 1)
out
dt.out=as.data.table(out)
dt.out
# competing risk outcome, binary treatment X1
dc=sampleData(101,outcome="competing.risks")
x=ateRobust(data = dc, type = "competing.risks",
formula.event = list(Hist(time, event) ~ X1+X6,Hist(time, event) ~ X6),
formula.censor = Surv(time, event==0) ~ X6,
formula.treatment = X1 ~ X6+X2+X7, times = 1,cause=1,
product.limit = FALSE)
## compare with g-formula
fit= CSC(list(Hist(time, event) ~ X1+X6,Hist(time, event) ~ X6),data=dc)
ate(fit,data = dc,treatment="X1",times=1,cause=1)
x
as.data.table(x)
}
\seealso{
\code{\link{ate}} for the g-formula result in case of more than 2 treatments
}
|
e861ca000662343f6c7a6090520debea38064900
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609889430-test.R
|
f5cf0263b4c2097a272406f35cffb4e5af5c03f9
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 108
|
r
|
1609889430-test.R
|
# Auto-generated fuzzing reproduction case (libFuzzer + valgrind) for
# esreg::G1_fun. The literal inputs are preserved byte-for-byte so the
# recorded run can be replayed exactly; do not "clean up" the values.
testlist <- list(type = 0L, z = 4.84176071611214e-305)
# Replay the recorded call and print the structure of the result.
result <- do.call(esreg::G1_fun,testlist)
str(result)
|
631c4c2a0003219abaed573fd13a74b7cf703a0a
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615848961-test.R
|
13189247daa41368dcdcf21edc686ab09df40881
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 439
|
r
|
1615848961-test.R
|
# Auto-generated fuzzing reproduction case (AFL + valgrind) for the internal
# meteor:::ET0_Makkink routine. The extreme/denormal numeric literals are the
# recorded crash input and are preserved byte-for-byte; do not reformat them.
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(3.19860037215742e+129, -1.22227646714106e-150, -2.48280557433659e+258, -9.13799141996196e-296, -1.88918554334287e+52, -4.11215093765371e-273, 1.93031268583159e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = numeric(0))
# Replay the recorded call and print the structure of the result.
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
aba37ca81ffd509b82bbcaee513153b2d85f4b75
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/cgraph/examples/const.Rd.R
|
e01dae360a820b62e93d5136fa2187c4369664d0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 236
|
r
|
const.Rd.R
|
# Example script auto-extracted from the Rd documentation of cgraph::const.
library(cgraph)
### Name: const
### Title: Add Constant
### Aliases: const
### ** Examples
# Initialize a new computational graph (becomes the active graph).
x <- cgraph$new()
# Add a constant node with value 1 and name 'c' to the active graph.
const(1, name = "c")
|
7fa771f715cf47865f6013ef078f180194399913
|
aaa2fa57565a8689d15eb60fc564f44d3e8b8b1f
|
/Tender_Exploratory Analysis.R
|
0ed2d4c79372662c4b8b0060823b069a438a3694
|
[] |
no_license
|
cocaangle/Sam-s-club-Cusomter-Membership-renewal
|
f33c900296c5b4dcc6931eea5e24870d1a6e45ec
|
f3684768b954ae01979c6f74b681907d0920779a
|
refs/heads/master
| 2020-05-07T01:05:17.829433
| 2019-04-14T04:54:17
| 2019-04-14T04:54:17
| 180,258,990
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,421
|
r
|
Tender_Exploratory Analysis.R
|
# Exploratory analysis of Sam's Club tender (payment) data:
# builds matched member/POS/tender train-test splits, writes them to CSV,
# then explores transaction amounts by tender type and by member, and
# cross-tabulates spending against membership renewal.
#load packages
library(data.table)
library(caret)
library(dplyr)
library(ggplot2)
#import and read data (fread for speed; files expected in the working directory)
pos=fread("pos.txt")
tender=fread("tender_type.txt")
member=fread('members.txt')
dmm=fread("dmm_gmm.txt")
#Split training and testing dataset(1:1): sample 20k members, split in half
set.seed(5)
member_sample=member[sample(nrow(member), 20000), ]
member_train=member_sample[1:10000,]
member_test=member_sample[10001:20000,]
# Keep only POS rows belonging to the sampled members
pos_train=pos[which(pos$MEMBERSHIP_ID %in% member_train$MEMBERSHIP_ID ),]
pos_test=pos[which(pos$MEMBERSHIP_ID %in% member_test$MEMBERSHIP_ID ),]
# Join tender rows to POS by visit, keep the first 7 columns, de-duplicate,
# and drop rows with any NA.
# NOTE(review): `data` masks base::data() here; consider a different name.
tender_train_related=tender[which(tender$VISIT_NBR %in% pos_train$VISIT_NBR),]
data=left_join(tender_train_related, pos_train)
data2=data[,1:7]
uni=unique(data2)
tender_train=na.omit(uni)
# Same pipeline for the test half
tender_test_related=tender[which(tender$VISIT_NBR %in% pos_test$VISIT_NBR),]
data=left_join(tender_test_related, pos_test)
data2=data[,1:7]
uni=unique(data2)
tender_test=na.omit(uni)
#Write to csv
write.csv(member_train, 'member_train.csv', row.names = FALSE)
write.csv(member_test, 'member_test.csv', row.names = FALSE)
write.csv(pos_train, 'pos_train.csv', row.names = FALSE)
write.csv(pos_test, 'pos_test.csv', row.names = FALSE)
write.csv(tender_train, 'tender_train.csv', row.names = FALSE)
write.csv(tender_test, 'tender_test.csv', row.names = FALSE)
#Explore for tender data
#Total AMT VS Tender_Type
# NOTE(review): top_n() is superseded by slice_max() in recent dplyr.
tender_AMT= tender_train %>% group_by(TENDER_TYPE_DESC) %>%summarise(totalAMT = sum(TENDER_AMT))%>%arrange(desc(totalAMT))
top_tender=top_n(tender_AMT,8)
ggplot(top_tender, aes(x=TENDER_TYPE_DESC,y = totalAMT)) +geom_bar(stat = "identity",fill="#FF6666")
#We can see the TOP3 total transaction amount tender_type is debit card>visa>Sam's consumer credit
#Average AMT VS Tender_type
tender_avg_AMT= tender_train %>% group_by(TENDER_TYPE_DESC) %>%summarise(AVG_AMT = mean(TENDER_AMT))%>%arrange(desc(AVG_AMT))
top_tender_avg=top_n(tender_avg_AMT,8)
ggplot(top_tender_avg, aes(x=TENDER_TYPE_DESC,y = AVG_AMT)) +geom_bar(stat = "identity",fill="#FF6666")
table(tender_train$TENDER_TYPE_DESC)
# We can see that for the average of each transaction amount, Unknown>>Sam's business credit>Sam's direct credit>service income
#(shall "unknown" be removed or not?)--Customers using these tender types are more likely to renew their membership as they spend more than others in each
# of their transaction
#Count of Tender_Type
counts=data.frame(table(tender_train$TENDER_TYPE_DESC))
top_counts=top_n(counts,8)
ggplot(top_counts, aes(x=Var1,y = Freq)) +geom_bar(stat = "identity",fill="#FF6666")
#Similar pattern with total transaction amount, we can see the tender type that's used most is debit card>visa>Sam's consumer credit
#Total AMT VS each visit
each_visit= tender_train %>% group_by(VISIT_DATE,VISIT_NBR,CLUB_NBR) %>%summarise(total_AMT = sum(TENDER_AMT))%>%arrange(desc(total_AMT))
head(each_visit)
#Total AMT VS each member
each_member= tender_train %>% group_by(MEMBERSHIP_ID) %>%summarise(tot_mem = sum(TENDER_AMT))%>%arrange(desc(tot_mem))
top_member=top_n(each_member,8)
top_member
# So these members maybe more likely to renew their membership as they spend more on than others in Sam's club, who maybe the loyalty customers for Sam
# club, to confirm the assumption, we could join these top_member table with member_train to see the account renew status
#Join table
# Renewed VS top spending members
df=merge(x=each_member,y=member_train,by="MEMBERSHIP_ID",all.x=TRUE)
df=df%>%arrange(desc(tot_mem))
# NOTE(review): 3879 and 262 are hard-coded for this seed/sample; recompute
# (e.g. ceiling(nrow(df)/2) and the table() output) if the split changes.
top_df=df[1:3879,]
table(top_df$RENEW_IND)
pct=262/3879
pct
write.csv(df,file="top_fifty_percent_users.csv")
# So we see on the top 50% people who spend most , only 7% of them not renewed, which indicates people who spend more are more likely to renew their membership
# For next step, we can check the pattern of these top people's tenure years, plus_status, market area and categories of the items they buy most to get more
# insights.
#Renewed VS tender type
df1=merge(x=tender_train,y=member_train,by="MEMBERSHIP_ID",all.x=TRUE)
renew_df1=df1%>%filter(RENEW_IND!="UNRENEWED")
type_df=as.data.frame(table(renew_df1$TENDER_TYPE_DESC))
type_df=type_df%>%arrange(desc(Freq))%>%head(8)
ggplot(type_df, aes(x=Var1,y = Freq)) +geom_bar(stat = "identity",fill="#FF6666")
#Same as as the count of tender type in total sample: Debit card>Visa>Sam's consumer credit
# So there may be no difference in tender type for people who renew their membership or who not renew
|
9cc535ef460c34eda8d9e892850c3dba8c6c3bc3
|
a1fa0f12726f2c4afa8b95a68e66c882f8794377
|
/modeling/old/finley_code/model-FH.r
|
0b83af81b98afd4b2937f2e9ca126a33b62fc1e0
|
[] |
no_license
|
Reed-Statistics/thesis_white
|
f71628496102c4622a8a1c3f32f9476267c735b2
|
e046e7c627baa097e63a13046578c1344b2053f6
|
refs/heads/master
| 2023-05-28T15:52:07.929835
| 2021-06-09T17:32:29
| 2021-06-09T17:32:29
| 295,543,496
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,278
|
r
|
model-FH.r
|
##=========================================================================
## FH MODEL
## see Appendix A of You & Zhou (2011) for Full Conditional Distributions
##=========================================================================
## Gibbs sampler for the Fay-Herriot small-area model.
##
## y          := m x 1 matrix of direct estimates for the response variable
## X          := m x p matrix of covariates
## m          := number of small areas (i.e., forest stands)
## sigma.sq   := vector of length m: sampling variance of each direct estimate
## G          := length of the MCMC chain
## beta       := p x 1 matrix of starting coefficient values
## a0, b0     := shape / rate hyperparameters of the prior on sigma.sq.v
## sigma.sq.v := starting value of the model (dispersion) variance
##
## Returns a named list: theta.mat (m x G), beta.mat (p x G),
## sigma.sq.v.vec (length G), g.mat (m x G shrinkage weights).
## Positional access out[[1]]..out[[4]] is unchanged from the original.
##
## Requires mvrnorm (MASS) and rinvgamma (e.g. MCMCpack/invgamma) to be
## available in the calling environment.
FH.model <- function(y, X, m, sigma.sq, G, beta, a0, b0, sigma.sq.v){
  ## Preallocate storage for the monitored quantities
  theta.mat <- matrix(nrow = m, ncol = G)
  beta.mat <- matrix(nrow = nrow(beta), ncol = G)
  g.mat <- matrix(nrow = m, ncol = G)
  sigma.sq.v.vec <- vector(mode = "numeric", length = G)
  g <- vector(mode = "numeric", length = m)
  mu <- vector(mode = "numeric", length = m)
  var.t <- vector(mode = "numeric", length = m)
  theta <- matrix(nrow = m, ncol = 1)
  ## X is fixed over the chain, so hoist the Gram-matrix inverse out of the
  ## loop (the original recomputed solve(t(X) %*% X) every iteration).
  Xt <- t(X)
  XtX.inv <- solve(Xt %*% X)
  for (i in seq_len(G)) {
    ## (1): theta | rest -- area effects, shrunk toward the regression fit
    for (j in seq_len(m)) {
      g[j] <- sigma.sq.v / (sigma.sq.v + sigma.sq[j])  # shrinkage weight
      mu[j] <- g[j] * y[j] + (1 - g[j]) * (t(X[j, ]) %*% beta)
      var.t[j] <- sigma.sq[j] * g[j]
      theta[j, 1] <- rnorm(1, mu[j], sqrt(var.t[j]))
    }
    ## (2): beta | rest -- multivariate normal around the LS fit to theta
    mu.beta <- XtX.inv %*% Xt %*% theta
    var.beta <- sigma.sq.v * XtX.inv
    beta <- mvrnorm(1, mu.beta, var.beta)
    ## (3): sigma.sq.v | rest -- inverse gamma
    shape.v <- a0 + m/2
    scale.v <- b0 + (1/2) * t(theta - X %*% beta) %*% (theta - X %*% beta)
    sigma.sq.v <- rinvgamma(1, shape.v, scale.v)
    ## Record the current state of the chain
    theta.mat[, i] <- theta
    beta.mat[, i] <- beta
    sigma.sq.v.vec[i] <- sigma.sq.v
    g.mat[, i] <- g
  }
  ## Named list returned visibly. (The original ended with an invisible
  ## trailing assignment `out <- list(...)` and, as committed, was missing
  ## the function's closing brace -- both are fixed here.)
  list(theta.mat = theta.mat, beta.mat = beta.mat,
       sigma.sq.v.vec = sigma.sq.v.vec, g.mat = g.mat)
}
}
|
d57734886941b38a9b9c5f558f641e305b84bf6b
|
b335df05ada92baaa9c6db7f9a8b2102f7ee05b0
|
/plot1.R
|
6c8071777c7cf194a1184c031e9d1ff64646466b
|
[] |
no_license
|
joerglandskron/ExData_Plotting1
|
bca4880768b77df5c07f89d65daafe4dbbc8f6f7
|
ce047d3146ec5874e363c2f2b187812c9896aa3b
|
refs/heads/master
| 2021-01-16T21:06:48.996508
| 2015-09-13T13:30:03
| 2015-09-13T13:30:03
| 42,396,408
| 0
| 0
| null | 2015-09-13T13:15:59
| 2015-09-13T13:15:59
| null |
UTF-8
|
R
| false
| false
| 1,555
|
r
|
plot1.R
|
##Coursera
##Data Science Specialization Signature Track
##Exploratory Data Analysis
##
##Programming Assignment 1
##plot1.R
## Reads the UCI "household power consumption" text file, subsets it to
## 2007-02-01..2007-02-02, and writes a histogram of Global Active Power
## to plot1.png (480x480).
#This is only for my own computer
#WorkingDirectory <- "Q:/Eigenes/Joerg/Buero/Coursera/Data Science Specialization/4-Exploratory Data Analysis/Programming Assignments/PA1"
#setwd(WorkingDirectory)
#Define filenames of data-input-file (datfile) and graphic-output-file (outfile)
#The data-input-file should be stored unzipped in the current Working Directory with origin filename
datfile <- "./household_power_consumption.txt"
outfile <- "plot1.png"
#Load data: explicit column classes avoid type guessing; "?" marks missing values
# NOTE(review): this reads the full ~2M-row file before subsetting; fine for
# the assignment, but a filtered read would be faster.
classes=c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
dat <- read.table(datfile,
                  sep=";",
                  dec=".",
                  colClasses=classes,
                  comment.char="",
                  header=TRUE,
                  na.strings="?",
                  stringsAsFactors=FALSE)
#Transform Time- and Date columns from character to POSIXlt and Date datatypes
#(Time gets the full date-time so the two columns together stay unambiguous)
dat$Time <- strptime(paste(dat$Date,dat$Time),format="%d/%m/%Y %H:%M:%S")
dat$Date <- as.Date(dat$Date, format="%d/%m/%Y")
#Subset data to two days (comparison coerces the strings to Date)
dat <- subset(dat, Date >= "2007-02-01" & Date <= "2007-02-02")
#Open png- graphics-file-device
png(filename = outfile,
    width = 480,
    height = 480)
#Draw histogram (plot#1)
hist(dat$Global_active_power,
     main="Global Active Power",
     ylab="Frequency",
     xlab="Global Active Power (kilowatts)",
     col="red")
#Close device (flushes the PNG to disk)
dev.off()
|
f717efe192e34563ea2a8df7b0b6bb3743d87403
|
79aa6188960a85b751e7bc47d566b3a0fb92354f
|
/app_startup.R
|
66e237ca75342f830c3e6cd85e55267cd5373d26
|
[] |
no_license
|
jimscratch/shiny_big_long
|
43b2046074dd23502d3139de6054d0f6f36afc7f
|
db2106faa04b5cf48a52b3c0a9bf3bae14051f61
|
refs/heads/master
| 2022-11-25T15:12:24.315835
| 2020-07-31T09:45:38
| 2020-07-31T09:45:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 561
|
r
|
app_startup.R
|
library(tidyverse)
library(DBI)
library(RSQLite)
# Load the game helpers used by the app (side effect: defines functions
# and objects in the global environment).
source('package/game.R')
# Fixed seed so every app session starts from the same simulated state.
set.seed(1234)
# Segment parameter table: one row per market segment (A-D).
# compare_alpha/compare_beta/compare_trend drive the price-comparison
# behaviour; freq_* and sev_* are the claim-frequency and claim-severity
# distribution parameters (shape/scale) with their annual trends.
tbl_segment <- tibble(
    name = LETTERS[1:4]
  , compare_alpha = c(1.01, 1.01, 15, 15)
  , compare_beta = c(15, 15, 1.01, 1.01)
  , compare_trend = c(0, 0, 0, 0)
  , freq_shape = 1
  , freq_scale = 1
  , freq_trend = c(-.02, .04, -.02, .04)
  , sev_shape = 2
  , sev_scale = 5e3
  , sev_trend = 0.02
) %>%
  mutate(
    # Expected values derived from the shape/scale parameters
    expected_freq = freq_shape * freq_scale
    , expected_severity = sev_shape * sev_scale
    , expected_cost = expected_freq * expected_severity
  )
|
e717417e811aa5e2ca2961f1c92d331f2a025b78
|
86b083f0b1e16c7f5d20379901ab3c2eae6061cf
|
/PAP_coding_demo.R
|
978c71587d3b81644612abb0a052f5f7533b88b1
|
[] |
no_license
|
freedmanguy/PAP_coding_app
|
cb0d416a135c725fe922b78c1c735951f4dc53e9
|
424a24b095f04ebf780f9f1f44e15e96f54adb9e
|
refs/heads/main
| 2023-01-02T13:02:15.786585
| 2020-10-27T23:28:13
| 2020-10-27T23:28:13
| 305,813,776
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,552
|
r
|
PAP_coding_demo.R
|
# Deployment configuration -- edit these three values per deployment.
filename <- "demo.RDS" # change according to the relevant filename (Dropbox object under PAP/)
user <- "[user]" # change to the name of the coder (shown in the sidebar greeting)
shinyurl <- "papcodingdemo" # change to the name of the app (affects url used by "Next Observation")
library(shiny)
library(dplyr)
library(rdrop2)
library(DT)
library(rvest)
mydatetime <- function(){
  # Current date-time in Israel as a "YYYY-MM-DD HH:MM:SS" string
  # (24-hour clock), used to timestamp coding decisions.
  #
  # The original implementation scraped timeanddate.com with rvest and then
  # patched the 12-hour clock into 24-hour form with a long chain of gsub()
  # calls -- fragile (breaks whenever the page markup changes) and it needed
  # a network round-trip for every timestamp. Formatting Sys.time() with an
  # explicit tz yields the same string deterministically and offline.
  format(Sys.time(), format = "%Y-%m-%d %H:%M:%S", tz = "Asia/Jerusalem")
}
# Define UI
# UI: three tabs -- "To Code" (code one sampled hearing), "Coded" (review
# everything coded so far), and "Search & Recode" (look up / fix by id).
# Output ids (uiOutput/DTOutput) are filled in by the server function.
ui <- navbarPage("PAP Coding App", collapsible = T, inverse = T,
                 tabPanel("To Code",
                          fluidPage(
                            sidebarLayout(
                              sidebarPanel(
                                br(),
                                uiOutput("myrows"),
                                br(),
                                textInput("minorc","Minor Code"),
                                br(),
                                actionButton("submit", "Submit"),
                                br(),
                                uiOutput("completed"),
                                br(),
                                # Full page reload so the server samples a fresh hearing
                                actionButton("nextobs", "Next Observation",
                                             onclick = paste0("location.href='https://freedmanguy.shinyapps.io/",shinyurl,"/';"))
                              ),
                              mainPanel(
                                br(),
                                uiOutput("alldone"),
                                br(),
                                uiOutput("title"),
                                uiOutput("description"),
                                br(),
                                uiOutput("id"),
                                uiOutput("Congress"),
                                uiOutput("year"),
                                uiOutput("Chamber"),
                                uiOutput("Committee1"),
                                uiOutput("Subcommittee1"),
                                uiOutput("Committee2"),
                                uiOutput("Subcommittee2"),
                                br(), # NOTE(review): trailing comma leaves an empty argument -- confirm shiny tolerates it
                              )
                            ),
                            fluidRow(
                              column(12,
                                     h3("PAP Codebook:"),
                                     DTOutput("codebook"))
                            )
                          )),
                 tabPanel("Coded",
                          fluidPage(
                            mainPanel(
                              actionButton("displaytable", "Refresh Table"),
                              br(),
                              DTOutput("mycoded")
                            )
                          )),
                 tabPanel("Search & Recode",
                          fluidPage(
                            sidebarLayout(
                              sidebarPanel(
                                textInput("id2","id"),
                                actionButton("search2", "Search"),
                                br(),
                                br(),
                                h4("If you wish to recode:"),
                                textInput("minor2","Minor Code"),
                                actionButton("submit2", "Submit"),
                                br(),
                                uiOutput("completed2"),
                                br(),
                                # actionButton("next2", "Refresh",
                                #             onclick = "location.href='https://capcoding.shinyapps.io/demo/';")
                              ),
                              mainPanel(
                                br(),
                                uiOutput("title2"),
                                uiOutput("description2"),
                                uiOutput("minor2"),
                                br(),
                                uiOutput("id2"),
                                #br(),
                                uiOutput("Chamber2"),
                                #br(),
                                uiOutput("Congress2"),
                                #br(),
                                uiOutput("year2"),
                                #br(),
                                uiOutput("Committee12"),
                                #br(),
                                uiOutput("Subcommittee12"),
                                uiOutput("Committee22"),
                                uiOutput("Subcommittee22"),
                                br(),
                                br(),
                              )
                            )))
)
# Define server
# Server: on every session start it authenticates to Dropbox, downloads the
# shared RDS data file, samples one not-yet-coded hearing for display, and
# wires up four observers (submit code, refresh coded table, search by id,
# recode by id). Each submit writes the whole file back to Dropbox.
server <- function(input, output, session) {
  # Static PAP codebook table shown under the coding panel.
  output$codebook <- renderDT({
    codebook <- readRDS("PapCodebook.RDS")
    codebook$`Major Code` <- as.character(codebook$`Major Code`)
    codebook$`Minor Code` <- as.character(codebook$`Minor Code`)
    datatable(codebook, options = list(dom = 't', pageLength = nrow(codebook)), rownames = T, filter = "top")
  })
  # Session start-up: fetch the shared data and sample one uncoded hearing.
  drop_auth(rdstoken = "token.rds")
  drop_download(paste0("PAP/",filename), overwrite = T)
  mydata <- readRDS(filename)
  mydata2code <- filter(mydata, is.na(minor))
  i <- sample(mydata2code$id, 1)
  tocode <- filter(mydata, id==i)
  if(nrow(tocode)>0){
    # Render the sampled hearing's metadata for the "To Code" tab.
    output$id <- renderUI({HTML(paste0('<p><strong>Hearing ID: </strong>',tocode$id,"</p>"))})
    output$Chamber <- renderUI({HTML(paste0('<p><strong>Chamber: </strong>',tocode$Chamber,"</p>"))})
    output$Congress <- renderUI({HTML(paste0('<p><strong>Congress: </strong>',tocode$Congress,"</p>"))})
    output$year <- renderUI({HTML(paste0('<p><strong>Year: </strong>',tocode$year,"</p>"))})
    output$Committee1 <- renderUI({HTML(paste0('<p><strong>Committee1: </strong>',tocode$CName1,"</p>"))})
    output$Subcommittee1 <- renderUI({HTML(paste0('<p><strong>Subcommittee1: </strong>',tocode$SName1,"</p>"))})
    output$Committee2 <- renderUI({HTML(paste0('<p><strong>Committee2: </strong>',tocode$CName2,"</p>"))})
    output$Subcommittee2 <- renderUI({HTML(paste0('<p><strong>Subcommittee2: </strong>',tocode$SName2,"</p>"))})
    output$title <- renderUI({HTML(paste0('<p><strong>Hearing Title: </strong>',tocode$title,"</p>"))})
    output$description <- renderUI({HTML(paste0('<p><strong>Hearing Description: </strong>',tocode$description,"</p>"))})
    output$myrows <- renderUI({HTML(paste0('<h3> Hi ',user,", you have ",nrow(mydata2code)," observations remaining.","</h3>"))})
  } else {
    output$alldone <- renderUI({
      HTML(paste0('<p style="color:blue">',"Looks like you're all done. Good job! </p>"))
    })
  }
  # Whitelist of valid PAP minor topic codes; any other entry is rejected.
  # NOTE(review): the doubled `goodcodes <- goodcodes <-` assignment is
  # harmless in R but looks like a leftover -- one assignment suffices.
  goodcodes <- goodcodes <- c(100, 101, 103, 104, 105, 107, 108, 110, 199, 200, 201, 202, 204, 205, 206, 207, 208, 209, 299,
                              300, 301, 302, 321, 322, 323, 324, 325, 331, 332, 333, 334, 335, 336, 341, 342, 398, 399, 400,
                              401, 402, 403, 404, 405, 408, 498, 499, 500, 501, 502, 503, 504, 505, 506, 529, 599, 600, 601,
                              602, 603, 604, 606, 607, 609, 698, 699, 700, 701, 703, 704, 705, 707, 708, 709, 710, 711, 798,
                              799, 800, 801, 802, 803, 805, 806, 807, 898, 899, 900, 1000, 1001, 1002, 1003, 1005, 1006, 1007, 1010,
                              1098, 1099, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210, 1211, 1299, 1300, 1301, 1302, 1303,
                              1304, 1305, 1308, 1399, 1400, 1401, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1499, 1500, 1501, 1502, 1504,
                              1505, 1507, 1520, 1521, 1522, 1523, 1524, 1525, 1526, 1599, 1600, 1602, 1603, 1604, 1605, 1606, 1608, 1609, 1610,
                              1611, 1612, 1614, 1615, 1616, 1617, 1619, 1620, 1698, 1699, 1700, 1701, 1704, 1705, 1706, 1707, 1708, 1709, 1798,
                              1799, 1800, 1802, 1803, 1804, 1806, 1807, 1808, 1899, 1900, 1901, 1902, 1905, 1906, 1910, 1915, 1921, 1925, 1926,
                              1927, 1929, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015,
                              2030, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2199)
  # Submit handler: validate the code, stamp it on the sampled hearing, and
  # push the updated file back to Dropbox.
  codepap <- observeEvent(input$submit, {
    if(input$minorc %in% goodcodes){
      i <- tocode$id
      codegiven <- input$minorc
      hearingsampled <- tocode$id
      mydata$DateUpdated[mydata$id==i] <- as.character(mydatetime())
      mydata$minor[mydata$id==i] <- input$minorc
      mydata$title <- as.character(mydata$title)
      mydata$description <- as.character(mydata$description)
      saveRDS(mydata, filename)
      drop_upload(file = filename, path = "PAP", mode = "overwrite")
      output$completed <- renderUI({HTML(paste0("<p>Hearing ",hearingsampled," coded successfully into minor topic ",codegiven,".</p><br><p> Click 'Next Observation' to code the next hearing</p>"))})
    } else {
      output$completed <- renderUI({
        HTML(paste0('<p style="color:red">',"Error: The code entered is not valid. </p><p>Please review. Remember: </p><p><strong>Minor topics</strong> can only receive one of the following: ",paste(goodcodes, collapse = ", "),".</p>"))
      })
    }
  })
  # "Coded" tab: re-download the file and show all coded rows, newest first.
  showcoded <- observeEvent(input$displaytable,{
    drop_download(paste0("PAP/",filename), overwrite = T)
    mydf <- readRDS(filename) %>%
      as.data.frame() %>%
      mutate(id = as.character(id))
    output$mycoded <- renderDT({
      mydf <- select(mydf, id, Chamber, Congress, year, minor, title, description, DateUpdated, Committee1, Subcommittee1,Committee2, Subcommittee2) %>%
        filter(!is.na(minor)) %>%
        arrange(desc(DateUpdated))
      datatable(mydf, options = list(dom = 't', pageLength = nrow(mydf)), rownames = F, filter = "top")
    }) # “default”, “bootstrap”, “bootstrap4”, “foundation”, “jqueryui”, “material”, “semanticui”, “uikit”
  })
  # "Search & Recode" tab: look up one hearing by id and render its fields.
  searchpap <- observeEvent(input$search2, {
    drop_download(paste0("PAP/",filename), overwrite = T)
    mysearch <- readRDS(filename)
    mysearch <- filter(mysearch, id==input$id2)
    output$id2 <- renderUI({HTML(paste0('<p><strong>Hearing ID: </strong>',mysearch$id,"</p>"))})
    output$Chamber2 <- renderUI({HTML(paste0('<p><strong>Chamber: </strong>',mysearch$Chamber,"</p>"))})
    output$Congress2 <- renderUI({HTML(paste0('<p><strong>Congress: </strong>',mysearch$Congress,"</p>"))})
    output$year2 <- renderUI({HTML(paste0('<p><strong>Year: </strong>',mysearch$year,"</p>"))})
    output$Committee12 <- renderUI({HTML(paste0('<p><strong>Committee1: </strong>',mysearch$CName1,"</p>"))})
    output$Subcommittee12 <- renderUI({HTML(paste0('<p><strong>Subcommittee1: </strong>',mysearch$SName1,"</p>"))})
    output$Committee22 <- renderUI({HTML(paste0('<p><strong>Committee2: </strong>',mysearch$CName2,"</p>"))})
    output$Subcommittee22 <- renderUI({HTML(paste0('<p><strong>Subcommittee2: </strong>',mysearch$SName2,"</p>"))})
    output$title2 <- renderUI({HTML(paste0('<p><strong>Hearing Title: </strong>',mysearch$title,"</p>"))})
    output$description2 <- renderUI({HTML(paste0('<p><strong>Hearing Description: </strong>',mysearch$description,"</p>"))})
    output$minor2 <- renderUI({HTML(paste0('<p><strong>Minor Code: </strong>',mysearch$minor,"</p>"))})
  })
  # Recode handler: same validation/write-back path as the submit handler,
  # but keyed on the user-entered id rather than the sampled hearing.
  recodepap <- observeEvent(input$submit2, {
    if(input$minor2 %in% goodcodes){
      codegiven2 <- input$minor2
      mydata$DateUpdated[mydata$id==input$id2] <- as.character(mydatetime())
      mydata$minor[mydata$id==input$id2] <- input$minor2
      saveRDS(mydata, filename)
      drop_upload(file = filename, path = "PAP", mode = "overwrite")
      output$completed2 <- renderUI({HTML(paste0("<p>Hearing ",input$id2," coded successfully into minor topic ",codegiven2,".</p>"))})
      updateTextInput(session, "minor2", value = "")
    } else {
      output$completed2 <- renderUI({
        HTML(paste0('<p style="color:red">',"Error: The code entered is not valid. </p><p>Please review. Remember: </p><p><strong>Minor topics</strong> can only receive one of the following: ",paste(goodcodes, collapse = ", "),".</p>"))
      })
    }
  })
}
# Run the application (binds the UI definition above to the server logic)
shinyApp(ui = ui, server = server)
|
e35c66dde2267381fbf58d91e93e5c5ea618d3df
|
19b2bada91976d8b36cfaa1315cb02a228009b59
|
/Utilities/MergeReviews.R
|
d73dfe6d0930aa78397f89b11304602a48585eeb
|
[] |
no_license
|
DiegoPergolini/TextMiningProject
|
7667cc034e937919808f71b51eb2e8bc90268365
|
0a6955a40c7022c9f3571dba0bcedc0841971bc1
|
refs/heads/master
| 2020-04-17T14:28:25.656045
| 2019-01-20T16:06:49
| 2019-01-20T16:06:49
| 166,658,431
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 444
|
r
|
MergeReviews.R
|
# Merge all per-source review CSVs in the current directory into one file.
#
# Fixes vs. the original:
#  * list.files() takes a REGULAR EXPRESSION, not a glob: "*.csv" matched
#    far more than intended; "\\.csv$" matches only names ending in ".csv".
#  * the files were listed in "./" but then read from a hard-coded absolute
#    path ("C:/Users/diego/..."); both steps now use the same directory.
filenames <- list.files(path = ".", pattern = "\\.csv$")
print(filenames)
## Paths to the CSV files, rooted in the directory they were listed from
fullpath <- file.path(".", filenames)
print(fullpath)
## Read every CSV and stack the rows into one data set
dataset <- do.call("rbind", lapply(fullpath, read.csv))
write.table(dataset, file = "allReviewsCJFN.csv", sep = ",", row.names = FALSE)
|
0bf3d041d4c5ad720620cbe02a2311038ae0bdb8
|
6a676c52142be89288e532bdb9b20fb77445143e
|
/run_analysis.R
|
05186c78bf120482569e138744fa567b47184625
|
[] |
no_license
|
skneils/tidydata
|
eb3c7486b253da8719f9f140f18847dbe6db228b
|
14817cfd761b11043bce9bc1bfbfa509a869ea16
|
refs/heads/master
| 2016-09-03T04:10:00.433694
| 2014-04-27T18:15:39
| 2014-04-27T18:15:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,875
|
r
|
run_analysis.R
|
# Builds a tidy data set from the UCI HAR (Human Activity Recognition)
# smartphone data: merges test and train sets, keeps only mean()/std()
# features, labels activities, and averages each variable per activity
# and subject into `tidydata`.
# load test data from ./UCI HAR Dataset/test/
# NOTE(review): "x_test.txt" is lower-case here but "X_train.txt" is
# upper-case below -- the UCI archive ships "X_test.txt"; verify on a
# case-sensitive filesystem.
testx=read.table("./UCI HAR Dataset/test/x_test.txt")
testy=read.table("./UCI HAR Dataset/test/y_test.txt")
tests=read.table("./UCI HAR Dataset/test/subject_test.txt")
# load train data from ./UCI HAR Dataset/train/
trainx=read.table("./UCI HAR Dataset/train/X_train.txt")
trainy=read.table("./UCI HAR Dataset/train/y_train.txt")
trains=read.table("./UCI HAR Dataset/train/subject_train.txt")
# load features and labels
features=read.table("./UCI HAR Dataset/features.txt")
actlabels=read.table("./UCI HAR Dataset/activity_labels.txt")
# give columns names using features data
colnames(testx) = features[,2]
colnames(trainx) = features[,2]
# remove columns that don't contain mean or std data
# this is done with the following:
# grepl("[a-z]*mean\\(|std\\([a-z]*",features[,2])
# data is considered if it contains "mean(" or "std("
# this excludes "meanFreq()" and the vectors used in "angle()"
testx = subset(testx, select=grepl("[a-z]*mean\\(|std\\([a-z]*",features[,2]))
trainx = subset(trainx, select=grepl("[a-z]*mean\\(|std\\([a-z]*",features[,2]))
# keep track of number of measurement columns (before activity/subject are appended)
v=ncol(testx)
# replace activity numbers with activity names and add column to testx/trainx
testx$activity = actlabels[testy[,1],2]
rm(testy)
trainx$activity = actlabels[trainy[,1],2]
rm(trainy)
# add subjects to testx/trainx
testx$subject = tests[,1]
trainx$subject = trains[,1]
# combine testx and trainx into one dataset
dataset = merge(testx, trainx, all=TRUE)
# remove the old data to free memory
rm(testx)
rm(trainx)
rm(features)
rm(actlabels)
# Now create a tidy data set with the average of each variable
# for each activity and each subject
library(reshape2)
tidymelt = melt(dataset, id=c("activity","subject"), measure.vars=names(dataset[,1:v]))
tidydata = dcast(tidymelt, variable~activity+subject, mean )
rm(tidymelt)
|
df471e90a3c32d3185e11499846704769c9330a7
|
7e1602b2e2885c211dee1fad93bf3cb5d823945a
|
/HW2/Code.R
|
69e77b2221d8f859add39c02f0873f31f3f1ed6e
|
[] |
no_license
|
CharlesYWL/ECS132
|
f773e237159347882d24a27e1ba849b9368a76d3
|
d73b6f068da05d3289418cab43ebbc16357ee48d
|
refs/heads/master
| 2020-05-14T19:15:49.126061
| 2019-06-13T05:08:12
| 2019-06-13T05:08:12
| 181,926,290
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,637
|
r
|
Code.R
|
#Weili Yin,912603171
#####P1
# sample() experiments with the P1 distribution over the values 0..4.
# NOTE(review): the first call omits `size`, so it draws length(0:4) values
# without replacement — the 1-draw form on the next line was likely intended.
sample(0:4,prob=c(.292,.4096,.2312,.068,.064))
sample(0:4,1,prob=c(.292,.4096,.2312,.068,.064))
sample(0:2,2,prob = c(.5,.4,.1))
# empirical variance of L1 ~ {0,1,2} with probabilities .5/.4/.1
ls1 <- sample(0:2,10000,prob = c(.5,.4,.1),replace = TRUE)
var(ls1)
# Simulate L2, the second-period queue length: start from
# L1 ~ {0,1,2} with probs .5/.4/.1, let each of the initial people
# independently leave with probability .2, then add a fresh arrival
# count drawn from the same {0,1,2} distribution.
getL2 <- function(){
  L1 <- sample(0:2,1,prob = c(.5,.4,.1))
  NumofPeople <- L1   # snapshot of the initial count; loop bound stays fixed
  i <- 0
  while (i<NumofPeople) {
    if(runif(1) < .2){ #quit one people
      L1 <- L1-1
    }
    i <- i+1
  }
  # survivors plus new arrivals
  return(L1+sample(0:2,1,prob=c(.5,.4,.1)))
}
# Draw `num` independent realisations of L2 (see getL2).
# Returns NULL when num is 0, matching the original behaviour.
getL2list <- function(num) {
  draws <- NULL
  k <- 0
  while (k < num) {
    k <- k + 1
    draws <- c(draws, getL2())
  }
  draws
}
var(getL2list(10000))
# Simulate L2 - L1: same dynamics as getL2, but return the net change in
# queue length (arrivals minus departures) relative to the initial L1.
getL2L1 <-function(){
  L1 <- sample(0:2,1,prob = c(.5,.4,.1))
  NumofPeople <- L1
  initL1 <- L1   # remember the starting count so the difference can be returned
  i <- 0
  while (i<NumofPeople) {
    if(runif(1) < .2){ #quit one people
      L1 <- L1-1
    }
    i <- i+1
  }
  return(L1+sample(0:2,1,prob=c(.5,.4,.1))-initL1)
}
# Draw `num` independent realisations of L2 - L1 (see getL2L1).
# Returns NULL when num is 0, matching the original behaviour.
getL2L1list <- function(num) {
  draws <- NULL
  k <- 0
  while (k < num) {
    k <- k + 1
    draws <- c(draws, getL2L1())
  }
  draws
}
var(getL2L1list(10000))
# Report empirical variances of L1, L2 and L2-L1 from nreps simulations each.
sim1 <- function(nreps){
  ls1 <- sample(0:2,nreps,prob = c(.5,.4,.1),replace = TRUE)
  cat("Var(L1) = ", var(ls1),'\n')
  cat("Var(L2) = ", var(getL2list(nreps)),'\n')
  cat("Var(L2-L1) = ",var(getL2L1list(nreps)),'\n')
}
####2
#' Toss a fair coin until either r consecutive heads occur or s tosses
#' have been made, and return the number of tosses performed.
#'
#' @param r Number of consecutive heads required to stop early.
#' @param s Maximum number of tosses allowed.
#' @return Integer count of tosses made.
toss <- function(r, s) {
  run_len <- 0  # current run of consecutive heads
  n_toss <- 0   # tosses made so far
  # `&&` (scalar, short-circuiting) is the correct operator for a
  # while-condition; the original used the elementwise `&`.
  while (run_len < r && n_toss < s) {
    if (runif(1) < .5) {  # head: extend the run
      run_len <- run_len + 1
    } else {              # tail: run resets
      run_len <- 0
    }
    n_toss <- n_toss + 1
  }
  # (the original also grew a debug vector `testls` per toss whose only
  # consumer was a commented-out print; removed as dead code)
  return(n_toss)
}
# Run the toss(r, s) experiment `num` times and collect the toss counts.
# Returns NULL when num is 0, matching the original behaviour.
gettosslist <- function(r, s, num) {
  rounds <- NULL
  k <- 0
  while (k < num) {
    k <- k + 1
    rounds <- c(rounds, toss(r, s))
  }
  rounds
}
# Mean number of tosses over nreps runs of the r-heads-within-s game.
sim2 <- function(r, s, nreps) {
  mean(gettosslist(r, s, nreps))
}
###P3.1
library(gtools)
origin <- function(list) list  # identity helper: returns its argument unchanged
# Apply FUN over the m-permutations of x (via gtools::permutations).
# NOTE(review): `ls` is a matrix with one permutation per row, but
# `FUN(ls[i])` passes only the single i-th element (column-major order);
# `FUN(ls[i,])` was probably intended — confirm against intended use.
permn <- function(x,m,FUN){
  ls <- permutations(n = length(x), r = m, x)
  rs <- NA
  count <- length(ls)/m   # number of rows in the permutation matrix
  for (i in 1:count) {
    rs[i] <- FUN(ls[i])
  }
  return(rs)
}
# Number of permutations of n distinct items taken x at a time: n! / (n - x)!
perm <- function(n, x) {
  factorial(n) / factorial(n - x)
}
### Below are for P3.2
#fun <- function(list){
# sum <- numeric(1)
# sum <- 0
# i <- 1
# while (i < 8) {
# sum <- sum + abs(list[i+1]-list[i])
# i <- i+1
# }
# return(sum)
#}
###P5
library(ggplot2)
#' Plot P(X = k) for k = 1..10 where X ~ Binomial(10, 0.97).
#'
#' @return A ggplot scatter plot of the binomial probabilities.
drawplot <- function() {
  num <- 1:10
  # dbinom is vectorized, so the original element-by-element loop (and its
  # unused `sum` accumulator) is unnecessary
  possibility <- dbinom(num, 10, .97)
  df <- data.frame(num, possibility)
  # NOTE(review): converting the y values to a factor reproduces the
  # original behaviour but makes the y axis categorical — confirm intended.
  df$possibility <- as.factor(df$possibility)
  ggplot(df, aes(x = num, y = possibility)) + geom_point()
}
#drawplot()
|
fb75bb57482a987deb93681536fc115a49634d6a
|
5a2be4b810a31ce3da1fa58fc91a87faabcc7af0
|
/Week3/project.R
|
aba7c886b023d9363ada3c148986289ca438a4f4
|
[] |
no_license
|
chanyayun/Data-Science-Programming
|
85b582c847f5afa66f28493a61519c120b1639ef
|
e0ed329d675e53a630622ac9985b07df051b7f8c
|
refs/heads/master
| 2020-06-16T22:55:24.515071
| 2019-07-28T15:55:29
| 2019-07-28T15:55:29
| 195,725,784
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,939
|
r
|
project.R
|
# --- Text mining of CNN 2019 articles: term frequencies, wordcloud,
# --- word associations, and tf-idf weighting.
library(readr)
library(dplyr)
library(tidyr)
library(stringr)
library(tm)
library(qdap)
library(ggplot2)
# read the raw text, one whitespace-separated token per element
data <- scan("Week3/data/cnn_2019.txt", what = character(), encoding = "UTF-8")
data_source <- VectorSource(data)
# build a corpus and normalise it: drop numbers, lowercase, strip
# punctuation and extra whitespace, remove stopwords and filler words
data_corpus <- Corpus(data_source) %>%
  tm_map(removeNumbers) %>%
  tm_map(content_transformer(tolower)) %>%
  tm_map(content_transformer(function(x) gsub("[[:punct:]]", " ", x))) %>% #remove punctuation
  tm_map(stripWhitespace) %>%
  tm_map(removeWords, c(stopwords("en"), "said", "says", "also", "new", "will", "year", "two", "can", "may", "s", ""))
# stem_corpus <- tm_map(data_corpus, stemDocument)
# aljazeera_corpus <- tm_map(stem_corpus, stemCompletion, dictionary = aljazeera_dict)
# term-document matrix of raw counts
cnn_tdm <- TermDocumentMatrix(data_corpus)
cnn_m <- as.matrix(cnn_tdm)
dim(cnn_m)
cnn_m[1:10, 1:20]
# total frequency per term, most frequent first
term_frequency <- rowSums(cnn_m)
term_frequency <- sort(term_frequency, decreasing = T)
View(term_frequency[1:15])
#graph with ggplot
barplot(term_frequency[1:10],
        xlab = "Top 10 Words",
        ylab = "Frequency",
        col = grey.colors(10), las = 2)
#wordcloud
cnn_freqs <- data.frame(term = names(term_frequency),
                        num = term_frequency)
# NOTE(review): brewer.pal comes from RColorBrewer, which is not loaded
# explicitly here — presumably attached via a dependency; confirm.
brown_bg <- brewer.pal(10, "BrBG")
brown_bg <- brown_bg[-(5:6)]   # drop the two palest middle colours
wordcloud::wordcloud(cnn_freqs$term, cnn_freqs$num,
                     max.words = 100, colors = brown_bg)
#word association: terms correlated (>= 0.1) with each keyword
president_association <- findAssocs(cnn_tdm, "president", 0.1)
china_association <- findAssocs(cnn_tdm, "china", 0.1)
government_association <- findAssocs(cnn_tdm, "government", 0.1)
hk_association <- findAssocs(cnn_tdm, "hong", 0.1)
#using tfidf weighting instead of raw counts
tfidf_tdm <- TermDocumentMatrix(data_corpus,
                                control = list(weighting = weightTfIdf))
tfidf_m <- as.matrix(tfidf_tdm)
dim(tfidf_m)
tfidf_m[50:60, 1:20]
term_frequency <- rowSums(tfidf_m)
term_frequency <- sort(term_frequency, decreasing = T)
term_frequency[1:30]
|
6927d888415363deedeba90d1aa0536bad4c4f74
|
2a1407b8e6552a0caae86d5fa0bb8c898fa4288e
|
/man/score.Rd
|
7fe42cd65a40592b94eedd09e5ec6d88cf74b182
|
[] |
no_license
|
weinroth/corncob
|
29753c06be6941aaa22d8c77044534a37e6d1dd9
|
667280f7eec3ad5715f7a4caa3ecebd7a6601777
|
refs/heads/master
| 2020-03-28T10:00:18.393993
| 2018-08-14T19:45:11
| 2018-08-14T19:45:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 489
|
rd
|
score.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/score.R
\name{score}
\alias{score}
\title{Compute score}
\usage{
score(mod, numerical = FALSE, forHess = FALSE)
}
\arguments{
\item{mod}{model fit from bbdml}
\item{numerical}{Boolean numerical score. Not as stable. Defaults to FALSE}
\item{forHess}{Boolean for whether to use to approximate Hessian. Defaults to FALSE.}
}
\value{
Analytic score
}
\description{
Compute score
}
\examples{
\dontrun{
TODO
}
}
|
4deb775b986630e78defa591884656265eeb9bc9
|
71e23a80daa5d4ac060303733719a25734ad6229
|
/Big.test.r
|
544c61c9fac52c224292802e8ce6c370c4176ee1
|
[] |
no_license
|
PaulPyl/h5array
|
e82920a63122242701c66fccd1d212b8564e3a0f
|
2905c2d653f74bff3d3e32b80be1385c5e0432ca
|
refs/heads/master
| 2021-06-03T15:22:59.654944
| 2016-02-03T13:33:25
| 2016-02-03T13:33:25
| 39,744,744
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,142
|
r
|
Big.test.r
|
# Exploratory smoke-test script for the h5array package: creates
# HDF5-backed arrays/matrices, round-trips dimnames, and benchmarks
# chunked vs default layouts.
require(h5array)
# large chunked 3-d array; only a single slice is actually written
x <- h5arrayCreate(tempfile(), "BigData", c(1e4+1,1e4+1,20), "double", chunk = c(1e3, 1e3, 20))
dimnames(x) <- list( NULL, NULL, letters[1:20] )
x[1,,1] <- 23
h5ls(getFileName(x))
# persist the dimnames into the HDF5 file, then inspect the listing again
writeDimnamesToFile(x)
h5ls(getFileName(x))
# re-open the same file; the script checks dimnames before and after
# loadDimnamesFromFile (presumably they are not loaded automatically — confirm)
y <- h5array(getFileName(x), location = getLocation(x))
dimnames(y)
dimnames(x)
y <- loadDimnamesFromFile(y)
dimnames(y)
require(h5array)
# 2-d case: named rows/columns and name-based subsetting
y <- h5matrixCreate(tempfile(), "Data", c(1e2,20), "double", chunk = c(1e2, 10))
dimnames(y) <- list( paste0("row", 1:1e2), letters[1:20] )
y[,] <- 23
h5ls(getFileName(y))
writeDimnamesToFile(y)
h5ls(getFileName(y))
y[,"a"]
y[c("row23", "row42"),c("a", "l", "k")]
require(h5array)
# compare apply() timings for default chunking vs per-cell chunking
z <- h5arrayCreate(tempfile(), "SmallData", c(20,100,5), "double")
zc <- h5arrayCreate(tempfile(), "SmallData", c(20,100,5), "double", chunk = c(1,1,5))
foo <- array(1:(20*100*5), dim = dim(z))
z[,,] <- foo
zc[,,] <- foo
require(microbenchmark)
microbenchmark(apply(z, c(1,2), sum), times = 10)
microbenchmark(apply(zc, c(1,2), sum), times = 10)
apply(z, 2, function(x) as.character(median(x)))
# rebuild the introduction vignette into the repository README
knitr::knit(input="vignettes//h5array.Introduction.Rmd", output = "readme.md")
|
10f31d094b54f9db56e18357e675bcfdc79e4a84
|
0e20fdf781cf8489e6d2fe66aaff5d627d4d0f15
|
/animated_bar_plot_in_R_for_male_names.R
|
61da54d97114de070f12eccbf93cdde60cd7a82b
|
[] |
no_license
|
hhnnhh/animated_bar_charts_in_R
|
fc1a47cffed343840b050453cd7e02f7263a4182
|
0aef7ae105708eac6e52caabd09e7eef716325ca
|
refs/heads/master
| 2020-09-01T12:07:25.916987
| 2019-12-11T22:30:46
| 2019-12-11T22:30:46
| 218,954,781
| 1
| 0
| null | 2019-11-01T09:26:37
| 2019-11-01T09:26:37
| null |
UTF-8
|
R
| false
| false
| 3,786
|
r
|
animated_bar_plot_in_R_for_male_names.R
|
# Animated "bar chart race" of the most frequent BOY first names in Berlin,
# built with ggplot2 + gganimate from one CSV per year.
library(tidyverse)
library(gganimate)
## --> needed for the nice design "theme_tufte" = optional
#library(extrafont)
#library(ggthemes)
## --> for rendering the GIF
#library(gifski)
#library(png)
setwd("C:/Users/hanna/Dropbox/R_wissen/animated_bar_charts_in_R/data/")
list.files()
getwd()
setwd("./mostfrequentnames/")
#### BOYS (files named "boys<year>")
myfiles<-list.files(pattern = '^boys')
library(dplyr)
library(readr)
# combine all CSVs starting with "boys" into one dataframe named "bnames"
bnames <- list.files(pattern = '^boys') %>%
  lapply(read_csv) %>%
  bind_rows
#rename the column x with freq for "frequency"
names(bnames)[names(bnames)=='x'] <- 'freq'
# find within-year duplicated frequencies (ties), presumably so they can
# be broken by the manual nudges below — confirm row indices still match
test<-bnames %>% group_by(year) %>%
  filter(duplicated(freq))
# Ben & David 2012, freq=216; Ben & Oskar 2013, 209;
# Benjamin & Louis, 2013, 177; Jakob & Karl 201, 2014;
# Felix & Oskar, 2014, 255; Anton & Felix, 2015, 283
# Jonathan & Theodor, 2017, 196; Jakob & Luca, 2018, 204
# hand-edit one row of each tied pair so ranks are unique
bnames[11, "freq"]<-220
bnames[33, "freq"]<-210
bnames[39, "freq"]<-178
bnames[58, "freq"]<-200
bnames[71, "freq"]<-244
bnames[88, "freq"]<-282
bnames[119, "freq"]<-196
bnames[138, "freq"]<-205
# per year: rank names by frequency and keep only the top 20
names_formatted <- bnames %>%
  group_by(year) %>%
  # The * 1 makes it possible to have non-integer ranks while sliding
  mutate(rank = rank(-freq),
         Value_rel = freq/freq[rank==1],
         Value_lbl = freq) %>%
  group_by(Vorname) %>%
  filter(rank <=20) %>%
  ungroup()
#I don't know if "rank" must be rounded, but I tried anyway
names_formatted$rank<-round(names_formatted$rank,digits=0)
# Animation: horizontal tiles per name, animated across years
banim <- ggplot(names_formatted, aes(rank, group = Vorname,
                fill = as.factor(Vorname), color = as.factor(Vorname))) +
  geom_tile(aes(y = freq/2,
                height = freq,
                width = 0.9), alpha = 0.8, color = NA) +
  geom_text(aes(y = 0, label = paste(Vorname, " ")), vjust = 0.2, hjust = 1, fontface="bold",size=7) +
  geom_text(aes(y=freq,label = Value_lbl,hjust=0)) +
  coord_flip(clip = "off", expand = FALSE) +
  scale_y_continuous(labels = scales::comma) +
  scale_x_reverse() +
  guides(color = FALSE, fill = FALSE) +
  theme(axis.line=element_blank(),
        axis.text.x=element_blank(),
        axis.text.y=element_blank(),
        axis.ticks=element_blank(),
        axis.title.x=element_blank(),
        axis.title.y=element_blank(),
        legend.position="none",
        panel.background=element_blank(),
        panel.border=element_blank(),
        panel.grid.major=element_blank(),
        panel.grid.minor=element_blank(),
        panel.grid.major.x = element_line( size=.1, color="grey" ),
        panel.grid.minor.x = element_line( size=.1, color="grey" ),
        plot.title=element_text(size=25, hjust=0.5, face="bold", colour="grey", vjust=-1),
        plot.subtitle=element_text(size=18, hjust=0.5, face="italic", color="grey"),
        plot.caption =element_text(size=18, hjust=0.5, face="italic", color="grey"),
        plot.background=element_blank(),
        plot.margin = margin(2,2, 2, 4, "cm")) +
  transition_states(year, transition_length = 4, state_length = 1) +
  view_follow(fixed_x = TRUE) +
  labs(title = 'BOY NAMES in Berlin, year : {closest_state}',
       subtitle = "20 most frequent names",
       caption = "frequency of first names in Berlin | Data Source: https://daten.berlin.de/ | animation plot: github.com/amrrs | names plot: github.com/hhnnhh")
# For GIF
animate(banim, 400, fps = 20, width = 1200, height = 1000, end_pause = 30,
        renderer = gifski_renderer("gganim_boyname.gif"))
# For MP4
# NOTE(review): `anim` is not defined in this script (the plot object is
# `banim`), so the MP4 step below fails as written — confirm intent.
devtools::install_github("leonawicz/mapmate")
library(mapmate)
animate(anim, 200, fps = 20, width = 1200, height = 1000, end_pause=50,
        renderer = ffmpeg_renderer()) -> for_mp4
anim_save("animation.mp4", animation = for_mp4 )
|
601ab21c65fd78b6532e7cdb07b6257773c9abd0
|
d121f587f7e0678030d33a4c5428e594c5978dad
|
/man/log2_transform.Rd
|
f73129687fca20665c98ac0bcef831462749d4a5
|
[
"Apache-2.0"
] |
permissive
|
kauralasoo/eQTLUtils
|
fcf0907721b3a8f19fe68e611cecb4f16d7a0c9d
|
26242562a4e244334fd9691d03bc1ef4d2d6c1d9
|
refs/heads/master
| 2023-03-05T19:10:45.247191
| 2023-03-03T13:33:08
| 2023-03-03T13:33:08
| 149,779,618
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 443
|
rd
|
log2_transform.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/array_qc_utils.R
\name{log2_transform}
\alias{log2_transform}
\title{Log2 transform intensity values (same approach as in function lumiB)}
\usage{
log2_transform(mat)
}
\arguments{
\item{mat}{Matrix of intensity values}
}
\value{
Log2 transformed matrix
}
\description{
Log2 transform intensity values (same approach as in function lumiB)
}
\author{
Liis Kolberg
}
|
84fa69c7beb4a30a7bd672b2fa0090b7c5adf904
|
81a2fa3228451179b12779bb0149398cbfc8e9b1
|
/R/naOmit.R
|
e0b4827187c5a8dc361d86c6c02896db55fc9bf2
|
[] |
no_license
|
cran/wrMisc
|
c91af4f8d93ad081acef04877fb7558d7de3ffa2
|
22edd90bd9c2e320e7c2302460266a81d1961e31
|
refs/heads/master
| 2023-08-16T21:47:39.481176
| 2023-08-10T18:00:02
| 2023-08-10T19:30:33
| 236,959,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 810
|
r
|
naOmit.R
|
#' Fast na.omit
#'
#' \code{naOmit} removes NAs from the input vector. Unlike \code{na.omit} it
#' keeps no record of the removed elements, so the result is smaller in size
#' and subsequent execution (on large vectors) is faster, in particular when
#' many NAs are encountered.
#' Note : Behaves differently to \code{na.omit} with input other than plain
#' vectors. Will not work with data.frames !
#' @param x (vector or matrix) input
#' @return vector without NAs (matrix input will be transformed to vector). Returns NULL if input consists only of NAs.
#' @seealso \code{\link[stats]{na.fail}}, \code{na.omit}
#' @examples
#' aA <- c(11:13,NA,10,NA);
#' naOmit(aA)
#' @export
naOmit <- function(x) {
  keep <- !is.na(x)
  if (any(keep)) x[keep] else NULL
}
|
8cbf14612c903b8ef49242b3fcd1edcae4aed925
|
f99ce07d94ccb52745532c16a83afef1cb0c9121
|
/man/crd3r3.Rd
|
d9429d827803e60e07d74a8d12f5d2ff9f42f44a
|
[] |
no_license
|
cran/cosa
|
e3b3b2e04fa9d86386611055347acfeb536dc500
|
6cc728f618446c71cef36c5951f70a1d16a903be
|
refs/heads/master
| 2021-11-29T14:07:23.589664
| 2021-11-20T21:50:05
| 2021-11-20T21:50:05
| 120,439,174
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,343
|
rd
|
crd3r3.Rd
|
\name{crd3}
\alias{crd3r3}
\alias{crd3}
\alias{bcrd4f3}
\alias{cosa.crd3r3}
\alias{cosa.crd3}
\alias{cosa.bcrd4f3}
\alias{power.crd3r3}
\alias{power.crd3}
\alias{power.bcrd4f3}
\alias{mdes.crd3r3}
\alias{mdes.crd3}
\alias{mdes.bcrd4f3}
\title{Cluster-level Regression Discontinuity (Three-level Design, Discontinuity at Level 3, w/ or w/o Strata or Fixed Blocks)}
\description{
Use \code{mdes.crd3()} to calculate minimum detectable effect size, \code{power.crd3()} to calculate statistical power, and \code{cosa.crd3()} for bound constrained optimal sample size allocation (BCOSSA). If higher level strata or fixed blocks exist, use \code{mdes.bcrd4f3()} to calculate minimum detectable effect size, \code{power.bcrd4f3()} to calculate statistical power, and \code{cosa.bcrd4f3()} for BCOSSA.
}
\usage{
mdes.crd3(score = NULL, dists = "normal", k1 = -6, k2 = 6,
order = 1, interaction = FALSE,
treat.lower = TRUE, cutoff = 0, p = NULL,
power = .80, alpha = .05, two.tailed = TRUE,
df = n3 - g3 - order * (1 + interaction) - 2,
rho2, rho3, r21 = 0, r22 = 0, r23 = 0,
g3 = 0, rate.tp = 1, rate.cc = 0, n1, n2, n3)
power.crd3(score = NULL, dists = "normal", k1 = -6, k2 = 6,
order = 1, interaction = FALSE,
treat.lower = TRUE, cutoff = 0, p = NULL,
es = .25, alpha = .05, two.tailed = TRUE,
df = n3 - g3 - order * (1 + interaction) - 2,
rho2, rho3, r21 = 0, r22 = 0, r23 = 0,
g3 = 0, rate.tp = 1, rate.cc = 0, n1, n2, n3)
cosa.crd3(score = NULL, dists = "normal", k1 = -6, k2 = 6, rhots = NULL,
order = 1, interaction = FALSE,
treat.lower = TRUE, cutoff = 0, p = NULL,
cn1 = 0, cn2 = 0, cn3 = 0, cost = NULL,
n1 = NULL, n2 = NULL, n3 = NULL,
n0 = c(10, 3, 100), p0 = .499,
constrain = "power", round = TRUE, max.power = FALSE,
local.solver = c("LBFGS", "SLSQP"),
power = .80, es = .25, alpha = .05, two.tailed = TRUE,
rho2, rho3, g3 = 0, r21 = 0, r22 = 0, r23 = 0)
mdes.bcrd4f3(score = NULL, dists = "normal", k1 = -6, k2 = 6,
order = 1, interaction = FALSE,
treat.lower = TRUE, cutoff = 0, p = NULL,
power = .80, alpha = .05, two.tailed = TRUE,
df = n4 * (n3 - 2) - g3 - order * (1 + interaction),
rho2, rho3, r21 = 0, r22 = 0, r23 = 0, g3 = 0,
rate.tp = 1, rate.cc = 0, n1, n2, n3, n4)
power.bcrd4f3(score = NULL, dists = "normal", k1 = -6, k2 = 6,
order = 1, interaction = FALSE,
treat.lower = TRUE, cutoff = 0, p = NULL,
es = .25, alpha = .05, two.tailed = TRUE,
df = n4 * (n3 - 2) - g3 - order * (1 + interaction),
rho2, rho3, r21 = 0, r22 = 0, r23 = 0, g3 = 0,
rate.tp = 1, rate.cc = 0, n1, n2, n3, n4)
cosa.bcrd4f3(score = NULL, dists = "normal", k1 = -6, k2 = 6, rhots = NULL,
order = 1, interaction = FALSE,
treat.lower = TRUE, cutoff = 0, p = NULL,
cn1 = 0, cn2 = 0, cn3 = 0, cn4 = 0, cost = NULL,
n1 = NULL, n2 = NULL, n3 = NULL, n4 = NULL,
n0 = c(10, 3, 100 + g3 + order * (1 + interaction), 5), p0 = .499,
constrain = "power", round = TRUE, max.power = FALSE,
local.solver = c("LBFGS", "SLSQP"),
power = .80, es = .25, alpha = .05, two.tailed = TRUE,
rho2, rho3, g3 = 0, r21 = 0, r22 = 0, r23 = 0)
}
\arguments{
\item{score}{vector or list; an empirical score variable or an object with class 'score' returned from the \code{inspect.score()} function.}
\item{dists}{character; distribution of the score variable, \code{"normal"} or \code{"uniform"}. By default, \code{dists = "normal"} specification implies a truncated normal distribution with \code{k1 = -6} and \code{k2 = 6}.}
\item{k1}{left truncation point for (uncentered) empirical, truncated normal, or uniform distribution. Ignored when \code{rhots = 0} or \code{order = 0}.}
\item{k2}{right truncation point for (uncentered) empirical, truncated normal, or uniform distribution. Ignored when \code{rhots = 0} or \code{order = 0}.}
\item{order}{integer >= 0; order of polynomial functional form specification for the score variable.}
\item{interaction}{logical; if \code{TRUE} polynomial specification interacts with the treatment variable.}
\item{rhots}{obsolete; use \code{order = 0} to obtain results equivalent to random assignment designs.}
\item{treat.lower}{logical; if \code{TRUE} units below the cutoff are treated.}
\item{cutoff}{decision threshold.}
\item{p}{proportion of level 3 units in the treatment condition.}
\item{power}{statistical power (1 - \eqn{\beta}).}
\item{es}{effect size (Cohen's d).}
\item{alpha}{probability of type I error (\eqn{\alpha}).}
\item{two.tailed}{logical; \code{TRUE} for two-tailed hypothesis testing.}
\item{df}{degrees of freedom.}
\item{rho2}{proportion of variance in the outcome between level 2 units (unconditional ICC2).}
\item{rho3}{proportion of variance in the outcome between level 3 units (unconditional ICC3).}
\item{g3}{number of covariates at level 3.}
\item{r21}{proportion of level 1 variance in the outcome explained by level 1 covariates.}
\item{r22}{proportion of level 2 variance in the outcome explained by level 2 covariates.}
\item{r23}{proportion of level 3 variance in the outcome explained by level 3 covariates.}
\item{rate.tp}{treatment group participation rate.}
\item{rate.cc}{control group crossover rate.}
\item{n1}{average number of level 1 units per level 2 unit.}
\item{n2}{average number of level 2 units per level 3 unit.}
\item{n3}{number of level 3 units (per stratum or block, if one exists).}
\item{n4}{number of stratum or fixed blocks.}
\item{cn1}{marginal costs per level 1 unit in treatment and control conditions (positional), e.g. \code{c(10, 5)}.}
\item{cn2}{marginal costs per level 2 unit in treatment and control conditions (positional), e.g. \code{c(50, 30)}.}
\item{cn3}{marginal costs per level 3 unit in treatment and control conditions (positional), e.g. \code{c(80, 50)}.}
\item{cn4}{marginal cost per stratum or fixed block.}
\item{cost}{total cost or budget. Ignored when \code{constrain = "power"} or \code{constrain = "es"}. }
\item{p0}{starting value for \code{p} when \code{rhots = 0} and \code{p = NULL}. Starting value is replaced with the average when \code{p} is constrained by bounds.}
\item{n0}{vector of starting values for \code{n1, n2, n3} or \code{n1, n2, n3, n4} (positional). Starting values are replaced with the averages when sample sizes are constrained by bounds.}
\item{constrain}{character; constrains one of the \code{"cost"}, \code{"power"}, or \code{"es"} at the specified value.}
\item{round}{logical; \code{TRUE} for rounded BCOSSA solution.}
\item{max.power}{logical; \code{TRUE} for maximizing the power rate instead of minimizing the variance. Applies when \code{constrain = "cost"}.}
\item{local.solver}{subset of \code{c("LBFGS", "SLSQP")}}
}
\value{
\item{parms}{list of parameters used in the function.}
\item{df}{degrees of freedom.}
\item{sse}{standardized standard error.}
\item{cosa}{BCOSSA solution.}
\item{mdes}{minimum detectable effect size and (1 - \eqn{\alpha})\% confidence limits.}
\item{power}{statistical power (1 - \eqn{\beta})}
}
\examples{
score.obj <- inspect.score(rnorm(1000),
order = 1, interaction = FALSE,
cutoff = 0, k1 = -1, k2 = 1)
# single site (no blocks)
power.crd3(score.obj,
es = .25, rho2 = .20, rho3 = .10,
g3 = 0, r23 = 0, n1 = 20, n2 = 3, n3 = 40)
# with 5 blocks (note that r23 is modified but g3 remains the same)
power.bcrd4f3(score.obj,
es = .25, rho2 = .20, rho3 = .10,
g3 = 0, r23 = .30,
n1 = 20, n2 = 3, n3 = 40, n4 = 5)
# minimum required number of level 3 units for each block
cosa.bcrd4f3(score.obj,
es = .25, rho2 = .20, rho3 = .10,
g3 = 0, r23 = .30,
n1 = 20, n2 = 2, n3 = NULL, n4 = 5)
}
|
2ce7d70e39affa223ad9939201d5f079016fe0ad
|
0084280ad5d1400c280c110c402d3018b7a129af
|
/R/snv/maf-comparison-tcga-pcwag-glass.R
|
1e79c98221ebad1d14f3904440b9b2929f3505fb
|
[
"MIT"
] |
permissive
|
fpbarthel/GLASS
|
457626861206a5b6a6f1c9541a5a7c032a55987a
|
333d5d01477e49bb2cf87be459d4161d4cde4483
|
refs/heads/master
| 2022-09-22T00:45:41.045137
| 2020-06-01T19:12:30
| 2020-06-01T19:12:47
| 131,726,642
| 24
| 10
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,731
|
r
|
maf-comparison-tcga-pcwag-glass.R
|
#######################################################
# Comparisons of maf files from TCGA (PCAWG vs. GLASS-WG Snakemake)
# Date: 2018.08.15
# Author: Kevin J.
#######################################################
# Local directory for github repo.
# Use Kadir's linker file to identify the samples that overlap between PCAWG and GLASS-WG.
mybasedir = "/Users/johnsk/Documents/Life-History/"
setwd(mybasedir)
# Kadir's linker files.
pcawg_id_file <- "PCAWG May 2016 Data Release.xlsx"
tcga_id_file <- "pcawg_specimen_histology_August2016_v8.xlsx"
# Completed life-history barcodes.
life_history_barcodes = "/Users/johnsk/Documents/Life-History/GLASS-WG/data/sequencing-information/master_life_history_uniform_naming_complete.txt"
#######################################################
# Load necessary packages.
library(tidyverse)
library(maftools)
library(data.table)
library(openxlsx)
#######################################################
# Katie Shao (Novogene provided). First sheet of each linker workbook.
pcawg_linker = readWorkbook(pcawg_id_file, sheet = 1, startRow = 1, colNames = TRUE)
tcga_linker = readWorkbook(tcga_id_file, sheet = 1, startRow = 1, colNames = TRUE)
# Gather the TCGA IDs from our GLASS-WG cohort.
Mutect2dir = "~/mnt/scratchhelix/johnsk/GLASS_WG_floris/results/mutect2/m2filter/"
# Inspect Mutect2 filters applied to both SNVs and small indels.
setwd(Mutect2dir)
# Create list of names with the ".filtered2.vep_filters.txt". Sample "GLSS-MD-LP03" was removed due to poor quality.
filenames <- list.files(pattern = "*_filters.txt")
list_names <-substr(filenames, 1, 22)   # sample barcode prefix of each file
# Link "list_names" object with original TCGA names using barcodes.
vcf_names <- substring(list_names, 1, 15)
life_history_barcode_sheet = read.delim(life_history_barcodes, as.is=T)
# Identify those samples that are used in the GLASS-WG project.
# NOTE(review): glass_tcga_samples appears unused later in this excerpt — confirm.
glass_tcga_samples <- life_history_barcode_sheet %>%
  filter(Barcode%in%vcf_names) %>%
  filter(grepl("TCGA", Original_ID)) %>%
  select(Original_ID) %>%
  .[["Original_ID"]]
# 11 samples from PCAWG (Mutect) are also represented in the GLASS-WG cohort.
pcawg_glass_wg_samples <- life_history_barcode_sheet %>%
  filter(Original_ID%in%tcga_linker$submitted_sample_id) %>%
  select(Original_ID) %>%
  .[["Original_ID"]]
# Retrieve the 11 IDs for variants and write the overlap table to disk.
tcga_linker %>%
  filter(submitted_sample_id%in%pcawg_glass_wg_samples) %>%
  inner_join(pcawg_linker, by=c("donor_unique_id" = "donor_unique_id")) %>%
  inner_join(life_history_barcode_sheet, by=c("submitted_sample_id"="Original_ID")) %>%
  select(tumor_wgs_aliquot_id, submitted_sample_id, Barcode) %>%
  write.table(file="/Users/johnsk/Documents/Life-History/PCAWG-GLASS-sample-overlap.txt", sep="\t",
              row.names=FALSE)
|
44d2132073c69729996f3fd885c8256eaf767c02
|
7043c353662b2e238e31357bff5bf1dc8ba2abad
|
/R_source/geo_dbscan.R
|
ac7b90515ef23db9349c282ed101503411fef4af
|
[] |
no_license
|
gafalcon/tweet_topic_modelling_clustering
|
afb8fa19e99916b630992bb001164e9a8344b057
|
4b6c500e2fe07e384eb1dd2bee7b2791873acb9b
|
refs/heads/master
| 2023-07-01T19:03:07.446023
| 2021-07-26T15:46:58
| 2021-07-26T15:46:58
| 50,520,851
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,546
|
r
|
geo_dbscan.R
|
# DBSCAN-based home-location estimation, one coordinates file per user.
#
# For each user file: if the user has more than 5 points, cluster them with
# DBSCAN on a great-circle distance matrix, drop noise (cluster 0), keep only
# the largest cluster, and take the mean lat/long as the user's location.
# Users with <= 5 points fall back to the plain mean of all their points.
library(fossil);
library(dbscan);
usr_ids <- list.files(getwd()) # one lat/long CSV per user id
lats <- double(length(usr_ids));  # preallocated result vectors
longs <- double(length(usr_ids));
for (i in seq_along(usr_ids)) {
  print(usr_ids[i])
  latlong <- read.csv(as.character(usr_ids[i])); # read lat/longs of user i
  if (length(latlong$lon) > 5) {
    latlong <- latlong[, c(2, 1)];          # reorder columns for earth.dist
    dist <- earth.dist(latlong, dist = T);  # great-circle distance matrix
    # eps = 5 chosen from the kNNdistplot "knee" (see original exploration)
    dens <- dbscan(dist, minPts = 3, eps = 5);
    print(dens);
    latlong$cluster <- dens$cluster;        # add cluster label column
    latlong <- subset(latlong, cluster != 0); # drop DBSCAN noise points
    # cluster id with the most coordinate points
    most_freq <- as.numeric(names(sort(table(latlong$cluster), decreasing = TRUE)[1]));
    # BUG FIX: the original computed this subset but discarded the result,
    # so the final mean still used every non-noise point. Assign it so only
    # the biggest cluster contributes to the location estimate.
    latlong <- subset(latlong, cluster == most_freq);
  }
  lats[i] <- mean(latlong$lat);  # centroid of the retained points
  longs[i] <- mean(latlong$lon);
}
# Create a data frame with all user locations
df <- data.frame(usr_ids, lats, longs, stringsAsFactors = FALSE)
#write to csv File
write.csv(df, file="madrid_usrs_locations.csv")
|
37292b2c4e1ca321ae95c28e6f4315b8cfeed743
|
536cf445a4a9465270d79f0c37e0eb4f8d61403a
|
/man/exfm20.Rd
|
bea9ab8e7fd57dd707fda7da204bc4977be56897
|
[
"MIT"
] |
permissive
|
sollano/forestmangr
|
a01cc90925e582c69c3c77033c806adae5b38781
|
e14b62fafd859871277755cfa097d8d032a4af82
|
refs/heads/master
| 2023-02-19T21:53:44.213144
| 2023-02-15T21:57:42
| 2023-02-15T21:57:42
| 152,138,499
| 13
| 7
|
NOASSERTION
| 2022-12-11T22:43:03
| 2018-10-08T19:59:13
|
R
|
UTF-8
|
R
| false
| true
| 1,164
|
rd
|
exfm20.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exfm20.R
\docType{data}
\name{exfm20}
\alias{exfm20}
\title{Inventory data of a natural forest in Brazil}
\format{
A data frame with 12295 observations and 18 variables:
\describe{
\item{cod}{area code}
\item{transect}{plot number}
\item{tree}{tree number}
\item{common.name}{species common name}
\item{scientific.name}{species scientific name}
\item{family}{species family name}
\item{dbh}{diameter at breast height, in meters}
\item{canopy.pos}{canopy position}
\item{light}{level of light received by the tree}
\item{dead}{tells if the tree is dead or not}
\item{Hcom}{commercial height, in meters}
\item{Htot}{total height, in meters}
\item{date}{date of measurement}
\item{utm.east}{utm east position value}
\item{utm.north}{utm north position value}
\item{vol}{volume of trees, in cubic meters}
\item{plot.area}{plot area, in square meters}
\item{total.area}{total area, in hectares}
}
}
\usage{
data(exfm20)
}
\description{
In this data, each observation is a tree.
}
\author{
Sollano Rabelo Braga \email{sollanorb@gmail.com}
}
\keyword{data}
|
4e914b95d207326ae9491931fbf81f9e487cc092
|
aa6cce49e3cb87260b6546e0829b72dc79e415d8
|
/code/NB_exercise.R
|
bf890598cd66edc0b4438cc22f8dc70b0a0347b7
|
[] |
no_license
|
anthonyhung/MSTPsummerstatistics
|
037fc7499345aea4c0c4fb9aac6f15eeb6fdb347
|
dc53f74f5c1ca44d06ff9d443d71df8acc2fd5ef
|
refs/heads/master
| 2021-07-09T17:33:53.439306
| 2020-06-23T14:55:00
| 2020-06-23T14:55:00
| 150,667,061
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 314
|
r
|
NB_exercise.R
|
# Load the mushroom dataset; the exercise below builds a Naive Bayes
# classifier predicting the bruises attribute.
mush_data <- read.csv("data/mushrooms.csv")
#Create a Naive Bayes classifier to predict whether a mushroom has bruises or not. No need to perform any cross-validation, just split your samples into a 20% test set and 80% training set. What is the AUC of the ROC curve for your model?
library(caret)  # model training / data-splitting utilities
library(pROC)   # ROC / AUC computation
|
6919222a94ace3c0f604a539c10717c8803b26a9
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.customer.engagement/man/ses_delete_identity.Rd
|
0dfd18cf5de51a638391652d7aba6e14dfd5d731
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 638
|
rd
|
ses_delete_identity.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ses_operations.R
\name{ses_delete_identity}
\alias{ses_delete_identity}
\title{Deletes the specified identity (an email address or a domain) from the
list of verified identities}
\usage{
ses_delete_identity(Identity)
}
\arguments{
\item{Identity}{[required] The identity to be removed from the list of identities for the AWS
Account.}
}
\description{
Deletes the specified identity (an email address or a domain) from the list of verified identities.
See \url{https://www.paws-r-sdk.com/docs/ses_delete_identity/} for full documentation.
}
\keyword{internal}
|
dc6ca005e7dbf7767344afd0edd9715ba3b17647
|
6957f0d9b9ebc660cf9031794269f929cdcb6de9
|
/medical_prediction/gene information extraction and analysis/decision_tree_2mer.r
|
216c254cb99d00c52d26d80f748ceed0c72144f7
|
[] |
no_license
|
jializhou/undergraduate_projects
|
24ab6c77a6fcfb02f162017738cec82c557b7307
|
f59aef33fdfbae59df60535b6f5b28133139c94e
|
refs/heads/master
| 2021-05-31T04:26:22.301177
| 2016-03-21T01:31:57
| 2016-03-21T01:31:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,085
|
r
|
decision_tree_2mer.r
|
# Decision-tree classification of 2-mer sequence features (rpart + ROCR):
# train on foreground vs background 2-mer tables, then report error /
# false-positive / false-negative rates on the train and test sets.
setwd("C:/Users/I311161/Desktop/gencode/whole gene/transcripts/k_mer/2/1")
library(rpart)
library(ROCR)
##training set##
# columns 1..16 hold the 2-mer features; column 17 becomes the class label
foreground_train <- read.csv("foreground_train.csv",header = F)
foreground_train[,17]<-1
background_train <- read.csv("background_train.csv",header = F)
background_train[,17]<-0
train_set<-rbind(foreground_train,background_train)
attach(train_set)
## classification mode
# default with factor response:
# rate vectors sized for a 50-step sweep, although only copt = 1 runs below
train_err_rate0<-rep(0,times=50)
train_FP_rate0<-rep(0,times=50)
train_FN_rate0<-rep(0,times=50)
err_rate_2mer<-rep(0,times=50)
test_FP_rate0<-rep(0,times=50)
test_FN_rate0<-rep(0,times=50)
for (copt in 1:1){
# fit an unpruned classification tree (cp = 0) on all 16 features
model<-rpart(as.factor(train_set[,17]) ~ train_set[,1]+train_set[,2]+train_set[,3]+train_set[,4]+train_set[,5]
             +train_set[,6]+train_set[,7]+train_set[,8]+train_set[,9]+train_set[,10]
             +train_set[,11]+train_set[,12]+train_set[,13]+train_set[,14]+train_set[,15]+train_set[,16],
             data=train_set,method="class",control=rpart.control(cp=0))
#pruned tree model##
#model_pruned<-prune(model,cp=model$cptable[which.min(model$cptable[,"xerror"]),"CP"])
# NOTE(review): the pruning step above is commented out, so `model_pruned`
# used below is undefined unless it survives from an earlier session — confirm.
#test with training data
pred <- predict(model_pruned, newdata=train_set,type = "prob")
scores_train<-prediction(pred[,2],train_set[,17])
auc_train<-performance(scores_train,"auc")
auc_score_train<-auc_train@y.values
pred_train<-predict(model_pruned,newdata=train_set, type="class")
# Check accuracy:
train_set[,18]<-pred_train
train_err_rate<-0
train_FP_rate<-0
train_FN_rate<-0
# count misclassifications, split into false positives / false negatives
for (i in 1:nrow(train_set)){
  if(train_set[i,18] != train_set[i,17]){
    train_err_rate <- train_err_rate+1
    if(train_set[i,18]==1)
      train_FP_rate<-train_FP_rate+1
    else
      train_FN_rate<-train_FN_rate+1
  }
}
# NOTE(review): FP/FN rates below are divided by counts of *predicted*
# classes (column 18), not actual classes — confirm intended definition.
train_err_rate0[copt]<-train_err_rate/nrow(train_set)
train_FP_rate0[copt]<-train_FP_rate/sum(train_set[,18]==0)
train_FN_rate0[copt]<-train_FN_rate/sum(train_set[,18]==1)
# test with test data
foreground_test <- read.csv("foreground_test.csv",header = F)
foreground_test[,17]<-1
background_test <- read.csv("background_test.csv",header = F)
background_test[,17]<-0
test_set<-rbind(foreground_test,background_test)
pred_auc_test <- predict(model_pruned, newdata=test_set,type = "prob")
scores_test<-prediction(pred_auc_test[1:nrow(test_set),2],test_set[,17])
auc_test<-performance(scores_test,"auc")
auc_score_test<-auc_test@y.values
# class predictions come from the unpruned `model` here (pruned above) — confirm
pred_test<-predict(model,newdata=test_set, type="class")
# Check accuracy:
test_set[,18]<-pred_test[1:nrow(test_set)]
test_err_rate<-0
test_FP_rate<-0
test_FN_rate<-0
for (i in 1:nrow(test_set)){
  if(test_set[i,18] != test_set[i,17]){
    test_err_rate <- test_err_rate+1
    if(test_set[i,18]==1)
      test_FP_rate<-test_FP_rate+1
    else
      test_FN_rate<-test_FN_rate+1
  }
}
err_rate_2mer[copt]<-test_err_rate/nrow(test_set)
test_FP_rate0[copt]<-test_FP_rate/sum(test_set[,18]==0)
test_FN_rate0[copt]<-test_FN_rate/sum(test_set[,18]==1)
}
# visualize (classes by color, SV by crosses):
# NOTE(review): `test_err_rate_2mer` is never defined; `err_rate_2mer`
# was probably intended — confirm.
copt<-1:50
plot(copt, test_err_rate_2mer,type="l")
|
0e0f88f2f583de235dfc6abe1e1e1cec6084326d
|
85d8f91b58f912130362bd6415bbdb5e2e0cc7c0
|
/tests/testthat/test-flipformat.R
|
d8c15b26550406e940c846bf6ec791529d8c1ba7
|
[] |
no_license
|
Displayr/flipRegression
|
c8ab22ffc875ca09deac2ec01ffaf5371501c860
|
871819d800ebb24a7331336bd4cfea24b35afb48
|
refs/heads/master
| 2023-08-21T21:39:02.916680
| 2023-07-19T05:50:48
| 2023-07-19T05:50:48
| 59,715,681
| 7
| 5
| null | 2023-08-03T07:19:54
| 2016-05-26T03:09:43
|
R
|
UTF-8
|
R
| false
| false
| 14,277
|
r
|
test-flipformat.R
|
context("flipFormat tests")
data(bank, package = "flipExampleData")
zformula <- formula("Overall ~ Fees + Interest + Phone + Branch + Online + ATM")
sb <- bank$ID > 100
attr(sb, "label") <- "ID greater than 100"
wgt <- bank$ID
attr(wgt, "label") <- "ID"
bank$dep <- (unclass(bank$Overall) - 1) / 6
attr(bank$dep, "label") <- "Overall satisfaction"
attr(bank$Fees, "label") <- "Fees paid"
attr(bank$Online, "label") <- "Online banking"
bank$fBranch <- factor(bank$Branch)
attr(bank$fBranch, "label") <- "Branch as a factor"
attr(bank$Overall, "label") <- "Overall satisfaction"
library(flipRegression)
# Below tests fail (optimization errors when run inside testthat run on the subset of 200 rows.
# Hence are run on the full data set. They do not fail when run outside testthat.
test_that("Regression: Variable names to labels",
{
# Variable labels
z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + fBranch + Online + ATM, data = bank, type = "Ordered Logit",
subset = sb, weights = wgt, detail = FALSE, show.labels = TRUE))
expect_equal(rownames(z$summary$coefficients)[1], "Fees paid")
expect_equal(rownames(z$summary$coefficients)[4], "Branch as a factor: 2")
# Multiple imputation
z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + fBranch + Online + ATM, data = bank, type = "Linear", subset = sb, weights = wgt, detail = FALSE, show.labels = TRUE, missing = "Multiple imputation"))
expect_equal(rownames(z$summary$coefficients)[2], "Fees paid")
expect_equal(rownames(z$summary$coefficients)[5], "Branch as a factor: 2")
})
#### REDUCE DATA SIZE FOR TESTS WITHOUT NUMERICAL EQUALITY ###
bank <- bank[sample(nrow(bank), 200), ] # random sample of 200 rows to improve perfomance
zformula <- formula("Overall ~ Fees + Interest + Phone + Branch + Online + ATM")
sb <- bank$ID > 100
attr(sb, "label") <- "ID greater than 100"
wgt <- bank$ID
attr(wgt, "label") <- "ID"
bank$dep <- (unclass(bank$Overall) - 1) / 6
attr(bank$dep, "label") <- "Overall satisfaction"
attr(bank$Fees, "label") <- "Fees paid"
attr(bank$Online, "label") <- "Online banking"
bank$fBranch <- factor(bank$Branch)
attr(bank$fBranch, "label") <- "Branch as a factor"
attr(bank$Overall, "label") <- "Overall satisfaction"
test_that("DS-1467 and 1468",
{
state = factor(rep(c("NSW","VC"), 2))
attr(state, "label") = "State"
attr(state, "question") = "State1"
expect_equal(Labels(state), "State1: State")
attr(state, "questiontype") = "PickAny"
expect_equal(Labels(state), "State1: State")
z = data.frame(state, state)
expect_equal(as.character(Labels(z)), rep("State1: State", 2))
attr(state, "questiontype") = "Date"
expect_equal(Labels(state), "State1")
attr(state, "questiontype") = "Number"
expect_equal(Labels(state), "State1")
attr(state, "questiontype") = "PickOne"
expect_equal(Labels(state), "State1")
attr(state, "questiontype") = "Text"
expect_equal(Labels(state), "State1")
z = data.frame(state, state)
expect_equal(as.character(Labels(z)), rep("State1", 2))
attr(state, "question") = ""
expect_equal(Labels(state), "State")
attr(state, "question") = NULL
expect_equal(Labels(state), "State")
bank$overall_dog = bank$Overall
bank$zx = bank$Fees
attr(bank$zx, "label") = "Label"
attr(bank$zx, "question") = "Question"
z = Regression(overall_dog ~ zx, data = bank, show.labels = TRUE)
expect_equal(rownames(z$summary$coefficients)[[2]], "Question: Label")
attr(bank$zx, "questiontype") = "PickAny"
z = Regression(overall_dog ~ zx, data = bank, show.labels = TRUE)
expect_equal(rownames(z$summary$coefficients)[[2]], "Question: Label")
attr(bank$zx, "questiontype") = "PickOne"
z = Regression(overall_dog ~ zx, data = bank, show.labels = TRUE)
expect_equal(rownames(z$summary$coefficients)[[2]], "Question")
attr(bank$zx, "question") = "Label"
z = Regression(overall_dog ~ zx, data = bank, show.labels = TRUE)
expect_equal(rownames(z$summary$coefficients)[[2]], "Label")
#DS-1468
Regression(overall_dog ~ Fees, data = bank, show.labels = TRUE)
})
test_that("Labels",
{
data("phone", package = "flipExampleData")
# A data frame with labels for everything
expect_equal(as.character(Labels(phone, names(phone))), as.character(unlist(sapply(phone, function(x) attr(x, "label")))))
# A few missing labels
phone <- phone[,1:6]
attr(phone[, 1], "label") <- NULL
attr(phone[, 3], "label") <- NULL
expect_equal(Labels(phone, names(phone)), c("id", "Does respondent have a mobile phone?", "q2", "Occupation", "Age", "Top of mind awareness"))
# Backticks put in manually.
names(phone) <- paste0("`", names(phone), "`")
expect_equal(Labels(phone, names(phone)), c("id", "Does respondent have a mobile phone?", "q2", "Occupation", "Age", "Top of mind awareness"))
#Factors in regression models
data("cola", package = "flipExampleData")
factor.coefficient.names <- suppressWarnings(names(coef(lm(Q2 ~ Q3, data = cola))))
expect_equal(Labels(cola, factor.coefficient.names), c("(Intercept)", "Q3. Age: 25 to 29", "Q3. Age: 30 to 34", "Q3. Age: 35 to 39", "Q3. Age: 40 to 44", "Q3. Age: 45 to 49", "Q3. Age: 50 to 54", "Q3. Age: 55 to 64", "Q3. Age: 65 or more"))
# Regression.
data("bank", package = "flipExampleData")
zbank <- bank[1:200,]
set.seed(23442)
zbank$rnd <- runif(nrow(zbank))
zbank$rnd1 <- runif(nrow(zbank))
zbank$rnd2 <- runif(nrow(zbank))
zbank$rnd3 <- runif(nrow(zbank))
zbank$rnd4 <- runif(nrow(zbank))
attr(bank$Overall, "label") <- "Overall satisfaction"
attr(bank$Fees, "label") <- "Fees paid"
attr(bank$Online, "label") <- "Online banking"
data("cola", package = "flipExampleData")
cola <- cola[1:150,]
cola$Q3[1:100] <- NA
cola$Q3 <- unclass(cola$Q3)
suppressWarnings(Regression(Overall ~ Fees, data = bank, type = "Ordered Logit", missing = "Multiple imputation", detail = FALSE, show.labels = TRUE))
# DS-1467
attr(bank$Fees, "question") <- "Fees paid1"
suppressWarnings(Regression(Overall ~ Fees, data = bank, type = "Ordered Logit", missing = "Multiple imputation", detail = FALSE, show.labels = TRUE))
attr(bank$Fees, "questiontype") <- "PickAny"
suppressWarnings(Regression(Overall ~ Fees, data = bank, type = "Ordered Logit", missing = "Multiple imputation", detail = FALSE, show.labels = TRUE))
attr(bank$Fees, "questiontype") <- "PickOne"
suppressWarnings(Regression(Overall ~ Fees, data = bank, type = "Ordered Logit", missing = "Multiple imputation", detail = FALSE, show.labels = TRUE))
# Some variables have labels and others do not.
z <- data.frame(a = 1:10, b = 1:10, c = 1:10)
flipFormat::Labels(z) <- c("A", "B")
expect_equal(as.character(flipFormat::Labels(z)), c("A", "B", "c"))
})
data(bank, package = "flipExampleData")
test_that("BaseDescription",
{
library(flipRegression)
# Unweighted, unfiltered
expect_error(z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + Branch + Online + ATM, data = bank)),
NA)
#z$sample.description
# Weighted
expect_error(z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + Branch + Online + ATM, data = bank, weights = ID)),
NA)
#z$sample.description
# Filtered
expect_error(z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + Branch + Online + ATM, data = bank, subset = ID > 0.5)),
NA)
#z$sample.description
# Weighted, Filtered
expect_error(z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + Branch + Online + ATM, data = bank, weights = ID)),
NA)
#z$sample.description
})
test_that("Numeric dependent ~ numeric, factor, numeric factor",
{
data(colas, package= "flipExampleData")
library(flipRegression)
colas$num <- colas$q1a
colas$q1b <- as.numeric(unclass(colas$q1a))
colas$q1c <- as.numeric(unclass(colas$q1c))
z <-suppressWarnings(Regression(num ~ q1b + q3 + q1c + d3, data = colas, detail = FALSE, show.labels = TRUE))
expect_equal( rownames(z$summary$coefficients)[11], "Gender: Female")
})
test_that("Regression: labels are extracted from variables containing $",
{
library(flipRegression)
attach(bank)
z = data.frame(q = Fees)
zz <- rownames(Regression(Overall ~ z$q + Phone, detail = FALSE, show.labels = TRUE)$summary$coef)[2]
expect_equal(zz, "Fees paid")
detach(bank)
})
test_that("Regression: Variable names to labels",
{
# Variable names
z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + fBranch + Online + ATM, data = bank, type = "Linear", subset = sb, weights = wgt, detail = FALSE, show.labels = FALSE))
expect_equal(rownames(z$summary$coefficients)[5], "fBranch2")
expect_equal(rownames(z$summary$coefficients)[2], "Fees")
z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + fBranch + Online + ATM, data = bank, type = "Binary Logit", subset = sb, weights = wgt, detail = FALSE, show.labels = FALSE))
expect_equal(rownames(z$summary$coefficients)[5], "fBranch2")
expect_equal(rownames(z$summary$coefficients)[2], "Fees")
# Variable labels
z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + fBranch + Online + ATM, data = bank, type = "Linear", subset = sb, weights = wgt, detail = FALSE, show.labels = TRUE))
expect_equal(rownames(z$summary$coefficients)[2], "Fees paid")
expect_equal(rownames(z$summary$coefficients)[5], "Branch as a factor: 2")
z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + fBranch + Online + ATM, data = bank, type = "Binary Logit", subset = sb, weights = wgt, detail = FALSE, show.labels = TRUE))
expect_equal(rownames(z$summary$coefficients)[2], "Fees paid")
expect_equal(rownames(z$summary$coefficients)[5], "Branch as a factor: 2")
z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + fBranch + Online + ATM, data = bank, type = "Multinomial Logit", subset = sb, weights = wgt, detail = FALSE, show.labels = TRUE))
expect_equal(colnames(z$summary$coefficients)[2], "Fees paid")
expect_equal(colnames(z$summary$coefficients)[5], "Branch as a factor: 2")
# This test is run above on the full data set
#z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + fBranch + Online + ATM, data = bank, type = "Ordered Logit",
# subset = sb, weights = wgt, detail = FALSE, show.labels = TRUE)
#expect_equal(rownames(z$summary$coefficients)[1], "Fees paid")
#expect_equal(rownames(z$summary$coefficients)[4], "Branch as a factor: 2")
z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + fBranch + Online + ATM, data = bank, type = "Poisson", subset = sb, weights = wgt, detail = FALSE, show.labels = TRUE))
expect_equal(rownames(z$summary$coefficients)[2], "Fees paid")
expect_equal(rownames(z$summary$coefficients)[5], "Branch as a factor: 2")
# Small binary logit
data(cola, package = "flipExampleData")
attr(cola$Q2, "label") <- "Gender"
attr(cola$Q3, "label") <- "Age of person"
z <- suppressWarnings(Regression(Q3 ~ Q2, data = cola, type = "Binary Logit", detail = FALSE, show.labels = TRUE))
expect_equal(rownames(z$summary$coefficients)[2], "Gender: Female")
# Multiple imputation
# This test is run above on the full data set
#z <- suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + fBranch + Online + ATM, data = bank, type = "Linear", subset = sb, weights = wgt, detail = FALSE, show.labels = TRUE, missing = "Multiple imputation"))
#expect_equal(rownames(z$summary$coefficients)[2], "Fees paid")
#expect_equal(rownames(z$summary$coefficients)[5], "Branch as a factor: 2")
})
test_that("RegressionTable",{
ft <- "Yo! This footer specifically designed
to communicate important information.
Since it is so important, it will of course
extend over many lines. In fact, on narrow tables,
it might take >3. On wide tables, it might only
require one. Feel free to adjust the width,
and the importance and significance does not
go away."
data(weight, package = "flipExampleData")
z = summary(lm(Weight ~ Height + Age, data = weight))$coef
RegressionTable(z, TRUE, footer = ft, title = "My awesome regression", subtitle = "Big brown dog", p.cutoff = 0.05)
expect_error(RegressionTable(z, TRUE, footer = ft, title = "My awesome regression", subtitle = "Big brown dog"), NA)
## Linear regression
suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + Branch + Online +ATM, data = bank, detail = FALSE))
# Linear regression with robust se
suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + Branch + Online +ATM, data = bank, robust.se = TRUE, detail = FALSE))
# Ordered logit (has a z statistic rather than a t)
suppressWarnings(Regression(Overall ~ Fees + Interest + Phone + Branch + Online +ATM, data = bank, type = "Ordered Logit", detail = FALSE))
coef.matrix <- summary(lm(Sepal.Length ~ Species * Sepal.Width, iris))$coef
rownames(coef.matrix)[1] <- "Big dog"
RegressionTable(coef.matrix, TRUE, footer = ft, title = "My awesome regression", subtitle = "Big brown dog")
expect_error(RegressionTable(coef.matrix, TRUE, footer = ft, title = "My awesome regression", subtitle = "Big brown dog"), NA)
})
|
8f9434a4b7d42e83f4165010dc1ddf67ec03320a
|
f6a65b54568fdc34f0a66796191e34950c383115
|
/man/makeSweave.Rd
|
bd46c88856557ffa96bec1214ce82b992e2abb8f
|
[] |
no_license
|
cran/indirect
|
b60113a5c188434b29a426b1e36bbb8c3a5414d5
|
8ad3d93f172db38c9cab34a04998ab55bb6c1ad8
|
refs/heads/master
| 2022-02-17T05:15:23.814030
| 2022-02-09T04:30:02
| 2022-02-09T04:30:02
| 128,393,529
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,379
|
rd
|
makeSweave.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elicit_functions.R
\name{makeSweave}
\alias{makeSweave}
\title{Function to create summary document from a saved elicitation record.}
\usage{
makeSweave(
filename.rds = "",
reportname = "",
title = "Elicitation record",
contact.details = "none",
fitted.fractiles = TRUE,
cumul.prob.bounds = c(0.05, 0.95)
)
}
\arguments{
\item{filename.rds}{character, filename of the record saved as an RDS object,
see \code{?saveRDS}.}
\item{reportname}{character, filename without extension to be used for the
generated Sweave (\code{.Rnw}) file. The Sweave file supports the creation
of report (\code{.pdf}) documentation and accompanying files such as the
\code{.tex} file generated by using \code{\link[utils]{Sweave}} followed by
\code{tools::texi2pdf()}.}
\item{title}{character, a title for the report}
\item{contact.details}{character, an email address or other mechanism by
which the expert may contact the facilitator or rapporteur}
\item{fitted.fractiles}{logical or numeric vector. A logical value of
\code{FALSE} will not plot any fitted fractiles from the fitted subjective
probability distribution. A logical value of \code{TRUE} will plot the
fitted fractiles that correspond to the final iteration of the raw elicited
fractiles. Alternatively, a numeric vector can specify arbitrary fractiles
for plotting from the fitted distribution, e.g., \code{c(1/10, 1/4, 1/2,
3/4, 9/10)}}
\item{cumul.prob.bounds}{numeric vector that specifies the upper and lower
plot bounds determined by this credible interval. The default is the 0.90
central credible interval, \code{c(0.05, 0.95)}}
}
\description{
Creates a Sweave file that can be used to generate a pdf document of the
summary report.
}
\examples{
\dontrun{
X <- matrix(c(1, 1, 0, 1), nrow = 2) # design
Z <- designLink(design = X)
Z <- elicitPt(Z, design.pt = 1,
lower.CI.bound = -1,
median = 0,
upper.CI.bound = 1,
comment = "A completed elicitation scenario.")
tmp.rds <- tempfile(pattern = "record", fileext =".rds")
saveRecord(Z, file = tmp.rds)
tmpReport <- tempfile(pattern = "report")
makeSweave(filename.rds = tmp.rds, reportname = tmpReport)
setwd(tempdir())
utils::Sweave(paste0(tmpReport, ".Rnw"))
tools::texi2pdf(paste0(tmpReport, ".tex"))
}
}
|
91733dd0efa331cfe7bf241c8a60305d8dfbd5ac
|
40f4cb44ab742a168ca3f82d36a3e38dcaa6f844
|
/R/dumpNcbiTax.R
|
26cb4b6148b78d30665e628ea6251a7935cc3e53
|
[] |
no_license
|
sankleta/BED
|
34e3f91fceffbb1164e65ab8a4cb24e6431b898b
|
85c5c5ba4bbc927155d454dc6612512c7b197805
|
refs/heads/master
| 2021-04-30T05:55:28.535605
| 2018-02-06T11:18:59
| 2018-02-06T11:18:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,172
|
r
|
dumpNcbiTax.R
|
#' Feeding BED: Dump tables with taxonomic information from NCBI
#'
#' Not exported to avoid unintended modifications of the DB.
#'
#' @param reDumpThr time difference threshold between 2 downloads
#' @param toDump the list of tables to load
#' @param env the R environment in which to load the tables when downloaded
#' @param curDate current date as given by \code{\link{Sys.Date}}
#'
#' @importFrom utils download.file read.table
#'
dumpNcbiTax <- function(
reDumpThr,
toDump=c("names.dmp"),
env=parent.frame(n=1),
curDate
){
dumpDir <- "taxdump"
if(file.exists(dumpDir)){
load(file.path(dumpDir, "dumpDate.rda"))
message("Last download: ", dumpDate)
if(curDate - dumpDate > reDumpThr){
toDownload <- TRUE
}else{
toDownload <- FALSE
}
}else{
message("Not downloaded yet")
toDownload <- TRUE
}
if(toDownload){
if(file.exists(dumpDir)){
dumpDirBck <- paste0(dumpDir,"-BCK")
file.remove(list.files(path = dumpDirBck, full.names = T))
file.remove(dumpDirBck)
file.rename(dumpDir, dumpDirBck)
}
dir.create(dumpDir)
download.file(
"ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/taxdump.tar.gz",
file.path(dumpDir, "taxdump.tar.gz"),
quiet=TRUE
)
system(
sprintf('cd %s ; tar xzf taxdump.tar.gz ; cd -', dumpDir),
ignore.stdout=TRUE
)
dumpDate <- curDate
save(dumpDate, file=file.path(dumpDir, "dumpDate.rda"))
message("Data have been downloaded")
}else{
message("Existing data are going to be used")
}
## Data files
for(td in toDump){
lf <- file.path(dumpDir, td)
df <- file.path(dumpDir, paste0(td, ".rda"))
if(!file.exists(df)){
assign(td, read.table(
lf,
sep="\t",
header=F,
stringsAsFactors=F,
quote="",
comment.char=""
))
save(list=td, file= df)
}
load(df, envir=env)
}
}
|
3c9a5f088df36e621d6335356fb3f9b72405ce4a
|
556f65c5ef3c3cec789b58b29c684ea0f772eb34
|
/src/M11_elasticnet.R
|
424fa537f7cd36ea444bbfab8bf9b08edcc8fefa
|
[] |
no_license
|
adamcone/higgs_boson
|
4696b3fa1684e039b94362062f506f8c1543ce09
|
0ac7e3475e0c48d7ad8a8362d8bac6c2c735a4ef
|
refs/heads/master
| 2021-01-20T18:38:51.564684
| 2016-07-14T17:50:21
| 2016-07-14T17:50:21
| 63,354,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,002
|
r
|
M11_elasticnet.R
|
setwd('/Users/adamcone/Desktop/projects/Kaggle/code')
load('kaggle.RData')
library(dplyr)
library(glmnet)
library(caret)
# M11: 0 jets, no candidate mass estimate.
# No jets recorded, so all of the continous jet metrics are undefined.
# Removed PRI_jet_num and PRI_jet_all_pt because both are constant at 0
# for this missingness category. Therefore neither provides predictive
# value and cannot be scaled.
# 17 independent variables.
# [1] "DER_mass_MMC": The estimated mass mH of the Higgs boson candidate, obtained
# through a probabilistic phase space integration (may be undefined if the
# topology of the event is too far from the expected topology)
# The neutrinos are not measured in the detector, so their presence in the
# final state makes it difficult to evaluate the mass of the Higgs candidate
# on an event-by-event basis.
# [2] "DER_deltaeta_jet_jet": The absolute value of the pseudorapidity
# separation (22) between the two jets (undefined if PRI jet num ≤ 1).
# [3] "DER_mass_jet_jet": The invariant mass (20) of the two jets (undefined if
# PRI jet num ≤ 1).
# [4] "DER_prodeta_jet_jet": The product of the pseudorapidities of the two jets
# (undefined if PRI jet num ≤ 1).
# [5] "DER_lep_eta_centrality": The centrality of the pseudorapidity of the
# lepton w.r.t. the two jets (undefined if PRI jet num ≤ 1) where ηlep is
# the pseudorapidity of the lepton and η1 and η2 are the pseudorapidities
# of the two jets. The centrality is 1 when the lepton is on the bisector
# of the two jets, decreases to 1/e when it is collinear to one of the
# jets, and decreases further to zero at infinity.
# [6] "PRI_jet_num": The number of jets (integer with value of 0, 1, 2 or 3;
# possible larger values have been capped at 3).
# [7] "PRI_jet_leading_pt": The transverse momentum p2x + p2y of the leading
# jet, that is the jet with largest transverse momentum (undefined if PRI
# jet num = 0).
# [8] "PRI_jet_leading_eta": The pseudorapidity η of the leading jet (undefined
# if PRI jet num = 0).
# [9] "PRI_jet_leading_phi": The azimuth angle φ of the leading jet (undefined
# if PRI jet num = 0).
# [10] "PRI_jet_subleading_pt": The transverse momentum p2x + p2y of the
# leading jet, that is, the jet with second largest transverse momentum
# (undefined if PRI jet num ≤ 1).
# [11] "PRI_jet_subleading_eta": The pseudorapidity η of the subleading jet
# (undefined if PRI jet num ≤ 1).
# [12] "PRI_jet_subleading_phi": The azimuth angle φ of the subleading jet
# (undefined if PRI jet num ≤ 1).
# [13] "PRI_jet_all_pt": The scalar sum of the transverse momentum of all the
# jets of the events.
#------------------------------------------------------------------------------
# Elasticnet Regularization
#------------------------------------------------------------------------------
elasticnet.trainControl = trainControl(method = 'cv',
number = 10
)
#preparing data for glmnet-facilitated lasso regression
x = model.matrix(Label ~ ., M11.training[, -c(1, ncol(M11.training) - 1)])[, -1]
y = M11.training$Label
#Alpha = 1 for lasso regression. Getting a range for lambda.
ptm <- proc.time()
M11.lasso.models = glmnet(x = x,
y = y,
family = 'binomial',
alpha = 1,
lambda = seq(from = 10^0,
to = 10^-5,
length.out = 100)
)
proc.time() - ptm
#Visualizing the lasso regression shrinkage to verify lambda range.
plot(M11.lasso.models,
xvar = "lambda",
label = TRUE,
main = "M11 Lasso Regression")
# performing ridge regression to further hone lambda range.
ptm <- proc.time()
M11.ridge.models = glmnet(x = x,
y = y,
family = 'binomial',
alpha = 0,
lambda = seq(from = 10^1,
to = 10^-4,
length.out = 100)
)
proc.time() - ptm
#Visualizing the ridge regression shrinkage to verify lambda range.
plot(M11.ridge.models,
xvar = "lambda",
label = TRUE,
main = "M11 Ridge Regression")
#Running 10-fold cross validation to determine best (alpha, lambda) pair
set.seed(0)
ptm <- proc.time()
M11.best.model <- train(x = M11.training[, !names(M11.training) %in% c('EventId', 'Weight', 'Label')],
y = M11.training[, 'Label'],
method='glmnet',
metric = "Accuracy",
preProc = c('center', 'scale'),
tuneGrid = expand.grid(.alpha=seq(from = 0,
to = 1,
length.out = 20),
.lambda = 10^seq(from = -5,
to = 1,
length.out = 20)
),
trControl = elasticnet.trainControl
)
proc.time() - ptm
# This took five minutes to run. With 100 (alpha, lambda) combinations, the best
# tuning parameters were (alpha = 0.3684211, lambda = 0.000379269), which resulted in
# a cross-validation accuracy of 0.9480917.
M11.alpha = M11.best.model$bestTune$alpha
M11.lambda = M11.best.model$bestTune$lambda
# Now, I want to use these parameters on all the training data to get the
# coefficients. I'll try that now.
ptm <- proc.time()
M11.final.model = glmnet(x = x,
y = y,
family = 'binomial',
alpha = M11.alpha,
lambda = M11.lambda
)
proc.time() - ptm
M11.coef = coef(M11.final.model)
plot(2:nrow(M11.coef), abs(as.vector(M11.coef))[-1])
# empirical beta threshold = 0.2 Beta's above this are, to me, significant.
# There are three beta coefficients that have magnitudes above this threshold.
# I will extract these from the coefficient matrix:
beta_threshold = 0.2
M11.sig.coef.df = data.frame(Variable_Name = 0, Beta = 0)
counter = 1
for (i in 2:nrow(M11.coef)) {
if (abs(M11.coef[i]) > beta_threshold) {
M11.sig.coef.df[counter, ] = c(row.names(M11.coef)[i], M11.coef[i])
counter = counter + 1
}
}
M11.sig.coef.df$Beta = as.numeric(M11.sig.coef.df$Beta)
M11.sig.coef.df = arrange(M11.sig.coef.df, desc(abs(Beta)))
# #Now, the variables that came up as significant for the full data are, in
# #descending order by coefficient magnitude:
# Variable_Name Beta
# 1 DER_deltar_tau_lep 1.8328526
# 2 DER_pt_ratio_lep_tau -1.2149866
# 3 DER_met_phi_centrality -0.2346662
|
e0c77d67ea45eaac9a04897cab5522cd889a758c
|
3bc4732d0260fc8865da8354efe9778915bd53d6
|
/R/reader.R
|
f2e8c7ecf5964634194944a9a93c3b05253414b6
|
[] |
no_license
|
tpopenfoose/fitFileR
|
a92c63d8e2fd5cbfe1832b45cf87213ebb456191
|
3e152b5b11702d7e51d7afd96a08db03f304de63
|
refs/heads/master
| 2022-11-06T11:42:42.724848
| 2020-06-11T15:34:32
| 2020-06-11T15:34:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,819
|
r
|
reader.R
|
#' @export
readFitFile <- function(fileName, dropUnknown = TRUE, mergeMessages = TRUE) {
data("data_type_lookup", package = "fitFileR", envir = parent.frame())
tmp <- .readFile(fileName)
all_records <- .renameMessages(tmp[[1]], tmp[[2]], merge = mergeMessages)
for(i in names(all_records)) {
all_records <- .processMessageType(all_records, name = i, drop = dropUnknown)
}
return(all_records)
}
.readFile <- function(fileName) {
con <- file(fileName, "rb")
on.exit(close(con))
file_header <- .readFileHeader(con)
message_defs <- list()
defs_idx <- 1
plmt <- "-1"
prev_lmt <- "0"
defs_count <- list()
pseudoMessageTab <- NULL
scaffold <- list()
while(seek(con, where = NA) < (file_header$data_size + 14)) {
record_header <- .readRecordHeader(con)
lmt <- as.character(record_header$local_message_type)
if(record_header$message_type == "definition") {
#message("Def: ", lmt)
if(lmt %in% pseudoMessageTab[,2]) {
plmt <- as.character(as.integer(plmt) + 1)
} else {
plmt <- lmt
}
pseudoMessageTab <- rbind(pseudoMessageTab, c(lmt, plmt))
prev_lmt <- lmt
## read the message definition just to get through the bytes
message_res <- .readMessage.definition(con, devFields = record_header$developer_data)
message_defs[[ plmt ]] <- message_res$message
defs_idx <- defs_idx + 1
defs_count[[ plmt ]] <- 1
} else if(record_header$message_type == "data") {
#message("Data: ", lmt)
if(record_header$type == "compressed_timestamp") {
# message("Compressed")
defIdx <- pseudoMessageTab[ max(which(pseudoMessageTab[,1] == lmt)), 2]
message <- .readMessage.data(con, message_defs[[ defIdx ]], compressed_timestamp = TRUE)$message
scaffold[[ defIdx ]] <- rbind(scaffold[[ defIdx ]],
message)
} else {
defIdx <- pseudoMessageTab[ max(which(pseudoMessageTab[,1] == lmt)), 2]
message <- .readMessage.data(con, message_defs[[ defIdx ]], compressed_timestamp = FALSE)
scaffold[[ defIdx ]] <- dplyr::bind_rows(scaffold[[ defIdx ]],
message)
}
} else {
stop("unknown message type")
}
}
if(length(message_defs) != length(scaffold)) {
stop("Unequal lengths")
}
scaffold <- lapply(scaffold, as_tibble)
return(list(scaffold, message_defs))
}
|
3e04d9887e81ac45c9bef14892ba683195f2ee64
|
3c85d8c213dfb13f7ab69c300b93e8f419831a02
|
/cachematrix.R
|
fa09384e95afb77bb3b847de33750f8ae5bb3701
|
[] |
no_license
|
jimtheba/ProgrammingAssignment2
|
d876da24b83483b07d38a08e4c2bc94ace04eb49
|
bb0d901cb887da70be87d282b3f1f0a97fc5cabf
|
refs/heads/master
| 2021-01-20T17:29:09.437614
| 2015-07-26T18:25:17
| 2015-07-26T18:25:17
| 39,733,827
| 0
| 0
| null | 2015-07-26T17:08:58
| 2015-07-26T17:08:57
| null |
UTF-8
|
R
| false
| false
| 1,855
|
r
|
cachematrix.R
|
## The purpose of the two functions makeCacheMatrix and cacheSolve
## is to created a cached matrix object that holds the inverse of
## a matrix. This allows for the computation to be done just once
## instead of computing repeatedly as needed.
## This first function creates the matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL ## Create the inverse matrix, set to NULL
## Create the set function, which sets the value of the matrix with the argument entered in y
set <- function(y) {
## Use the <<- operator to set the value of x outside of the current environment
x <<- y
## Use the <<- operator to set the value of m outside of the current environment
m <<- NULL
}
## Create the get function, which retrieves the value of x
get <- function() x
## Create the setmatrix function, which uses the solve function to return the
## inverse of the matrix; solve returns the inverse if the b argument is missing.
setmatrix <- function(solve) m <<- solve
## Create the getmatrix function, which gets the inverse
getmatrix <- function() m
## Create our list of functions and their corresponding names
list(set=set, get=get, setmatrix=setmatrix, getmatrix=getmatrix)
}
## This second function computes the inverse of the matrix from makeCacheMatrix
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
## First, use the getmatrix function from makeCacheMatrix and assign to m
m <- x$getmatrix()
## Check to see if m is already the inverse; if it is, return m and skip the rest
if (!is.null(m)) {
message("Getting cached data.")
return(m)
}
## If we reach this point, it means we need to calculate the inverse
the_matrix <- x$get()
m <- solve(the_matrix, ...)
x$setmatrix(m)
return(m)
}
|
8658288683daf0e8733197940f1be161b8285f16
|
aece010c3572eaf59a791569ae60fec62a260ee6
|
/man/emodel.object.Rd
|
1b4dc65da8a1e0a7ef5e8fda01d8de55addb25e3
|
[] |
no_license
|
cran/msm
|
edb92247a14b77f5a6726a80623884f29cce20e2
|
fa420503596991f9e0c5e903474c1e24954c9451
|
refs/heads/master
| 2022-12-03T03:59:27.043063
| 2022-11-28T16:30:02
| 2022-11-28T16:30:02
| 17,697,695
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,117
|
rd
|
emodel.object.Rd
|
\name{emodel.object}
\alias{emodel.object}
\title{Developer documentation: misclassification model structure object}
\description{
A list giving information about the misclassifications assumed in a
multi-state model fitted with the \code{ematrix} argument of
\code{\link{msm}}. Returned in a fitted \code{\link{msm}} model object.
This information is converted internally to a \code{hmodel}
object (see \code{\link{hmodel.object}}) for use in likelihood computations.
}
\value{
\item{nstates}{Number of states (same as \code{qmodel$nstates}).}
\item{npars}{Number of allowed misclassifications, equal to \code{sum(imatrix)}.}
\item{imatrix}{Indicator matrix for allowed misclassifications. This has
\eqn{(r,s)} entry 1 if misclassification of true state \eqn{r} as
observed state \eqn{s} is possible.
diagonal entries are arbitrarily set to 0.}
\item{ematrix}{Matrix of initial values for the misclassification probabilities, supplied as the \code{ematrix} argument of \code{\link{msm}}.}
\item{inits}{Vector of these initial values, reading across rows of
\code{qmatrix} and excluding the diagonal and disallowed transitions.}
\item{constr}{Indicators for equality constraints on baseline misclassification
probabilities, taken from the \code{econstraint} argument to
\code{\link{msm}}, and mapped if necessary to the set (1,2,3,...)}
\item{ndpars}{Number of distinct misclassification probabilities, after applying
equality constraints.}
\item{nipars}{Number of initial state occupancy probabilities being
estimated. This is zero if \code{est.initprobs=FALSE}, otherwise equal to
the number of states.}
\item{initprobs}{Initial state occupancy probabilities, as supplied to
\code{\link{msm}} (initial values before estimation, if \code{est.initprobs=TRUE}.)}
\item{est.initprobs}{Are initial state occupancy probabilities
estimated (\code{TRUE} or \code{FALSE}), as supplied in the
\code{est.initprobs} argument of \code{\link{msm}}.}
}
\seealso{
\code{\link{msm.object}},\code{\link{qmodel.object}}, \code{\link{hmodel.object}}.
}
|
25b2c8c2bce38d7e959f69b7b5c77f0a45d288c3
|
638a9479734ffdc5c504ccf4d9438a04803648dd
|
/StephensMacCall_EW.R
|
812e30c074be322f9f41a607dd4ce6ef50e421be
|
[] |
no_license
|
ellewibisono/Chapter3
|
886096949a46fea493c7c9f5faaabf8e025b57c7
|
c26f6b5d9450f90abe5fc05c671d4c6ab217c1e1
|
refs/heads/master
| 2022-11-26T23:55:23.347896
| 2020-07-24T21:15:12
| 2020-07-24T21:15:12
| 273,325,685
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,181
|
r
|
StephensMacCall_EW.R
|
# StephensMacCall_EW.R — Stephens & MacCall style trip-selection analysis:
# uses species co-occurrence on fishing trips to predict which trips targeted
# Lutjanus malabaricus (species code 'LL021'), then keeps trips whose predicted
# probability exceeds the best threshold.
# NOTE(review): database credentials are hard-coded below — consider moving
# them to environment variables or a config file.
# Load in data from PgAdmin (PostgreSQL)
m <- dbDriver("PostgreSQL")
con <- dbConnect(m,host='localhost', port='5432',user="postgres", password="fishy", dbname="ifish_03242020")
# Pull fish measurements joined to landings and boat metadata; only posted,
# data_quality == 1 records are retained.
rs2 <-dbSendQuery(con, "SELECT f.oid, f.var_a, f.var_b, f.fish_genus, f.fish_species, f.lmat, f.lopt, f.linf, f.lmax, f.fish_code, s.cm,
d.boat_id, d.landing_date, d.wpp1, d.first_codrs_picture_date,
d.expenses_fuel, d.expenses_bait, d.expenses_ice, d.doc_status, d.post_status,
s.landing_id, s.codrs_picture_date,
b.oid, b.registration_port,
b.gt_estimate, b.gt_declared, b.program_type, b.fishing_gear, b. boat_name, b.category
FROM ifish_fish f
INNER JOIN ifish_sizing s on f.oid= s.fish_id
INNER JOIN ifish_deepslope d on s.landing_id = d.oid
INNER JOIN ifish_boat b on d.boat_id= b.oid
WHERE s.fish_id > 0 and s.data_quality = 1 and d.doc_status= 'Posted' and d.post_status= 'Posted'
ORDER BY s.landing_id")
dffish <- fetch(rs2, n=-1)
# Weight (kg) from the length-weight relationship W = a * L^b (var_a, var_b
# per species; cm is fork length).
dffish <- dffish %>%setNames(make.unique(names(.))) %>%
unite(fishname, fish_genus, fish_species, sep=" ", remove= FALSE) %>%
dplyr::mutate(weight=(var_a *(cm^var_b)/1000))
#******************* Prepare Data for Stephens and McCall ****************
#Create a data set that just has the trip id value and the species codes caught on that trip
scheduleSpec = data.frame(SCHEDULE=dffish$landing_id, SPECIES=dffish$fish_code)
#remove any duplicates: get unique presence/absence those species caught on each trip
scheduleSpec = unique(scheduleSpec)
#count number of trips that each species was caught on
numSpeciesOnEachTrip = as.data.frame(table(scheduleSpec$SPECIES))
names(numSpeciesOnEachTrip) = c("SPECIES", "numTrips")
numSpeciesOnEachTrip = numSpeciesOnEachTrip[order(numSpeciesOnEachTrip$numTrips, decreasing=TRUE),]
numSpeciesOnEachTrip$percent = (numSpeciesOnEachTrip$numTrips/sum(numSpeciesOnEachTrip$numTrips))*100
#get a list of the species that occur on 1% or more of trips
numSpeciesOnEachTrip = subset(numSpeciesOnEachTrip, numSpeciesOnEachTrip$percent >= 1)
#Construct the Presence-Absence Matrix for Analysis in Stephens and McCall Approach
# Split "YYYY-MM-DD" picture dates into numeric Year/Month/Day columns.
dateOnlyVec_vec = lapply(X=dffish$codrs_picture_date,FUN=function(x) {unlist(strsplit(as.character(x),"-"))})
dateOnlyVec_vec = do.call(rbind.data.frame,dateOnlyVec_vec)
rownames(dateOnlyVec_vec)=NULL
names(dateOnlyVec_vec) = c("Year","Month","Day")
dateOnlyVec_vec$Year = as.numeric(as.character(dateOnlyVec_vec$Year))
dateOnlyVec_vec$Month = as.numeric(as.character(dateOnlyVec_vec$Month))
dateOnlyVec_vec$Day = as.numeric(as.character(dateOnlyVec_vec$Day))
dffish = cbind(dffish,dateOnlyVec_vec)
scheduleSpecYear = data.frame(SCHEDULE=dffish$landing_id, SPECIES=dffish$fish_code, year=dffish$Year)
scheduleSpecYear = unique(scheduleSpecYear) #must remove any duplicate species codes on a trip
scheduleSpecYear$count <- 1 #add count variable
options(scipen=100)
# Cross-tabulate trips x species into a 0/1 presence-absence matrix.
presAbsMatrix_temp=xtabs(count~ SCHEDULE + SPECIES, data=scheduleSpecYear)
presAbsMatrix=as.data.frame.matrix(presAbsMatrix_temp)
#SAME SPECIES AS Nancie used last time hardcoded here for continuity
#keep only those species that occur on at least one percent of the trips across the time period
speciesOnePerc = as.vector(as.character(factor(numSpeciesOnEachTrip[['SPECIES']])))
isTargetPresent = length(subset(speciesOnePerc, speciesOnePerc==as.character('LL021')))
if(isTargetPresent==0){ #If the target species is not in the 1%, keep that column anyway
speciesOnePerc = c(speciesOnePerc,'LL021')
}
presAbsMatrix = subset(presAbsMatrix, select=speciesOnePerc)
colNames_old = names(presAbsMatrix)
#provide a letter in front of column names in preparation of formula in S & M
# (species codes starting with a digit would otherwise be invalid formula terms)
for(i in 1:length(colNames_old)){
if(i==1)
{
colNames = paste("x",colNames_old[i],sep="")
}
if(i!=1)
{
temp = paste("x",colNames_old[i],sep="")
colNames = c(colNames,temp)
}
}
names(presAbsMatrix) = colNames
presAbsMatrix$SCHEDULE = rownames(presAbsMatrix)
tripYear = data.frame(SCHEDULE=scheduleSpecYear$SCHEDULE,year=scheduleSpecYear$year)
tripYear = unique(tripYear)
presAbsMatrix = merge(presAbsMatrix, tripYear)
#*************** Stephens and McCall Species Association Approach ***************#
# Build the logistic-regression formula TARGET ~ x<spec1> + x<spec2> + ...
spec4formula = subset(colNames, colNames!=paste("x",as.character('LL021'),sep=""))
names(presAbsMatrix)[names(presAbsMatrix)==paste("x",as.character('LL021'),sep="")] = "TARGET" #re-name as target
for(i in 1:length(spec4formula)){
if(i==1)
{
Formula = paste("TARGET ~",spec4formula[i])
}
if(i!=1)
{
Formula = paste(paste(Formula,"+"),spec4formula[i])
}
}
Formula=as.formula(Formula)
presAbsMatrix$TARGET <- as.factor(presAbsMatrix$TARGET)
my.lm=glm(formula=Formula,family=binomial,data=presAbsMatrix) # Regress on all species
# NOTE(review): as.numeric() on a factor returns the level indices (1/2),
# not the original 0/1 codes — this likely should be
# as.numeric(as.character(...)); verify, since `obs` and the false-pos/neg
# counts below depend on TARGET being 0/1.
presAbsMatrix$TARGET <- as.numeric(presAbsMatrix$TARGET)
obs = sum(presAbsMatrix$TARGET)
# Scan probability thresholds 0..1 and find the one whose predicted trip
# count best matches the observed count.
thresh=seq(0,1,by=0.01)
thresh.effect=thresh
thresh.count=thresh
for(i in 1:length(thresh)){
thresh.effect[i] = abs(obs - sum(fitted.values(my.lm) > thresh[i]))
thresh.count[i] = sum(fitted.values(my.lm) > thresh[i])
}
mythresh=cbind(thresh,thresh.effect,thresh.count)
best = min(thresh.effect)
best.thresh = thresh[thresh.effect == best]
TARGET_Trips.pred=ifelse(fitted.values(my.lm) > best.thresh,1,0)
False.neg = sum(ifelse(presAbsMatrix$TARGET > TARGET_Trips.pred,1,0))
False.pos = sum(ifelse(presAbsMatrix$TARGET < TARGET_Trips.pred,1,0))
Correct.pred = sum(ifelse(presAbsMatrix$TARGET == TARGET_Trips.pred,1,0))
Trips = length(presAbsMatrix[,1])
Pct.correct = Correct.pred/Trips*100
Pct.correct
False.neg/Trips*100
False.pos/Trips*100
years = seq(min(scheduleSpecYear$year),max(scheduleSpecYear$year))
foo = hist(fitted.values(my.lm),plot=F, breaks=9)
myhist = data.frame(cbind(foo$mids,foo$count))
foo = coefficients(my.lm)
mycoeffs = data.frame(cbind(names(foo), foo))
#Threshold Plot
#windows(5,5)
plot(thresh, thresh.effect, main='Lutjanus malabaricus', ylab = 'Difference between actual and predicted trips', xlab = 'Probablility Threshold', pch=16)
lines(thresh, thresh.effect)
#jpeg(paste(PATH,'Threshold_Plot.jpg',sep='/'),units="in",width=5,height=5,res=144)
plot(thresh, thresh.effect, main='Lutjanus malabaricus', ylab = 'Difference between actual and predicted trips', xlab = 'Probablility Threshold', pch=16)
lines(thresh, thresh.effect)
dev.off()
#Actual and Predicted Plot
yr.byspcs=data.frame(Year=years,Actual=rep(0,length(years)),Predicted=rep(0,length(years)))
for (i in 1:length(years)){
yr.byspcs$Actual[i] = sum(presAbsMatrix$TARGET[presAbsMatrix$year == years[i]])
yr.byspcs$Predicted[i] = sum(fitted.values(my.lm)[presAbsMatrix$year == years[i]])
}
#windows(5,5)
leg.txt = c("Observed", "Predicted")
plot(yr.byspcs[,1], yr.byspcs[,2], xlab='Year', ylab='Trips (Actual and Predicted)', main='Lutjanus malabaricus', pch=15)
lines(yr.byspcs[,1], yr.byspcs[,3], col=2)
lines(yr.byspcs[,1], yr.byspcs[,2])
points(yr.byspcs[,1], yr.byspcs[,3], col=2, pch=16)
legend("topleft", legend=leg.txt, col=1:2, pch=15:16)
#jpeg(paste(PATH,'Predicted_Trips.jpg',sep='/'),units="in",width=5,height=5,res=144)
plot(yr.byspcs[,1], yr.byspcs[,2], xlab='Year', ylab='Trips (Actual and Predicted)', main='Lutjanus malabaricus', pch=15)
lines(yr.byspcs[,1], yr.byspcs[,3], col=2)
lines(yr.byspcs[,1], yr.byspcs[,2])
points(yr.byspcs[,1], yr.byspcs[,3], col=2, pch=16)
legend("topleft", legend=leg.txt, col=1:2, pch=15:16)
dev.off()
#Probability histograms
#windows(5,5)
hist(fitted.values(my.lm), xlab='Probability', ylab='Frequency', main='Lutjanus malabaricus')
#jpeg(paste(PATH,'probability_hist.jpg',sep='/'),units="in",width=5,height=5,res=144)
#hist(fitted.values(my.lm), xlab='Probability', ylab='Frequency', main=Title)
dev.off()
#Write Diagnostic Tables if Desired
#if(steph_mccall_diagnos_tables==TRUE)
#{
#write.table(yr.byspcs, quote=F,row=F,sep=',', file =(paste(PATH,'yr_byspcs_out.csv',sep='/')))
#write.table(myhist, quote=F,row=F,sep=',', file =(paste(PATH,'hist_out.csv',sep='/')))
#write.table(mycoeffs, quote=F,row=F,sep=',', file =(paste(PATH,'coeffs_out.csv',sep='/')))
#write.table(mythresh, quote=F,row=F,sep=',', file =(paste(PATH,'thresh_out.csv',sep='/')))
#}
#Finally....Select the trips based on the threshold determined by the regression
selectedTrips = as.data.frame((presAbsMatrix[fitted.values(my.lm)> best.thresh,])$SCHEDULE)
names(selectedTrips) = "SCHEDULE"
#My coeffs
#SpecCor = mycoeffs
#names(SpecCor) = c("sp_code_x","corr")
#rownames(SpecCor)=NULL
#SpecCor = SpecCor[-1,]
#SpecCor$sp_code_x = as.character(SpecCor$sp_code_x)
#SpecCor$SPECIES = as.numeric(as.character((unlist(strsplit(SpecCor$sp_code_x,split="x",fixed=TRUE)))[seq(0,length(SpecCor[,1])*2,by=2)]))
#numSpeciesOnEachTrip$SPECIES = as.numeric(as.character(numSpeciesOnEachTrip$SPECIES))
#speciesNames = data.frame(SPECIES = dataIN$SPECIES, COMMON=dataIN$COMMON)
#speciesNames = unique(speciesNames)
#pecCor = merge(SpecCor, speciesNames)
#SpecCor = SpecCor[order(SpecCor$corr),]
#write.table(SpecCor, paste(PATH,"spec_corr.csv",sep="/"),col.names=TRUE,row.names=FALSE,sep=",")
|
6fd7ceeafe580f43dc65ce3aef9a0c473f322a48
|
70520c160449323e7a03dbdcbd85bc723e2a2f58
|
/exercise.r
|
27b147f5b55a95897b4ee48926a82ebcaae17f59
|
[] |
no_license
|
davemfish/maptime-r
|
72e3b1b3ba10f56a54510d490619355d1c79a3c8
|
54efd29e8f4fc84dec92a25e29fe5099bcb4025d
|
refs/heads/master
| 2021-01-20T20:31:59.808782
| 2016-07-09T19:46:27
| 2016-07-09T19:46:27
| 60,197,037
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,537
|
r
|
exercise.r
|
# exercise.r — maptime-r workshop script: load bird observation points,
# map them with leaflet, intersect with watershed polygons, and extract
# elevations from a DEM.
# NOTE(review): rgdal/rgeos were retired from CRAN in 2023; a modern port
# would use sf::st_read()/st_intersection() — verify target environment.
# Install packages if needed:
# packages <- c("rgdal", "rgeos", "leaflet", "RColorBrewer", "raster")
# install.packages(packages)
## load packages:
library(rgdal)
library(rgeos)
library(leaflet)
library(RColorBrewer)
library(raster)
## set a working directory
# NOTE(review): machine-specific absolute path — non-portable.
setwd("/home/dmf/maptime-r")
## load species data from a csv file
birds <- read.csv("data/birds.csv", header=T, stringsAsFactors=F)
## explore data
head(birds)
dim(birds)
names(birds)
## make a crude map by plotting xy coordinates
plot(x=birds$lon, y=birds$lat)
##############
# LEAFLET MAPS
##############
## 1) make a more useful map with leaflet:
# make sure the map centers on your data
# store the mean lon and lat to use in leaflet's setView()
x1 <- mean(birds$lon)
y1 <- mean(birds$lat)
## make a leaflet map
## high quality R leaflet tutorial: https://rstudio.github.io/leaflet/
m1 <- leaflet() %>%
  setView(lng=x1, lat=y1, zoom=10) %>%
  ## notice notation below:
  ## '~' means "look for this name in the birds dataframe"
  addCircleMarkers(data=birds, lng=~lon, lat=~lat,
                   fill=T,
                   fillColor="red",
                   fillOpacity=0.8,
                   opacity=0.8,
                   radius=5,
                   color="black",
                   weight=1) %>%
  addTiles()
print(m1)
## QUESTION: how many corvids in the dataset?
## use a conditional statement to identify rows where family == 'Corvidae'
ids <- which(birds$family == "Corvidae")
length(ids)
## the 'ids' object now holds the row numbers
## for all elements of birds$family that met the condition
head(ids)
## use those row numbers to subset the birds dataframe:
corv <- birds[ids, ]
## 2) make leaflet map of the corvids & color the dots by species.
## how many different colors do I need?
unique(corv$species) # lists the unique values in the species column
ncol <- length(unique(corv$species))
ncol
## create a function to assign colors to species
# get the right number of colors from a palette
bpal <- brewer.pal(ncol, "Set2")
bpal
# create a custom function for choropleth mapping:
# leaflet provides the useful colorFactor function
?colorFactor
spal <- colorFactor(palette=bpal, domain=corv$species)
m2 <- leaflet(data=corv) %>%
  setView(lng=x1, lat=y1, zoom=10) %>%
  addCircleMarkers(lng=~lon, lat=~lat,
                   fill=T,
                   fillColor=~spal(species), # here we call the color function
                   fillOpacity=0.8,
                   opacity=0.8,
                   radius=5,
                   color="black",
                   weight=1) %>%
  addTiles() %>%
  addLegend(pal = spal, values = ~species, opacity = 1)
print(m2)
##################
# OVERLAY ANALYSIS
##################
## Which watershed in Mt. Rainier has the most bird observations?
## we will intersect the bird points with watershed polygons
## and count number of points in each polygon
## load watershed boundary shapefile
watersheds <- readOGR(dsn="data", layer="mora_sheds")
## NOTE: if you were unable to install the rgdal package,
## use this line below instead of the readOGR line above:
# watersheds <- readRDS("data/watersheds.rds")
class(watersheds)
## plot the shapefile
## the plot function recognizes the sp class object and knows what to do:
plot(watersheds)
## check the projection of the watersheds
watersheds@proj4string
## store projection for use later
wgs84ll <- watersheds@proj4string
## In order to do an 'intersection' with the polygons
## the points must also be an sp class object
## make the points into a sp class object
pts <- SpatialPointsDataFrame(coords=birds[,c("lon", "lat")],
                              data=birds,
                              proj4string = wgs84ll)
plot(pts)
## do overlay
## over() returns, for each point, the attributes of the polygon it falls in
pts.sheds <- over(pts, watersheds)
## now we have a table with same # of rows as the points,
## but with the polygon attributes
head(pts.sheds)
dim(pts.sheds)
## count records in each watershed
table(pts.sheds$Name)
## improve the format:
freq <- data.frame(table(pts.sheds$Name))
names(freq) <- c("Name", "frequency")
freq
## CHALLENGE: which watershed has the most "Pileated Woodpecker" observations
#################
# RASTER ANALYSIS
#################
## use a digital elevation model to
## find the elevation of each bird observation
## load a DEM:
dem <- raster("data/mora_dem.tif")
dem
## make the points into a sp class object
## you already did this if completed the "Overlay" section above.
pts <- SpatialPointsDataFrame(coords=birds[,c("lon", "lat")],
                              data=birds,
                              proj4string = wgs84ll)
## map the points on top of the DEM
plot(dem)
plot(pts, add=T)
## use the extract function to extract DEM values at each point:
?extract
el <- extract(dem, pts)
## explore the result:
summary(el)
class(el)
length(el)
hist(el, breaks=40)
## attach this new elevation vector to the birds dataframe
## confirm the vector has same number of elements as the dataframe has rows:
nrow(birds) == length(el)
## if so, it is safe to add the elevation vector as a column:
birds.el <- cbind(birds, el)
names(birds.el)
## rename that column:
names(birds.el)[10] <- "elevation"
## explore the new dataset with a boxplot:
boxplot(elevation ~ order, data=birds.el)
## try out ggplot for fancier plots:
library(ggplot2)
## ggplot help docs:
# http://docs.ggplot2.org/current/
ggbox <- ggplot(data=birds.el) +
  geom_boxplot(aes(x=order, y=elevation)) +
  theme(axis.text.x=element_text(angle=-45, vjust=0.5, size=10))
ggbox
|
a273e0068a0b31b5d14437c60595347662811b37
|
fc6a16ff52ee0a1aed32706a2bc3e0bbd7594f8f
|
/data-wrangling/lubridate/datetime.R
|
69f137c4a1d5a855697637f367de94cbd5f862c9
|
[] |
no_license
|
rsquaredacademy-education/tutorial_slides
|
80931f8ac3ec07f56c90c1f8b0fe1196761f2322
|
f4597c32b1111f73d3e91d974c7417cce8ade64a
|
refs/heads/master
| 2022-12-11T19:18:30.401786
| 2020-09-07T11:02:52
| 2020-09-07T11:02:52
| 97,691,482
| 0
| 1
| null | 2020-09-07T11:02:53
| 2017-07-19T08:19:31
|
HTML
|
UTF-8
|
R
| false
| false
| 3,908
|
r
|
datetime.R
|
# datetime.R — lubridate tutorial: date arithmetic, intervals, and periods,
# illustrated on an invoice/payment "transact" case-study dataset with
# columns Invoice, Due, Payment (all dates).
## load libraries
library(lubridate)
library(dplyr)
library(magrittr)
library(readr)
## origin
# The POSIX epoch ("1970-01-01 UTC") that numeric date-times count from.
lubridate::origin
## today
now()
today()
am(now())
pm(now())
## read case study data
transact <- read_csv('https://raw.githubusercontent.com/rsquaredacademy/datasets/master/transact.csv')
transact
## day, month and year
this_day <- as_date('2017-03-23')
day(this_day)
month(this_day)
year(this_day)
## extract day, month and year from due data
transact %>%
  mutate(
    due_day = day(Due),
    due_month = month(Due),
    due_year = year(Due)
  )
## course duration
course_start <- as_date('2017-04-12')
course_end <- as_date('2017-04-21')
course_duration <- course_end - course_start
course_duration
## compute days to settle invoice
transact %>%
  mutate(
    days_to_pay = Payment - Invoice
  )
## compute days over due
transact %>%
  mutate(
    delay = Payment - Due
  )
## leap year
transact %>%
  mutate(
    due_year = year(Due),
    is_leap = leap_year(due_year)
  )
## if due day is February 29, is it a leap year?
transact %>%
  mutate(
    due_day = day(Due),
    due_month = month(Due),
    due_year = year(Due),
    is_leap = leap_year(due_year)
  ) %>%
  select(-(Invoice), -(Payment)) %>%
  filter(due_month == 2 & due_day == 29)
## shift dates
course_start + days(2)
course_start + weeks(1)
course_start + years(1)
## interval
interval(course_start, course_end)
## shift interval
course_interval <- interval(course_start, course_end)
course_interval %>%
  int_shift(by = days(1))
course_interval %>%
  int_shift(by = weeks(1))
course_interval %>%
  int_shift(by = years(1))
## overlapping intervals
vacation_start <- as_date('2017-04-19')
vacation_end <- as_date('2017-04-25')
vacation_interval <- interval(vacation_start, vacation_end)
int_overlaps(course_interval, vacation_interval)
## how many invoices were settled within due date?
# The due->payment interval starts the day AFTER Due so that paying exactly
# on the due date does not count as an overlap.
transact %>%
  mutate(
    inv_due_interval = interval(Invoice, Due),
    due_next = Due + days(1),
    due_pay_interval = interval(due_next, Payment),
    overlaps = int_overlaps(inv_due_interval, due_pay_interval)
  ) %>%
  select(Invoice, Due, Payment, overlaps)
## how many invoices were settled within due date?
## using int_shift
transact %>%
  mutate(
    inv_due_interval = interval(Invoice, Due),
    due_pay_interval = interval(Due, Payment),
    due_pay_next = int_shift(due_pay_interval, by = days(1)),
    overlaps = int_overlaps(inv_due_interval, due_pay_next)
  ) %>%
  select(Invoice, Due, Payment, overlaps)
## within
# %within% tests whether a date falls inside an interval (inclusive).
conference <- as_date('2017-04-15')
conference %within% interval(course_start, course_end)
## within
transact %>%
  mutate(
    inv_due_interval = interval(Invoice, Due),
    overlaps = Payment %within% inv_due_interval
  ) %>%
  select(Due, Payment, overlaps)
## quarter
course_start
course_start %>%
  quarter()
course_start %>%
  quarter(with_year = TRUE)
## extract quarter from due date
transact %>%
  mutate(
    Quarter = quarter(Due)
  )
## extract quarter with year from due date
transact %>%
  mutate(
    Quarter = quarter(Due, with_year = TRUE)
  )
## duration
course_duration %>%
  as.numeric() %>%
  duration(units = 'days')
## convert unite
interval(course_start, course_end) / dseconds()
interval(course_start, course_end) / dminutes()
interval(course_start, course_end) / dhours()
interval(course_start, course_end) / dweeks()
interval(course_start, course_end) / dyears()
## convert units
interval(course_start, course_end) %>%
  time_length(unit = "seconds")
interval(course_start, course_end) %>%
  time_length(unit = "minutes")
interval(course_start, course_end) %>%
  time_length(unit = "hours")
## convert units
interval(course_start, course_end) %>%
  as.period(unit = "seconds")
interval(course_start, course_end) %>%
  as.period(unit = "minutes")
interval(course_start, course_end) %>%
  as.period(unit = "hours")
|
43c4e3f479b654db2c098e9dd369a711ef329e02
|
1b515549f0d9689d7c08d9b19d1078839e0cc39c
|
/run_analysis.R
|
50d2c6d3b10bae5c364cc85993539fb4a5bb5287
|
[] |
no_license
|
za-gor-te-nai/getting-and-cleaning-data
|
21856ba3757318c8fb4d26f8b00f6c7a91bb59d0
|
97f6d2d5c8903f4e28221bfb1ded55fad2f7252d
|
refs/heads/master
| 2020-05-05T11:03:12.871267
| 2019-04-07T14:38:11
| 2019-04-07T14:38:11
| 179,972,718
| 1
| 0
| null | 2019-04-07T14:12:06
| 2019-04-07T13:49:37
|
R
|
UTF-8
|
R
| false
| false
| 2,255
|
r
|
run_analysis.R
|
# run_analysis.R — Getting & Cleaning Data course project: merge the UCI HAR
# training/test sets, keep mean()/std() features, label activities, and write
# a per-subject/per-activity averaged tidy dataset.
# NOTE(review): rm(list=ls()) in a script clears the caller's workspace —
# generally discouraged; consider removing.
rm(list=ls())
graphics.off()
# NOTE(review): prefer library() over require() for hard dependencies
# (require() only warns on failure).
require(readr)
require(dplyr)
## load variable names
con <- file('UCI HAR Dataset/features.txt', open='r')
varNames <- readLines(con)
close(con)
## read the training data set (561 features?)
xTrain <- read_table('UCI HAR Dataset/train/X_train.txt', col_names=F)
## read the activity description (training data)
yTrain <- scan('UCI HAR Dataset/train/y_train.txt')
## read subject identification (training data)
idTrain <- scan('UCI HAR Dataset/train/subject_train.txt')
## Include the subject and activity identification variables in the training dataframe as factors
xTrain <- data.frame(as.factor(idTrain),as.factor(yTrain),xTrain)
varNames <- c('id','activity',varNames)
names(xTrain) <- varNames
## read the test data set (561 features)
xTest <- read_table('UCI HAR Dataset/test/X_test.txt', col_names=F)
## read the activity description (test data)
yTest <- scan('UCI HAR Dataset/test/y_test.txt')
## read subject identification (test data)
idTest <- scan('UCI HAR Dataset/test/subject_test.txt')
## Include the subject and activity identification variables in the test dataframe as factors
xTest <- data.frame(id=as.factor(idTest),activity=as.factor(yTest),xTest)
names(xTest) <- varNames
## Merge the training and test dataframes
# merge(..., all=TRUE) joins on all shared columns; with disjoint subjects
# this effectively stacks the two sets.
x <- merge(xTrain,xTest, all=TRUE)
## Build a new dataframe by selecting the mean and std variables
# Keep id/activity plus features whose names contain mean() or std().
selVars <- grep('id|activity|mean\\(\\)|std\\(\\)',names(x))
lsv <- length(selVars)
# NOTE(review): passing a bare integer vector to select() is deprecated in
# newer dplyr — all_of(selVars) is the current idiom.
xSelected <- select(x, selVars)
xSelected <- arrange(xSelected, id, activity)
## rename activities
levels(xSelected$activity) <- c('WALKING',
                                'WALKING_UPSTAIRS',
                                'WALKING_DOWNSTAIRS',
                                'SITTING',
                                'STANDING',
                                'LAYING')
# Strip the leading "NNN " feature index that features.txt prepends.
names(xSelected) <- sub('(^[0-9]{1,3} )','',names(xSelected))
## Build a dataframe providing the average of the selected variables for each subject and each activity
# NOTE(review): funs() is deprecated in dplyr >= 0.8 — across()/list(mean)
# is the replacement.
xSelectedGrouped <- xSelected %>%
  group_by(id,activity) %>%
  summarise_all(funs(mean))
names(xSelectedGrouped)[3:ncol(xSelected)] <- paste('ave',names(xSelected)[3:ncol(xSelected)], sep='')
write_csv(xSelected, 'xselected.csv')
write_csv(xSelectedGrouped, 'xgrouped.csv')
|
43ebc4ef9e6f911755c92dcda2e0c202933c4cfa
|
28eb040ee4e8487e5c6cc43a8fb014b427f95c90
|
/read_data.R
|
bd6677e0a8a34d9280fe76cf43f965dfd5de8733
|
[
"MIT"
] |
permissive
|
blatoo/testOutskewer
|
b963512b06d5e6ab034a91d2970beb41a1e9900a
|
432e023b7ad1b043f669c667f9a5865c3d6b4dbc
|
refs/heads/master
| 2020-06-04T07:31:34.264821
| 2015-07-21T06:42:36
| 2015-07-21T06:42:36
| 39,428,028
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,510
|
r
|
read_data.R
|
# Load the outskewer results table (whitespace-separated) and coerce the
# score column `x` and the 0/1 flag columns (yes/maybe/no/unknown) to numeric.
# NOTE(review): setwd() with a machine-specific absolute path makes this
# script non-portable — consider relative paths.
#get my databases
setwd("D:/Ying/coding/RStudio/outskewer")
mydataset <- local({
  data <- read.csv(file="data/my_results.txt", sep = "", stringsAsFactors = FALSE, header = TRUE)
  data$x <- as.numeric(data$x)
  data$yes <- as.numeric(data$yes)
  data$maybe <- as.numeric(data$maybe)
  data$no <- as.numeric(data$no)
  data$unknown <- as.numeric(data$unknown)
  return(data)
})
# Split observations into outlier-status groups.
#
# result_df: data frame with a numeric column `x` and 0/1 indicator columns
#            `yes`, `maybe`, `unknown` (as produced by the outskewer results).
# Returns a named list (yes, maybe, unknown) of one-column data frames
# containing `x` for the rows flagged in each group; row names are preserved
# so downstream code can recover the original positions.
get_marks <- function(result_df){
  yes <- subset(result_df, yes == 1, select = "x")
  maybe <- subset(result_df, maybe == 1, select = "x")
  unknown <- subset(result_df, unknown == 1, select = "x")
  # Bug fix: the list element was previously misspelled "unknow", so the
  # consumer (get_points) reading `marks_list$unknown` silently got NULL.
  return(list(yes = yes, maybe = maybe, unknown = unknown))
}
# Convert the per-status data frames produced by get_marks() into parallel
# x (row-name) / y (value) vectors ready for points() calls.
#
# marks_list: list with elements yes, maybe, unknown, each a one-column
#             data frame of `x` values (a missing element yields NULLs).
# Returns a list with outliers.x/.y, maybe.x/.y, unknown.x/.y.
get_points <- function(marks_list){
  yes_df <- marks_list$yes
  maybe_df <- marks_list$maybe
  unknown_df <- marks_list$unknown
  list(
    outliers.x = rownames(yes_df),
    outliers.y = yes_df$x,
    maybe.x = rownames(maybe_df),
    maybe.y = maybe_df$x,
    unknown.x = rownames(unknown_df),
    unknown.y = unknown_df$x
  )
}
# Plot the series with outlier ("yes") and "maybe" points highlighted.
results <- get_marks(mydataset)
points_pos <- get_points(results)
plot(mydataset$x, type = "l", col = "red")
# Filled circles for definite outliers, filled triangles for "maybe".
points(points_pos$outliers.x, points_pos$outliers.y, pch = 21, bg = "green")
points(points_pos$maybe.x, points_pos$maybe.y, pch = 25, bg = "blue")
legendNames = c("Outliers", "May be")
legend('topright', legendNames, col = c('green', 'blue'), cex = 1.5, bty="n", pch = c(21, 25), pt.bg = c("green", "blue"))
c282a7d384e02cfaf15569f30f44c833a4ec849b
|
a00a5de7e7e1226a095af8e871c24656cc138a55
|
/Figure1B_Broader_Molecular_Descriptors_043020.R
|
dcf928256fe459fbd84dbbe27df224f1f7b8febb
|
[] |
no_license
|
ndfriedman/WES-RECAPTURES-IMPACT
|
a3d4ebfd8134ab0158e8c994a910157c2750334b
|
9cde92d724145b6f6ea54cc171203ffe86b1cf14
|
refs/heads/master
| 2022-09-06T23:21:35.019606
| 2020-05-31T09:01:03
| 2020-05-31T09:01:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,421
|
r
|
Figure1B_Broader_Molecular_Descriptors_043020.R
|
# Figure1B — stacked one-row "oncoprint-style" tracks of broader molecular
# descriptors (MSI score, mutational signature presence, purity, WGD, FGA,
# mutation counts, TMB/NMB variants) per sample, ordered by cancer type,
# assembled with cowplot and written to PDF.
#libraries & custom functions
suppressWarnings(library(data.table))
suppressWarnings(library(plyr))
suppressWarnings(library(dplyr))
suppressWarnings(library(stringr))
suppressWarnings(library(ggplot2))
suppressWarnings(library(reshape2))
suppressWarnings(library(forcats))
suppressWarnings(library(RColorBrewer))
library(cowplot)
library(purrr)
library(grid)
library(gridExtra)
source('/ifs/res/taylorlab/chavans/scripts-orphan/multiplot.R')
specify_decimal = function(x, k) format(round(x, k), nsmall=k)
"%ni%" = Negate("%in%")
curr_date = format(Sys.Date(),"%d-%b-%y")
# Sample-level WES metadata, sorted by TMB (descending).
ex_clin = fread('~/tempo-cohort-level/WES_metadata_040620.txt') %>% arrange(desc(TMB));
dim(ex_clin); head(ex_clin)
im_clin = fread('~/tempo-cohort-level/data_clinical_sample_011920.txt',skip = 4) %>%
  select(SAMPLE_ID, CANCER_TYPE) %>%
  distinct(.);
dim(im_clin); head(im_clin)
theme_set(theme_classic(base_size = 12))
# Samples with at least one mutational signature at >= 20% contribution.
SigWES = ex_clin %>% filter_at(vars(starts_with("Signature")),any_vars(. >= 0.2))
mol_desc = ex_clin %>%
  mutate(SigWESPresent = ifelse(DMP %in% SigWES$DMP,TRUE,FALSE)) %>%
  select(DMP,
         Cancer_Type_Aggregate,
         MSIscore,
         SigWESPresent,
         MutationsPerSample,
         TMB,
         TMBWES_NonIMgenes,
         NMBWES,
         NMBWES_NonIMGenes,
         Purity_Reviewed,
         WGD_status,
         FGA)
table(mol_desc$SigWESPresent)
# Order samples on the x-axis by cancer type so the tracks line up.
mol_desc$DMP = factor(mol_desc$DMP, levels=mol_desc$DMP[order(mol_desc$Cancer_Type_Aggregate)])
colourCount = length(unique(mol_desc$Cancer_Type_Aggregate))
getPalette = colorRampPalette(brewer.pal(9, "Set1"))
cancer = ggplot(mol_desc, aes(x=reorder(DMP, mol_desc$DMP), y = "Cancer_Type", fill=Cancer_Type_Aggregate)) +
  geom_bar(show.legend = F, stat = 'identity', width =1, na.rm = F) +
  scale_fill_manual(values = getPalette(colourCount)) +
  theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), axis.title.x=element_blank(),
        axis.ticks.y = element_blank(), axis.title.y=element_blank(),
        panel.grid = element_blank(), panel.background = element_blank(),
        legend.position="bottom", legend.direction="horizontal", legend.title = element_blank())
#cancer
msiexome = ggplot(mol_desc, aes(x=reorder(DMP, mol_desc$DMP), y = "MSIscore", fill=MSIscore)) +
  geom_bar(show.legend = F, stat = 'identity', width = 1, na.rm = F) +
  scale_fill_gradient(low="cornsilk", high="darkgrey", na.value = 'grey80') +
  theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), axis.title.x=element_blank(),
        axis.ticks.y = element_blank(), axis.title.y=element_blank(),
        panel.grid = element_blank(), panel.background = element_blank(),
        legend.position="bottom", legend.direction="horizontal")
sigexome = ggplot(mol_desc, aes(x=reorder(DMP, mol_desc$DMP), y = "SignatureWES", fill=SigWESPresent)) +
  geom_bar(show.legend = F, stat = 'identity', width = 1, na.rm = F) +
  scale_fill_manual(values = c('cornsilk','darkgrey'), na.value = 'grey80') +
  theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), axis.title.x=element_blank(),
        axis.ticks.y = element_blank(), axis.title.y=element_blank(),
        panel.grid = element_blank(), panel.background = element_blank(),
        legend.position="bottom", legend.direction="horizontal")
# ###
# coverage = ggplot(mol_desc, aes(x=reorder(DMP, mol_desc$DMP), y = CoverageWES, fill=CoverageWES)) +
#   geom_bar(show.legend = F, stat = 'identity', width =1, na.rm = F) +
#   #scale_fill_gradient(low="white", high="forestgreen", na.value = 'grey80') +
#   theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), axis.title.x=element_blank(),
#         axis.ticks.y = element_blank(), axis.title.y = element_text(size = 7),
#         panel.grid = element_blank(), panel.background = element_blank())
#
# baitset = ggplot(mol_desc, aes(x=reorder(DMP, mol_desc$DMP), y = "BaitSet", fill=BaitSet)) +
#   geom_bar(show.legend = F, stat = 'identity', width =1, na.rm = F) +
#   scale_fill_manual(values = c('cornsilk','grey','khaki','seashell'),na.value = 'grey80') +
#   theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), axis.title.x=element_blank(),
#         axis.ticks.y = element_blank(), axis.title.y=element_blank(),
#         panel.grid = element_blank(), panel.background = element_blank(),
#         legend.position="bottom", legend.direction="horizontal")
# ###
purity = ggplot(mol_desc, aes(x=reorder(DMP, mol_desc$DMP), y = "Purity", fill=Purity_Reviewed)) +
  geom_bar(show.legend = F, stat = 'identity', width =1, na.rm = F) +
  scale_fill_gradient(low="darkgrey", high="skyblue", na.value = 'grey80') +
  theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), axis.title.x=element_blank(),
        axis.ticks.y = element_blank(), axis.title.y=element_blank(),
        panel.grid = element_blank(), panel.background = element_blank(),
        legend.position="bottom", legend.direction="horizontal")
wgd = ggplot(mol_desc, aes(x=reorder(DMP, mol_desc$DMP), y = "WGD", fill=WGD_status)) +
  geom_bar(show.legend = F, stat = 'identity', width =1, na.rm = F) +
  scale_fill_manual(values = c('cornsilk','darkgrey'), na.value = 'grey80') +
  theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), axis.title.x=element_blank(),
        axis.ticks.y = element_blank(), axis.title.y=element_blank(),
        panel.grid = element_blank(), panel.background = element_blank(),
        legend.position="bottom", legend.direction="horizontal")
fga = ggplot(mol_desc, aes(x=reorder(DMP, mol_desc$DMP), y = "FGA", fill=FGA)) +
  geom_bar(show.legend = F, stat = 'identity', width =1, na.rm = F) +
  scale_fill_gradient(low="darkgrey", high="skyblue", na.value = 'grey80') +
  theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), axis.title.x=element_blank(),
        axis.ticks.y = element_blank(), axis.title.y=element_blank(),
        panel.grid = element_blank(), panel.background = element_blank(),
        legend.position="bottom", legend.direction="horizontal")
###
# Count/burden tracks below use a log10 y-axis.
wesmutcnt = ggplot(mol_desc, aes(x=reorder(DMP, mol_desc$DMP), y = MutationsPerSample, fill=MutationsPerSample)) +
  geom_bar(show.legend = F, stat = 'identity', width =1, na.rm = F) + scale_y_log10() + ylab('Mutation Count') +
  #scale_fill_gradient(low="white", high="skyblue", na.value = 'grey80') +
  theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), axis.title.x=element_blank(),
        axis.ticks.y = element_blank(), axis.title.y = element_text(size = 9),
        panel.grid = element_blank(), panel.background = element_blank())
westmb = ggplot(mol_desc, aes(x=reorder(DMP, mol_desc$DMP), y = TMB, fill=TMB)) +
  geom_bar(show.legend = F, stat = 'identity', width =1, na.rm = F) + scale_y_log10() + ylab('TMB') +
  #scale_fill_gradient(low="skyblue", high="skyblue", na.value = 'grey80') +
  theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), axis.title.x=element_blank(),
        axis.ticks.y = element_blank(), axis.title.y = element_text(size = 9),
        panel.grid = element_blank(), panel.background = element_blank())
westmb_nim = ggplot(mol_desc, aes(x=reorder(DMP, mol_desc$DMP), y = TMBWES_NonIMgenes, fill=TMBWES_NonIMgenes)) +
  geom_bar(show.legend = F, stat = 'identity', width =1, na.rm = F) + scale_y_log10() + ylab('TMB_NIM') +
  #scale_fill_gradient(low="white", high="dodgerblue", na.value = 'grey80') +
  theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), axis.title.x=element_blank(),
        axis.ticks.y = element_blank(), axis.title.y = element_text(size = 9),
        panel.grid = element_blank(), panel.background = element_blank())
wesnmb = ggplot(mol_desc, aes(x=reorder(DMP, mol_desc$DMP), y = NMBWES, fill=NMBWES)) +
  geom_bar(show.legend = F, stat = 'identity', width =1, na.rm = F) + scale_y_log10() + ylab('NMB') +
  #scale_fill_gradient(low="white", high="skyblue", na.value = 'grey80') +
  theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), axis.title.x=element_blank(),
        axis.ticks.y = element_blank(), axis.title.y = element_text(size = 9),
        panel.grid = element_blank(), panel.background = element_blank())
wesnmb_nim = ggplot(mol_desc, aes(x=reorder(DMP, mol_desc$DMP), y = NMBWES_NonIMGenes, fill=NMBWES_NonIMGenes)) +
  geom_bar(show.legend = F, stat = 'identity', width =1, na.rm = F) + scale_y_log10() + ylab('NMB_NIM') +
  #scale_fill_gradient(low="white", high="skyblue", na.value = 'grey80') +
  theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), axis.title.x=element_blank(),
        axis.ticks.y = element_blank(), axis.title.y = element_text(size = 9),
        panel.grid = element_blank(), panel.background = element_blank())
###
# Stack the 11 tracks vertically; rel_heights gives the taller burden tracks
# more vertical space than the single-row categorical tracks.
plot = plot_grid(msiexome, sigexome, purity, wgd, fga, wesmutcnt,westmb, wesnmb,westmb_nim, wesnmb_nim, cancer, align = "hv", ncol = 1, rel_heights = c(0.15,0.15,0.15,0.15,0.15,0.25,0.25,0.25,0.25,0.25,0.15))
plot
ggsave('~/tempo-cohort-level/Figure1B_MolecularDescriptors.pdf', plot = plot, height=8,width =11)
|
e26437e1cce0c5299d34545621137c6ac4f8fba7
|
1586a418d5d558c1c1c8d00d781fc6df4797053a
|
/R/create_db.R
|
2e98f5ba195a8d4d63fc4509de88dbcd62244540
|
[] |
no_license
|
CRI-iAtlas/iatlas-data
|
95a6ee640e34167e4657860e91266683d2031691
|
606b1a8f89be6cd4a2523fc8bcc059125f3466d8
|
refs/heads/staging
| 2020-12-08T08:02:25.380880
| 2020-06-25T23:17:51
| 2020-06-25T23:17:51
| 232,928,065
| 1
| 1
| null | 2020-06-24T22:17:52
| 2020-01-09T23:45:22
|
R
|
UTF-8
|
R
| false
| false
| 692
|
r
|
create_db.R
|
# Global function that may be used to spin-up, create, or reset the Postgres DB.
# env may be "prod", "dev", or "test"; NULL defaults to dev downstream.
# If "prod" is passed, the shell script is NOT executed and NULL is returned.
# reset may be "create", "reset", or NULL. If NULL, the DB and tables are not rebuilt.
# NOTE: "create"/"reset" rebuild the DB and tables, wiping out any existing ones.
# script_path is the directory that contains create_db.sh.
create_db <- function(env = "dev", reset = NULL, script_path = "scripts") {
  # Guard clause: never touch production from this helper.
  if (env == "prod") {
    return(invisible(NULL))
  }
  # paste() silently drops a NULL `reset`, so the script receives it only
  # when the caller asked for a create/reset.
  script <- paste0(script_path, "/create_db.sh")
  command <- paste("bash", script, env, reset, sep = " ")
  system(command)
}
|
ed1646eb5bec73f0b17e300ed35e457ab7dcb0f6
|
57cf2b17ad01b78b9f7f4c7e4e229c48dcd3b2f3
|
/R/lowcost.matrix.rep.R
|
fe13a1e559d4f19a92f77163d424a103e1cc3d20
|
[] |
no_license
|
tf2/CNsolidate
|
b5c62014f4c06751697d2332df7e7787bdf3faa7
|
c856aa54604d5a0e8cfcd4eb9790bef70cc8c7f4
|
refs/heads/master
| 2021-01-22T09:16:50.336608
| 2015-06-24T07:57:56
| 2015-06-24T07:57:56
| 9,937,180
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,067
|
r
|
lowcost.matrix.rep.R
|
# Segment a signal in fixed-length windows and return per-segment summaries
# (presumably copy-number data, given the surrounding package -- confirm).
#
# dat:    matrix/data.frame; column 1 = id (used once for the output),
#         columns 2-3 = start/stop positions, column 4 = the signal values.
# segLen: number of rows processed per window.
# cpPex:  maximum change points allowed, expressed per 100 rows of a window.
#
# Returns cbind(id, segment starts, segment stops, segment means, lengths).
# NOTE(review): depends on an external findsegments() not defined in this
# file; the trailing print(so) looks like leftover debugging output.
`lowcost.matrix.rep` <- function(dat, segLen, cpPex) {
# Window start indices and the running window end (so).
s = seq(1, length(dat[,1]), by=segLen)
so = segLen
# Convert the per-100-rows budget into an absolute change-point count.
cpPex = (segLen/100)*cpPex
starts = NULL
stops = NULL
mens = NULL
lens = NULL
# Walk the signal one window at a time.
for(x in 1:length(s)) {
st = s[x]
tst = st
ss = 1
# Clamp the final (short) window to the end of the data and rescale
# the change-point budget to its actual length (minimum 1).
if (so > length(dat[,1])) {
so = length(dat[,1])
tlen = so-st
cpPex = (tlen/100)*cpPex
if(cpPex < 1) {
cpPex=1
}
}
m = dat[st:so,4]
if(length(m) > 1) {
res = findsegments(m, maxcp=cpPex, maxk=length(m), verbose=0)
# Last row of res$th -- apparently the change points at the maximal
# change-point count. NOTE(review): confirm against findsegments() docs.
p = res$th[length(res$th[,1]),]
# Summarize each segment between consecutive change points.
for(y in 1:length(p)) {
soo = p[y] -1
tso = (st+soo)-1
mu = mean(m[ss:soo])
lens = c(lens, length(m[ss:soo]))
ss = p[y]
# Clamp indices that run past the end of the data.
if (tst> length(dat[,1])) {
tst=length(dat[,1])-1
}
if (tso> length(dat[,1])) {
tso=length(dat[,1])
}
starts = c(starts, dat[tst,2])
stops = c(stops, dat[tso,3])
mens = c(mens, mu)
tst = st+p[y]
}
so=so+segLen
}
}
print(so)
return(cbind(dat[1,1], starts, stops, mens, lens))
}
|
53e3d401b40e13a2081e666891e34caefa43f934
|
e73e42b43f6f539ab626881c190f9d3cd61056f9
|
/files/Barret/qtdot.r
|
71e325657a88e61c5447e39fdc672c662f1723cd
|
[] |
no_license
|
ghubona/cranvasOLD
|
59fd1aa314b618779b716aaad0bf42aa777964fd
|
119682c49a656a515a9acdc4d32f5392f92dfc4d
|
refs/heads/master
| 2021-01-09T05:19:45.170404
| 2010-12-16T20:38:16
| 2010-12-16T20:38:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,223
|
r
|
qtdot.r
|
#' Create a dot plot
#' Create a dot plot from 1-D numeric data
#'
#' http://content.answcdn.com/main/content/img/oxford/Oxford_Statistics/0199541454.dot-plot.1.jpg
#'
#' @param data vector of numeric data to be made into a histogram
#' @param horizontal boolean to decide if the bars are horizontal or vertical
#' @param ... arguments supplied to hist() or the hist layer
#' @author Barret Schloerke \email{bigbear@@iastate.edu}
#' @keywords hplot
#' @examples
#' # torture
#' qdot(rnorm(1000000), floor(rnorm(1000000)*3))
#'
#' # each column is split evenly
#' qdot(rnorm(1000000), floor(runif(1000000)*15), title = "Toture - stack")
#'
#' # each column has similar height colors
#' qdot(rnorm(1000000), floor(runif(1000000)*15), title = "Toture - dodge", position = "dodge")
#'
#' # range from 0 to 1
#' qdot(rnorm(1000000), floor(runif(1000000)*15), title = "Toture - relative", position = "relative")
#'
#' # color tests
#' # all color is defined
#' qdot(mtcars$disp, horizontal = TRUE, fill = "gold", stroke = "red4")
#'
#' # stacked items
#' qdot(mtcars$disp, mtcars$cyl, stroke = "black", position = "stack")
#'
#' # raw value items
#' qdot(mtcars$disp, mtcars$cyl, stroke = "black", position = "identity")
#'
#' # dodged items
#' qdot(mtcars$disp, mtcars$cyl, stroke = "black", position = "dodge")
#'
#' # range from 0 to 1
#' qdot(mtcars$disp, mtcars$cyl, stroke = "black", position = "relative")
qdot <- function(
data,
splitBy = rep(1, length(data)),
horizontal = TRUE,
position = "none",
color = NULL,
fill = NULL,
stroke = NULL,
title = NULL,
name = names(data),
...
) {
# Bin the continuous data into bar geometry (positions + colors).
bars_info <- continuous_to_bars(data, splitBy, position, color, fill, stroke, ...)
bars <- bars_info$data
# NOTE(review): this overwrites the `color` argument and the result is
# never used again in this function -- possibly dead code.
color <- bars$color
# contains c(x_min, x_max, y_min, y_max)
# Orientation decides which axis carries counts and which carries breaks.
if (horizontal) {
ranges <- c(make_data_ranges(c(0, bars$top)), make_data_ranges(bars_info$breaks))
} else {
ranges <- c(make_data_ranges(bars_info$breaks), make_data_ranges( c(0, bars$top)))
}
if (horizontal) {
ylab = name
xlab = "count"
} else {
ylab = "count"
xlab = name
}
# Background painter: grid lines, axis labels, and the optional title.
coords <- function(item, painter, exposed) {
# grey background with grid lines
if (horizontal) {
draw_grid_with_positions_fun(painter, ranges, horiPos = make_pretty_axes(ranges[1:2], ranges[1], ranges[2]))
} else {
draw_grid_with_positions_fun(painter, ranges, vertPos = make_pretty_axes(ranges[3:4], ranges[3], ranges[4]))
}
# put labels, if appropriate
draw_x_axes_fun(painter, ranges, xlab)
draw_y_axes_fun(painter, ranges, ylab)
# title
if(!is.null(title))
add_title_fun(painter, ranges, title)
}
# Data painter: one rectangle per bar; the two branches just swap the
# roles of the x/y coordinates depending on orientation.
dot.all <- function(item, painter, exposed) {
if (horizontal) {
qdrawRect(painter,
xleft = c(bars$bottom), #left
ybottom = c(bars$left), # bottom
xright = c(bars$top), # right
ytop = c(bars$right), # top
stroke = c(bars$stroke),
fill = c(bars$fill)# fill
)
} else {
qdrawRect(painter,
xleft = c(bars$left), #left
ybottom = c(bars$bottom), # bottom
xright = c(bars$right), # right
ytop = c(bars$top), # top
stroke = c(bars$stroke),
fill = c(bars$fill)# fill
)
}
}
# Scene assembly: background and data layers share the same limits.
windowRanges <- make_window_ranges(ranges, xlab, ylab)
lims <- qrect(windowRanges[c(1,2)], windowRanges[c(3,4)])
scene = qscene()
bglayer = qlayer(scene, coords, limits = lims, clip = FALSE
# , keyPressFun=keyPressFun
)
datalayer = qlayer(scene, dot.all, limits = lims, clip = FALSE)
# Disabled interaction layers (brushing/querying), kept for reference:
# brushing_layer = qlayer(scene, brushing_draw,
# # mousePressFun = brushing_mouse_press, mouseMoveFun = brushing_mouse_move,
# # mouseReleaseFun = brushing_mouse_release,
# limits = lims, clip = FALSE
# )
# querylayer = qlayer(scene, query_draw, limits = lims, clip = FALSE,
# # hoverMoveFun = query_hover, hoverLeaveFun = query_hover_leave
# )
# # update the brush layer in case of any modifications to the mutaframe
# if (is.mutaframe(odata)) {
# add_listener(odata, function(i,j) {
# if (j == ".brushed") {
# qupdate(brushing_layer)
# }
# })
# }
# add_listener(.brush.attr, function(i, j) {
# # wouldn't need to call recalchiliting ...
# qupdate(brushing_layer)
# })
qplotView(scene = scene)
}
# # create the plot
# # window size 600 x 600; xrange and yrange from above
# windowRanges <- make_window_ranges(ranges, xlab, ylab)
# plot1<-make_new_plot(windowRanges)
#
#
#
# # for different representations of the data (shape, color, etc) pass vecor arguments for shape, color, x, y
# # c(obj) makes a matrix into a vector
# if (horizontal) {
# plot1$add_layer(
# hbar(
# bottom = c(bars$left),
# top = c(bars$right),
# width = c(bars$top),
# left = c(bars$bottom),
# fill = c(bars$fill),
# stroke = c(bars$stroke)
# )
# )
# } else {
# plot1$add_layer(
# vbar(
# left = c(bars$left),
# right = c(bars$right),
# height = c(bars$top),
# bottom = c(bars$bottom),
# fill = c(bars$fill),
# stroke = c(bars$stroke)
# )
# )
# }
#
# draw_x_axes(plot1, ranges, xlab)
# draw_y_axes(plot1, ranges, ylab)
#
# if(!is.null(title))
# add_title(plot1, ranges, title)
#
# plot1
|
7c046b03b3e1f9be01a4637d3ee52f7c11bad728
|
69b7540b543e5a08f4af3da605ab58cff8f8d1f4
|
/analysis.R
|
335aaba2f2eae73b33e12881cba79793143591fe
|
[] |
no_license
|
leosaenger/members-stats-workshop
|
19a1affbdd2ce8d30913e5ea4e579fc43fa3cb0e
|
f53ac9b295613ebd65416c0e9d66780b2f9e451b
|
refs/heads/main
| 2023-03-04T17:42:03.631249
| 2021-02-11T00:43:26
| 2021-02-11T00:43:26
| 336,910,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,468
|
r
|
analysis.R
|
# r supplemental bootcamp materials
# stats supplement
# leo saenger feb 6 2021 for HODP
# Workshop script: explores PPP loan data and sketches where a t-test /
# regression with covariates would go (the modeling steps are left as
# exercises at the bottom).
library(tidyverse)
library(ggplot2)
library(estimatr)
# set it to wherever you have it
# NOTE(review): this only works when run interactively inside RStudio
# (rstudioapi); it fails when sourced via Rscript.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# let's take some data - 3044 firms in Cambridge/Somerville w/ less than 150k in PPP loans (really, grants)
# about PPP: https://www.sba.gov/funding-programs/loans/coronavirus-relief-options/paycheck-protection-program/ppp-loan-forgiveness
df <- readRDS('ppp_dataset.RDS')
# i've gone ahead and cleaned the data for you already and flagged a
# few harvard organizations that applied for loans.
# let's start by getting some key metrics:
# NOTE(review): na.rm = T uses the reassignable alias T; TRUE is safer.
df %>% summarize(avg = mean(current_approval_amount, na.rm = T), n = n()) # average loan size
# how about for harvard student orgs?
df %>% filter(harvard_org == TRUE) %>% summarize(avg = mean(current_approval_amount, na.rm = T), n = n()) # average loan size
df %>% filter(harvard_org == FALSE) %>% summarize(avg = mean(current_approval_amount, na.rm = T), n = n()) # for contrast
# that seems high, but it's hard to say - we want to condition on a few things
# let's get the conditional means: we could do a t test:
# eg via: t.test(x ~ y, data = df)
# but regression makes our job simple
# eg via lm_robust( , data = df) # how does this compare?
# let's add some covariates
# now try for harvard student-founded orgs
# or, look at avg payroll, or try something entirely different!
|
820c8f397d49716bb55cda812774d4e675418e57
|
b6aba9b09f3fd4672c17c0311f5ee3eafdb29968
|
/Codes/FeatureSelectionMDS/Suvrel_normalization.R
|
e1722f84debbdc84aa47e95e9262e1445dcf8c75
|
[] |
no_license
|
tambonis/GA_RNA_Seq
|
fc31d0e0674b334adfdf727555149aa2a42b7585
|
4ddb2e45e5d37f6e4437c1f3234b05346f0fc03b
|
refs/heads/master
| 2022-03-01T13:03:41.041835
| 2019-11-05T13:23:45
| 2019-11-05T13:23:45
| 105,914,262
| 1
| 0
| null | 2017-10-09T12:05:04
| 2017-10-05T16:34:12
| null |
UTF-8
|
R
| false
| false
| 624
|
r
|
Suvrel_normalization.R
|
################################################################################
## Mean 0, variance 1 normalization (row-wise).
## Tiago Tambonis, 06/15.
##
## Each row (gene) is centered by its mean and scaled by its standard
## deviation. Rows with zero variance cannot be scaled by their own sd,
## so they are scaled by the smallest positive sd found in the matrix.
##
## counts: numeric matrix (genes in rows, samples in columns).
## group:  unused; kept for interface compatibility with existing callers.
## Returns the normalized matrix, same dimensions as `counts`.
################################################################################
Suvrel.normalization <- function(counts, group){
  sd_g <- sqrt(apply(counts, 1, var))
  filter <- sd_g > 0
  # Smallest positive sd, used as a fallback scale for constant rows.
  # NOTE(review): if every row has zero variance this is Inf (min over an
  # empty set, with a warning) and all rows normalize to 0 -- confirm.
  sd_g.min <- min(sd_g[filter])
  MEAN <- apply(counts, 1, mean)
  for (i in seq_len(nrow(counts))) {
    if (filter[i]) {
      counts[i, ] <- (counts[i, ] - MEAN[i]) / sd_g[i]
    } else {
      # Bug fix: the original computed this expression but never assigned
      # it back, leaving zero-variance rows un-normalized.
      counts[i, ] <- (counts[i, ] - MEAN[i]) / sd_g.min
    }
  }
  return(counts)
}
|
ed618e6519d2bcce1edc3b5e4d3abfd0d641f455
|
2ed991300219427268e844e3dd37012ec0000f5f
|
/R/StatisticFactory.R
|
b58ebea708f9c011ccf27220285f47dcd45d12f5
|
[
"MIT"
] |
permissive
|
riccardoporreca/powerly
|
3ceeba85900ff11f30ad9afe588f3c66cd40fe8e
|
725d11819fa35cc4c75351c12b367a580a83f010
|
refs/heads/main
| 2023-08-26T01:51:31.984149
| 2021-11-08T08:39:23
| 2021-11-08T08:39:23
| 425,935,717
| 1
| 0
|
NOASSERTION
| 2021-11-08T17:40:31
| 2021-11-08T17:40:31
| null |
UTF-8
|
R
| false
| false
| 343
|
r
|
StatisticFactory.R
|
#' @include PowerStatistic.R
# Factory that maps a statistic type name to a concrete statistic object.
StatisticFactory <- R6::R6Class("StatisticFactory",
  public = list(
    # Return a freshly constructed statistic for `type`. Only "power" is
    # implemented; any other selector falls through to the shared
    # not-developed error.
    get_statistic = function(type) {
      statistic <- switch(type,
        power = PowerStatistic$new(),
        stop(.__ERRORS__$not_developed)
      )
      statistic
    }
  )
)
|
504b475a3d77b7e7b8eba712923e50dfe515aced
|
ba95ca23cd4d1463fba07d6f88cdfa5fb0e7ebce
|
/man/downloadDB.Rd
|
811e87c9c1667b7f792dbb8955fac0d12db56fe8
|
[] |
no_license
|
adrianacarmo/FunctSNP
|
b0ae937ac0dfb09f54a2658bc066b8cc24c58c2a
|
1b52238c1d203d2c3df8ef49f7f18cbe5cbbf7f6
|
refs/heads/master
| 2021-01-18T02:39:48.461738
| 2010-02-01T00:00:00
| 2010-02-01T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,421
|
rd
|
downloadDB.Rd
|
\name{downloadDB}
\alias{downloadDB}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Download pre-assembled species-specific databases
}
\description{
Download or update one or more pre-assembled databases for selected species.
}
\usage{
downloadDB(speciesCode, db.list=FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{speciesCode}{
A vector containing 3 letter species codes [Default = the species code set by setSpecies()]
}
\item{db.list}{
TRUE or FALSE [Default = FALSE]
}
}
\details{
Use the argument db.list=TRUE to display the species codes for databases that can be downloaded
}
\value{
The function is called for its side-effect. It downloads a species specific database and saves it in the FunctSNP library.
}
\references{
http://www.csiro.au/science/FunctSNP.html
}
\note{
Databases on the FunctSNP ftp site are updated on the first day of each month
}
\author{
S. J. Goodswen <Stephen.Goodswen@csiro.au>
}
\seealso{
\code{\link{setSpecies}}
\code{\link{makeDB}}
}
\examples{
# list databases that can be downloaded
downloadDB(db.list=TRUE)
\dontrun{
# Download a database for Bos taurus
downloadDB("bta")
# Download the databases for Gallus gallus and Sus scrofu
species <- c("gga","ssc")
downloadDB(species)
# Download default species database set by setSpecies()
downloadDB()
}
}
|
a9ab90264f28cd5da7b4bd34f52ad8551fce922d
|
7612f4d040ba14b99544587e357f46466f85c1eb
|
/R/TFM_R/.Rproj.user/F4600158/sources/per/t/21A29B5C-contents
|
128cc8ffa6bb34eca9830fac79a19d73aab12dc8
|
[] |
no_license
|
vargasde/TFM_EAE
|
765c0ab900cf2498cf385f224e0d227bad4b5441
|
9b3475010fac30965da0cc3d49a16200caa4cfee
|
refs/heads/master
| 2020-04-21T09:47:38.170681
| 2019-06-09T12:47:32
| 2019-06-09T12:47:32
| 169,455,449
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,786
|
21A29B5C-contents
|
# TFM (master's thesis): consolidate Madrid traffic-fine monthly files.
# Import individual data (single-file variant, kept for reference):
# multas <- read.csv("MultasMAD.csv", header = TRUE, sep = ",", fill = FALSE, blank.lines.skip = FALSE)
# Path to the folder holding the source files.
# NOTE(review): the path has no leading "/" so it is relative -- confirm it resolves.
folder<- "Users/Diego/OneDrive/Documents/Maestr?a/Clases/TFM/Codigos/TFM_EAE/Datos"
# List of .csv files to read.
file_list <- list.files(path=folder, pattern="*.csv")
# Loop reading each file into a data frame named after the .csv file.
# NOTE(review): read.csv() is given only the bare file name, so it reads from
# the current working directory, not from `folder` -- confirm wd == folder.
for (i in 1:length(file_list)){
assign(file_list[i],
read.csv(paste(file_list[i]), header = TRUE, sep = ";", fill = TRUE, blank.lines.skip = TRUE)
)}
# Bind together all the 12-column fine tables.
Multas12v <- rbind(`201602_detalle.csv`, `201601_detalle.csv`,
`201512_detalle.csv`, `201511_detalle.csv`, `201510_detalle.csv`,
`201509_detalle.csv`, `201508_detalle.csv`, `201507_detalle.csv`,
`201506_detalle.csv`, `201505_detalle.csv`, `201504_detalle.csv`,
`201503_detalle.csv`, `201502_detalle.csv`, `201501_detalle.csv`,
`201411_12_detalle.csv`, `201409_10_detalle.csv`)
# Add the two coordinate columns missing from the 12-column tables.
# NOTE(review): 'NA' is the literal string "NA", not a missing value (NA) -- confirm intended.
Multas12v$COORDENADA_X <- 'NA'
Multas12v$COORDENADA_Y <- 'NA'
head(Multas12v)
# Join the new table with the 14-column files.
Multas <- rbind(Multas12v,`201809_detalle.csv`, `201808_detalle.csv`, `201807_detalle.csv`,
`201806_detalle.csv`, `201805_detalle.csv`, `201804_detalle.csv`,
`201803_detalle.csv`, `201802_detalle.csv`, `201801_detalle.csv`,
`201712_detalle.csv`, `201711_detalle.csv`, `201710_detalle.csv`,
`201709_detalle.csv`, `201708_detalle.csv`, `201707_detalle.csv`,
`201706_detalle.csv`, `201705_detalle.csv`, `201704_detalle.csv`,
`201703_detalle.csv`, `201702_detalle.csv`, `201701_detalle.csv`,
`201612_detalle.csv`, `201611_detalle.csv`, `201610_detalle.csv`,
`201609_detalle.csv`, `201608_detalle.csv`, `201607_detalle.csv`)
# Inspect a file whose column names are problematic.
colnames(Multas12v)
colnames(`201603_detalle.csv`)
# Fix the columns of the problematic file.
colnames(`201603_detalle.csv`)[14] <- "COORDENADA_Y"
colnames(`201603_detalle.csv`)[13] <- "COORDENADA_X"
# Join the corrected file with the previous table.
Multasmad <- rbind(Multas, `201603_detalle.csv`)
# Original comment: "remove two columns present in the 2014 April-June files".
# NOTE(review): the code below only re-reads the 2016 April-June file; no
# columns are dropped here -- confirm the intent.
`201604-06_detalle.csv` <- read.csv("201604-06_detalle.csv", header = TRUE, sep = ";", fill = FALSE, blank.lines.skip = FALSE)
# Append the last data frame to the consolidated table.
multas <- rbind(Multasmad, `201604-06_detalle.csv`)
# Write the final .csv.
write.csv(multas, "MultasMAD.csv")
|
|
551a791a6a972882e09d39ac10bfebace7821df5
|
f8e58a4b8ee11502f1b07c08e93ae96de19574e1
|
/R/gen.stat.miss.R
|
2177e9dbaf8a6fabd6c4fc8cf90463ce2413fb73
|
[] |
no_license
|
cran/ARTP2
|
648609beb0d95088aabe373208f0070e8bc4a863
|
3d4401daa50050ac020c4612a2b819913bd2f549
|
refs/heads/master
| 2021-01-15T15:25:21.875869
| 2018-11-30T20:30:03
| 2018-11-30T20:30:03
| 51,500,259
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,206
|
r
|
gen.stat.miss.R
|
# Compute score statistics and their covariance matrix for a set of SNPs
# under a fitted null model, handling missing genotypes either by mean
# imputation or by pairwise recomputation on observed subjects.
#
# resp.var: name of the response column in `null`.
# null:     data frame holding the response and null-model covariates.
# family:   'binomial' (logistic null model) or anything else (linear model).
# G:        genotype matrix (subjects x SNPs); may contain NA.
# X:        design matrix of the null-model covariates.
# lambda:   scaling factor the scores are divided by (sqrt(lambda)) --
#           presumably a genomic-control inflation factor; confirm.
# options:  list; uses $impute (mean-impute missing genotypes) and $print.
# cid:      chromosome id, used only in progress messages.
#
# Returns list(score0, V): the rescaled score vector and its covariance,
# both sorted by SNP name.
gen.stat.miss <- function(resp.var, null, family, G, X, lambda, options, cid){
# Optionally mean-impute missing genotypes, column by column.
if(options$impute & any(is.na(G))){
msg <- paste0('Imputing missing genotype with means in chromosome ', cid, ': ', date())
if(options$print) message(msg)
for(j in 1:ncol(G)){
id <- which(is.na(G[, j]))
if(length(id) > 0){
G[id, j] <- mean(G[, j], na.rm = TRUE)
}
}
}
# Fit the null model (no intercept; covariates only, hence "~ . -1").
formula <- paste(resp.var, "~ . -1")
mdl0 <- glm(formula, data = null, family = family)
ng <- ncol(G)
nx <- ncol(X)
# Score vector and its covariance under the null. When G has NAs, the
# matrix products propagate NA; those entries are filled in later.
if(family == 'binomial'){
y0 <- mdl0$fitted.values
A0 <- y0 * (1 - y0)
r0 <- null[, resp.var] - y0
tmp <- try(V <- t(G) %*% (A0 * G) - t(G) %*% (A0 * X) %*% solve(t(X) %*% (A0 * X)) %*% t(X) %*% (A0 * G), silent = TRUE)
if(error.try(tmp)){
msg <- "Potential existence of multicollinearity detected and ARTP2 cannot automatically deal with it right now. Please check your covariates specified in formula"
stop(msg)
}
score0 <- as.vector(t(G) %*% r0)
}else{
# Linear model branch: s02 is the residual variance estimate.
r0 <- mdl0$residuals
s02 <- sum(r0^2)/(length(r0) - nx)
tmp <- try(V <- (t(G) %*% G - t(G) %*% X %*% solve(t(X) %*% X) %*% t(X) %*% G) / s02, silent = TRUE)
if(error.try(tmp)){
msg <- "Potential existence of multicollinearity detected and ARTP2 cannot automatically deal with it right now. Please check your covariates specified in formula"
stop(msg)
}
score0 <- as.vector(t(G) %*% r0 / s02)
}
# Genotypes still missing (imputation off or skipped): recompute the NA
# score/covariance entries SNP-by-SNP (and pairwise for off-diagonals)
# using only the subjects observed for each SNP (pair).
if(any(is.na(G))){
msg <- paste0('Some genotypes are missing in chromosome ', cid, ': ', date())
if(options$print) message(msg)
V.m <- matrix(NA, ng, ng)
score0.m <- rep(NA, ng)
rs <- colnames(G)
rownames(V.m) <- rs
colnames(V.m) <- rs
names(score0.m) <- rs
I.ab <- matrix(NA, nx, ng)
obs.id <- !is.na(G)
# Pairwise counts of jointly observed subjects; rescales V.m below.
suff.n <- t(obs.id) %*% obs.id
if(family == 'binomial'){
inv.I.aa <- solve(t(X[,,drop = FALSE]) %*% (A0 * X[,,drop = FALSE])/length(y0))
for(k in 1:ng){
id <- as.vector(which(obs.id[,k]))
if(length(id) == 0){
msg <- paste('All genotypes of SNP', rs[k], 'are missing')
stop(msg)
}
# Refit the null model on observed subjects only (reuse mdl0 if none missing).
if(length(id) == nrow(X)){
mdl <- mdl0
}else{
mdl <- glm(formula, data = null[id,,drop=FALSE], family = 'binomial')
}
y.hat <- as.vector(mdl$fitted.values)
res <- null[id, resp.var] - y.hat
A <- y.hat * (1 - y.hat)
I.ab[, k] <- t(X[id, ,drop=FALSE]) %*% (A * G[id, k]) / length(y.hat)
V.m[k, k] <- t(G[id, k]) %*% (A * G[id, k]) / length(y.hat) - t(I.ab[, k]) %*% inv.I.aa %*% I.ab[, k]
score0.m[k] <- sum(G[id,k] * res)
}
# Off-diagonal entries only where V is NA, on jointly observed subjects.
if(ng > 1){
for(k in 1:(ng-1)){
for(l in (k+1):ng){
if(!is.na(V[k,l])){
next
}
id <- as.vector(which(obs.id[, k] & obs.id[, l]))
if(length(id) == 0){
V.m[k, l] <- 0
V.m[l, k] <- 0
next
}
mdl <- glm(formula, data = null[id,,drop=FALSE], family = 'binomial')
y.hat <- mdl$fitted.values
A <- y.hat * (1 - y.hat)
V.m[k, l] <- t(G[id, k]) %*% (A * G[id, l]) / length(y.hat) - t(I.ab[, k]) %*% inv.I.aa %*% I.ab[, l]
V.m[l, k] <- V.m[k, l]
}
}
}
}else{
# Linear-model analogue of the block above.
inv.I.aa <- solve(t(X[,,drop=FALSE]) %*% X[,,drop=FALSE] / s02/ng)
for(k in 1:ng){
id <- as.vector(which(obs.id[,k]))
if(length(id) == 0){
msg <- paste('All genotypes of SNP', rs[k], 'are missing')
stop(msg)
}
if(length(id) == nrow(X)){
mdl <- mdl0
}else{
mdl <- lm(formula,data=null,subset = id)
}
res <- mdl$residuals
s2 <- sum(res^2)/(length(res)-nx)
I.ab[,k] <- t(X[id,,drop=FALSE]) %*% G[id,k]/s2/length(res)
V.m[k,k] <- t(G[id,k]) %*% G[id,k] /s2/length(res) - t(I.ab[,k]) %*% inv.I.aa %*% I.ab[,k]
score0.m[k] <- sum(G[id,k] *res)/s2
}
if(ng > 1){
for(k in 1:(ng-1)){
for(l in (k+1):ng){
if(!is.na(V[k,l])){
next
}
id <- as.vector(which(obs.id[,k] & obs.id[,l]))
if(length(id) == 0){
V.m[k,l] <- 0
V.m[l,k] <- 0
next
}
mdl <- lm(formula,data = null, subset = id)
res <- mdl$residuals
s2 <- sum(res^2)/(length(res)-nx)
V.m[k,l] <- t(G[id,k]) %*% G[id,l] /s2/length(res) - t(I.ab[,k]) %*% inv.I.aa %*% I.ab[,l]
V.m[l,k] <- V.m[k,l]
}
}
}
}
# Patch the NA entries of V/score0 with the per-observation recomputations.
V.m <- V.m * suff.n
V[is.na(V)] <- V.m[is.na(V)]
score0[is.na(score0)] <- score0.m[is.na(score0)]
}
# Standardize by sample size, rescale by lambda, and sort by SNP name.
score0 <- score0 / sqrt(nrow(X)) / sqrt(lambda)
V <- V / nrow(X)
names(score0) <- colnames(V)
rs <- sort(names(score0))
score0 <- score0[rs]
V <- V[rs, rs, drop = FALSE]
return(list(score0 = score0, V = V))
}
|
56101eda034121c808f36a622fad8fdd82901ef1
|
5c2374557193bd5a741aa36bf44532dc462003ae
|
/tests/testthat/test-block.R
|
90040e0b1a4c6d6ada9bd51330d89e09186b594b
|
[] |
no_license
|
andrie/pandocfilters
|
fab302e760702ec437188bb0da23a4b92d08c255
|
2fa1f1ee40168c4ccb6ac5c294877b5cc8349c53
|
refs/heads/master
| 2021-09-14T11:06:43.009929
| 2018-02-23T20:00:03
| 2018-02-23T20:00:03
| 105,012,941
| 0
| 1
| null | 2017-09-27T12:02:42
| 2017-09-27T12:02:41
| null |
UTF-8
|
R
| false
| false
| 4,285
|
r
|
test-block.R
|
# Unit tests for the pandoc "block" constructors: each test builds a block
# AST node, renders it via pandocfilters:::test() (HTML unless stated
# otherwise), and compares the output against the expected string.
if(interactive()) library(testthat)
context("block")
context(" - Plain")
test_that("Plain", {
## Test Str with Plain
## Both list-wrapped and bare inline content should be accepted.
x <- pandocfilters:::test(list(Plain(list(Str("Hello R!")))))
expect_equal(x, "Hello R!")
x <- pandocfilters:::test(list(Plain(Str("Hello R!"))))
expect_equal(x, "Hello R!")
} )
context(" - Para")
test_that("Para", {
## Test Str with Para
x <- pandocfilters:::test(list(Para(list(Str("Hello R!")))))
expect_equal(x, "<p>Hello R!</p>")
x <- pandocfilters:::test(list(Para(Str("Hello R!"))))
expect_equal(x, "<p>Hello R!</p>")
} )
context(" - CodeBlock")
test_that("CodeBlock", {
attr <- Attr("id", c("Programming Language"), list(c("key", "value")))
code <- "x <- 3\nprint('Hello R!')"
block <- CodeBlock(attr, code)
## pandoc >= 2.0 emits custom attributes with a "data-" prefix.
y <- if(get_pandoc_version() < "2.0"){
collapse_newline(
'<pre id="id" class="Programming Language" key="value"><code>x <- 3',
'print('Hello R!')</code></pre>'
)
} else {
collapse_newline(
'<pre id="id" class="Programming Language" data-key="value"><code>x <- 3',
'print('Hello R!')</code></pre>'
)
}
## Test Str with CodeBlock
x <- pandocfilters:::test(list(block))
expect_equal(x, y)
} )
context(" - BlockQuote")
test_that("BlockQuote", {
block <- BlockQuote(list(Plain(list(Str("Hello R!")))))
y <- collapse_newline("<blockquote>", "Hello R!", "</blockquote>")
## Test Str with BlockQuote
x <- pandocfilters:::test(list(block))
expect_equal(x, y)
} )
context(" - OrderedList")
test_that("OrderedList", {
## Items may be given at several levels of list nesting.
y <- collapse_newline("<ol>", "<li>A</li>", "<li>B</li>", "<li>C</li>", "</ol>")
ordered_1 <- Plain("A")
ordered_2 <- list(Plain(Str("B")))
ordered_3 <- list(Plain(list(Str("C"))))
block <- OrderedList(ListAttributes(), list(ordered_1, ordered_2, ordered_3))
x <- pandocfilters:::test(list(block))
expect_equal(x, y)
} )
context(" - BulletList")
test_that("BulletList", {
y <- collapse_newline("<ul>", "<li>A</li>", "<li>B</li>", "<li>C</li>", "</ul>")
bullet_1 <- list(Plain(list(Str("A"))))
bullet_2 <- list(Plain(list(Str("B"))))
bullet_3 <- list(Plain(list(Str("C"))))
block <- BulletList(list(bullet_1, bullet_2, bullet_3))
x <- pandocfilters:::test(list(block))
expect_equal(x, y)
} )
context(" - DefinitionList")
test_that("DefinitionList", {
y <- collapse_newline("<dl>", "<dt>key</dt>", "<dd>value", "</dd>", "<dt>key</dt>",
"<dd>value", "</dd>", "</dl>")
key <- list(Str("key"))
value <- list(list(Plain(list(Str("value")))))
block <- DefinitionList(list(list(key, value), list(key, value)))
x <- pandocfilters:::test(list(block))
expect_equal(x, y)
} )
context(" - Header")
test_that("Header", {
## Test Str with Header
x <- pandocfilters:::test(list(Header(list(Str("Hello R!")))))
expect_equal(x, "<h1>Hello R!</h1>")
x <- pandocfilters:::test(list(Header(Str("Hello R!"))))
expect_equal(x, "<h1>Hello R!</h1>")
} )
context(" - HorizontalRule")
test_that("HorizontalRule", {
block <- HorizontalRule()
## Test Str with Plain
x <- pandocfilters:::test(list(block))
expect_equal(x, "<hr />")
} )
context(" - Table")
test_that("Table", {
M <- matrix(1:4, 2)
## NOTE(review): `T` shadows the TRUE alias within this test scope.
T <- Table(M, col_names=c("A", "B"))
x <- pandocfilters:::test(list(T), "markdown")
## pandoc >= 2.0 no longer appends trailing blank lines to markdown tables.
y <- if(get_pandoc_version() < "2.0"){
collapse_newline(" A B", " --- ---", " 1 3", " 2 4", "", "")
} else {
collapse_newline(" A B", " --- ---", " 1 3", " 2 4")
}
expect_equal(x, y)
} )
context(" - Div")
test_that("Div", {
blocks <- list(Plain(list(Str("Hello R!"))))
block <- Div(blocks)
## Test Div
x <- pandocfilters:::test(list(block))
y <- collapse_newline("<div>", "Hello R!", "</div>")
expect_equal(x, y)
} )
context(" - Null")
test_that("Null", {
block <- Null()
y <- ""
## Test Null
x <- pandocfilters:::test(list(block))
expect_equal(x, y)
} )
context(" - Block Combine")
test_that("Block Combine", {
## Test Str with Plain
## c() on blocks concatenates them into one document.
x <- Header("Hello")
y <- Plain("R")
z <- Plain("!")
expect_equal(
pandocfilters:::test(c(x, y, z)),
collapse_newline("<h1>Hello</h1>", "R", "!")
)
} )
|
008e25c387e5f0051875dd66a029919f05d8d669
|
5399a61f1c003bbc9eca544331b5d695667748f6
|
/man/getPia.Rd
|
73d63a12e3d9e37f5de5d6b9df959fcaa5f2f45b
|
[] |
no_license
|
twjacobs/oasdir
|
432da05d953d65aee89c1a484d0f7c335773a50f
|
f80745e60b4da16ee34425fe7bcb7ea0146f6c0f
|
refs/heads/master
| 2021-07-17T08:42:15.372815
| 2019-07-24T13:36:38
| 2019-07-24T13:36:38
| 97,974,383
| 0
| 1
| null | 2018-03-23T23:57:24
| 2017-07-21T18:03:09
|
R
|
UTF-8
|
R
| false
| true
| 746
|
rd
|
getPia.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculations.R
\name{getPia}
\alias{getPia}
\title{The Primary Insurance Amount (PIA) is the actual Social Security
benefit. It is based on the average of the highest 35 years of
indexed earnings.}
\usage{
getPia(piaBends = NULL, aime = NULL)
}
\arguments{
\item{piaBends}{the PIA bend points from \code{getPiaBendPts()}}
\item{aime}{The Average Indexed Monthly Value from \code{getAimeValue()}}
}
\value{
The Primary Insurance Amount
}
\description{
The Primary Insurance Amount (PIA) is the actual Social Security
benefit. It is based on the average of the highest 35 years of
indexed earnings.
}
\examples{
getPia(piaBends = JQPublicBends, aime = JQPublicAime)
}
|
646c0c8c6f24debc978373af7ae73193653fa805
|
487e11bb17dbb49cca33e607e663be0e3292fc31
|
/data-raw/student-survey.R
|
32e2193a36b956ff42219c44f120c9465abd0c8d
|
[
"MIT"
] |
permissive
|
IVI-M/dsbox
|
d1d0f4a4b87d56c622e4fdb0ea85e3e54722c25f
|
a7c4430491e602164f6584ea7c5e36a3be7b5501
|
refs/heads/master
| 2021-09-23T15:29:06.834150
| 2018-09-25T05:09:28
| 2018-09-25T05:09:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 168
|
r
|
student-survey.R
|
# Read the raw student-survey CSV and save it as a package data set
# (the usual data-raw/ -> data/ workflow).
library(tidyverse)
library(here)
library(usethis)
# here() resolves the path relative to the project root.
student_survey <- read_csv(here("data-raw", "student-survey.csv"))
# use_data() stores the object in the package's data/ directory as .rda.
use_data(student_survey)
|
21fcc8a138b3a08ffb99ea9e020703347cdf8b7f
|
36adeff66a7d5a822a3d9971a28ae91bae3bf140
|
/R/ClassMethods.R
|
f134eff5393bdb8a04908039582e7fa683e6798b
|
[] |
no_license
|
cran/GAS
|
befe93225465c4d51cf0a2943e83cef6438c2239
|
e588e3a10bf22cb7dff4a49a848baac63d743c3f
|
refs/heads/master
| 2022-02-20T20:59:36.560819
| 2022-02-04T09:30:12
| 2022-02-04T09:30:12
| 65,930,586
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 40,874
|
r
|
ClassMethods.R
|
# S4 class definitions for the package's result objects. The "u"/"m"
# prefixes denote univariate/multivariate variants of: Spec(ification),
# Fit, Sim(ulation), For(ecast) and Roll(ing forecast). Slots are plain
# lists/arrays filled by the corresponding estimation/simulation routines.
setClass("uGASFit", representation(ModelInfo = "list", GASDyn = "list", Estimates = "list", Testing = "list",
Data = "list"))
setClass("mGASFit", representation(ModelInfo = "list", GASDyn = "list", Estimates = "list", Data = "list"))
setClass("uGASSim", representation(ModelInfo = "list", GASDyn = "list", Data = "list"))
setClass("mGASSim", representation(ModelInfo = "list", GASDyn = "list", Data = "list"))
setClass("uGASSpec", representation(Spec = "list"))
setClass("mGASSpec", representation(Spec = "list"))
setClass("uGASFor", representation(Forecast = "list", Bands = "array", Draws = "matrix", Info = "list",
Data = "list"))
setClass("mGASFor", representation(Forecast = "list", Bands = "array", Draws = "array", Info = "list",
Data = "list"))
setClass("uGASRoll", representation(Forecast = "list", Info = "list", Testing = "list", Data = "list"))
setClass("mGASRoll", representation(Forecast = "list", Info = "list", Data = "list"))
setMethod("show", "uGASSpec", function(object) {
Dist = getDist(object)
ScalingType = getScalingType(object)
GASPar = unlist(getGASPar(object))
GASPar = names(GASPar[GASPar])
cat("\n-------------------------------------------------------")
cat("\n- Univariate GAS Specification -")
cat("\n-------------------------------------------------------")
cat(paste("\nConditional distribution"))
DistInfo(Dist, FULL = FALSE)
cat(paste("\nGAS specification"))
cat("\n-------------------------------------------------------")
cat(paste("\nScore scaling type: ", ScalingType))
cat(paste("\nTime varying parameters: ", paste(GASPar, collapse = ", ")))
#
cat("\n-------------------------------------------------------")
})
setMethod("show", "mGASSpec", function(object) {
Dist = getDist(object)
ScalingType = getScalingType(object)
GASPar = unlist(getGASPar(object))
GASPar = names(GASPar[GASPar])
ScalarParameters = object@Spec$ScalarParameters
cat("\n-------------------------------------------------------")
cat("\n- Multivariate GAS Specification -")
cat("\n-------------------------------------------------------")
cat(paste("\nConditional distribution"))
DistInfo(Dist, FULL = FALSE)
cat(paste("\nGAS specification"))
cat("\n-------------------------------------------------------")
cat(paste("\nScore scaling type: ", ScalingType))
cat(paste("\nTime varying parameters: ", paste(GASPar, collapse = ", ")))
cat(paste("\nScalar Parameters: ", paste(TypeOfParameters(ScalarParameters))))
#
cat("\n-------------------------------------------------------")
})
# show() method for uGASFit: prints the specification, coefficient inference,
# implied unconditional parameters, information criteria, convergence status
# and elapsed estimation time of a fitted univariate GAS model.
setMethod("show", "uGASFit", function(object) {
    Spec = getSpec(object)
    Dist = getDist(object)
    iT = object@ModelInfo$iT
    iK = NumberParameters(Dist)
    IC = getIC(object)
    ParNames = FullNamesUni(Dist)
    ScalingType = getScalingType(Spec)
    GASPar = unlist(getGASPar(Spec))
    # keep only the names of the parameters flagged as time varying
    GASPar = names(GASPar[GASPar])
    Inference = object@Estimates$Inference
    vKappa = object@Estimates$lParList$vKappa
    mB = object@Estimates$lParList$mB
    # fixed point of theta_tilde = vKappa + mB %*% theta_tilde, i.e. the
    # unconditional value of the reparameterized (tilde) parameters
    vTheta_Tilde_Unc = solve(diag(iK) - mB) %*% vKappa
    # map back to the natural parameter space
    vTheta_Unc = c(MapParameters_univ(vTheta_Tilde_Unc, Dist, iK))
    names(vTheta_Unc) = ParNames
    elapsedTime = object@ModelInfo$elapsedTime
    cat(paste("\n------------------------------------------"))
    cat(paste("\n- Univariate GAS Fit -"))
    cat(paste("\n------------------------------------------"))
    cat("\n\nModel Specification:\t")
    cat(paste("\nT = ", iT))
    cat(paste("\nConditional distribution: ", Dist))
    cat(paste("\nScore scaling type: ", ScalingType))
    cat(paste("\nTime varying parameters: ", paste(GASPar, collapse = ", ")))
    #
    cat(paste("\n------------------------------------------"))
    cat(paste("\nEstimates:\n"))
    print(Inference)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nUnconditional Parameters:\n"))
    print(vTheta_Unc)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nInformation Criteria:\n"))
    print(IC)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nConvergence:\t"))
    cat(convergence(object))
    cat(paste("\n------------------------------------------"))
    cat(paste("\n\nElapsed time:", round(as.double(elapsedTime, units = "mins"), 2L), "mins"))
})
# summary() method for uGASFit: same report as show(), extended with
# residual diagnostics (Jarque-Bera normality test and Ljung-Box tests on
# the standardized residuals and on their squares).
setMethod("summary", "uGASFit", function(object) {
    Spec = getSpec(object)
    Dist = getDist(object)
    iT = object@ModelInfo$iT
    iK = NumberParameters(Dist)
    IC = getIC(object)
    ParNames = FullNamesUni(Dist)
    ScalingType = getScalingType(Spec)
    GASPar = unlist(getGASPar(Spec))
    # keep only the names of the parameters flagged as time varying
    GASPar = names(GASPar[GASPar])
    Inference = object@Estimates$Inference
    vKappa = object@Estimates$lParList$vKappa
    mB = object@Estimates$lParList$mB
    # unconditional (long-run) value of the reparameterized parameters
    vTheta_Tilde_Unc = solve(diag(iK) - mB) %*% vKappa
    vTheta_Unc = c(MapParameters_univ(vTheta_Tilde_Unc, Dist, iK))
    names(vTheta_Unc) = ParNames
    elapsedTime = object@ModelInfo$elapsedTime
    # standardized residuals feed the three diagnostic tests below
    vRes = residuals(object, standardize = TRUE)
    JB = JarqueBera(vRes)
    mBoxRes = LjungBox(vRes)
    mBoxRes2 = LjungBox(vRes^2)
    mTest = rbind(JB[c("Statistic", "p-Value")],
    mBoxRes,
    mBoxRes2)
    # row labels: 1 Jarque-Bera row + 3 Ljung-Box lags for levels + 3 for squares
    rownames(mTest) = c(
    " Jarque-Bera Test R Chi^2 ",
    " Ljung-Box Test R Q(10) ",
    " Ljung-Box Test R Q(15) ",
    " Ljung-Box Test R Q(20) ",
    " Ljung-Box Test R^2 Q(10) ",
    " Ljung-Box Test R^2 Q(15) ",
    " Ljung-Box Test R^2 Q(20) ")
    cat(paste("\n------------------------------------------"))
    cat(paste("\n- Univariate GAS Fit -"))
    cat(paste("\n------------------------------------------"))
    cat("\n\nModel Specification:\t")
    cat(paste("\nT = ", iT))
    cat(paste("\nConditional distribution: ", Dist))
    cat(paste("\nScore scaling type: ", ScalingType))
    cat(paste("\nTime varying parameters: ", paste(GASPar, collapse = ", ")))
    #
    cat(paste("\n------------------------------------------"))
    cat(paste("\nEstimates:\n"))
    print(Inference)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nUnconditional Parameters:\n"))
    print(vTheta_Unc)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nInformation Criteria:\n"))
    print(IC)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nAnalysis of Residuals:\n"))
    print(mTest)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nConvergence:\t"))
    cat(convergence(object))
    cat(paste("\n------------------------------------------"))
    cat(paste("\n\nElapsed time:", round(as.double(elapsedTime, units = "mins"), 2L), "mins"))
})
# show() method for mGASFit: prints the specification, coefficient inference,
# implied unconditional parameters, information criteria, convergence status
# and elapsed estimation time of a fitted multivariate GAS model.
setMethod("show", "mGASFit", function(object) {
    Spec = getSpec(object)
    iT = object@ModelInfo$iT
    iN = object@ModelInfo$iN
    iK = object@ModelInfo$iK
    IC = getIC(object)
    Dist = getDist(Spec)
    ScalingType = getScalingType(Spec)
    GASPar = unlist(getGASPar(Spec))
    # keep only the names of the parameters flagged as time varying
    GASPar = names(GASPar[GASPar])
    ParNames = FullNamesMulti(iN, Dist)
    vKappa = object@Estimates$lParList$vKappa
    mB = object@Estimates$lParList$mB
    # unconditional (long-run) value of the reparameterized parameters
    vTheta_Tilde_Unc = solve(diag(iK) - mB) %*% vKappa
    vTheta_Unc = c(MapParameters_multi(vTheta_Tilde_Unc, Dist, iN, iK))
    names(vTheta_Unc) = ParNames
    Inference = object@Estimates$Inference
    elapsedTime = object@ModelInfo$elapsedTime
    cat(paste("\n------------------------------------------"))
    cat(paste("\n- Multivariate GAS Fit -"))
    cat(paste("\n------------------------------------------"))
    cat("\n\nModel Specification:\t")
    cat(paste("\nT = ", iT))
    cat(paste("\nConditional distribution: ", Dist))
    cat(paste("\nScore scaling type: ", ScalingType))
    cat(paste("\nTime varying parameters: ", paste(GASPar, collapse = ", ")))
    #
    cat(paste("\n------------------------------------------"))
    cat(paste("\nEstimates:\n"))
    print(Inference)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nUnconditional Parameters:\n"))
    print(vTheta_Unc)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nInformation Criteria:\n"))
    print(IC)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nConvergence:\t"))
    cat(convergence(object))
    cat(paste("\n------------------------------------------"))
    cat(paste("\n\nElapsed time:", round(as.double(elapsedTime, units = "mins"), 2L), "mins"))
})
# summary() method for mGASFit.
# NOTE(review): this body is identical to show(); unlike summary,uGASFit it
# produces no residual diagnostics — confirm whether that is intentional.
setMethod("summary", "mGASFit", function(object) {
    Spec = getSpec(object)
    iT = object@ModelInfo$iT
    iN = object@ModelInfo$iN
    iK = object@ModelInfo$iK
    IC = getIC(object)
    Dist = getDist(Spec)
    ScalingType = getScalingType(Spec)
    GASPar = unlist(getGASPar(Spec))
    # keep only the names of the parameters flagged as time varying
    GASPar = names(GASPar[GASPar])
    ParNames = FullNamesMulti(iN, Dist)
    vKappa = object@Estimates$lParList$vKappa
    mB = object@Estimates$lParList$mB
    # unconditional (long-run) value of the reparameterized parameters
    vTheta_Tilde_Unc = solve(diag(iK) - mB) %*% vKappa
    vTheta_Unc = c(MapParameters_multi(vTheta_Tilde_Unc, Dist, iN, iK))
    names(vTheta_Unc) = ParNames
    Inference = object@Estimates$Inference
    elapsedTime = object@ModelInfo$elapsedTime
    cat(paste("\n------------------------------------------"))
    cat(paste("\n- Multivariate GAS Fit -"))
    cat(paste("\n------------------------------------------"))
    cat("\n\nModel Specification:\t")
    cat(paste("\nT = ", iT))
    cat(paste("\nConditional distribution: ", Dist))
    cat(paste("\nScore scaling type: ", ScalingType))
    cat(paste("\nTime varying parameters: ", paste(GASPar, collapse = ", ")))
    #
    cat(paste("\n------------------------------------------"))
    cat(paste("\nEstimates:\n"))
    print(Inference)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nUnconditional Parameters:\n"))
    print(vTheta_Unc)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nInformation Criteria:\n"))
    print(IC)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nConvergence:\t"))
    cat(convergence(object))
    cat(paste("\n------------------------------------------"))
    cat(paste("\n\nElapsed time:", round(as.double(elapsedTime, units = "mins"), 2L), "mins"))
})
# show() method for uGASSim: prints the simulation setup (sample size,
# distribution, scaling), the GAS recursion parameters (vKappa, mA, mB)
# and the implied unconditional parameters.
setMethod("show", "uGASSim", function(object) {
    iT = object@ModelInfo$iT
    Dist = getDist(object)
    ScalingType = getScalingType(object)
    ParNames = FullNamesUni(Dist)
    iK = NumberParameters(Dist)
    mA = object@ModelInfo$mA
    mB = object@ModelInfo$mB
    vKappa = object@ModelInfo$vKappa
    names(vKappa) = ParNames
    # unconditional (long-run) value of the reparameterized parameters
    vTheta_Tilde_Unc = solve(diag(iK) - mB) %*% vKappa
    vTheta_Unc = c(MapParameters_univ(vTheta_Tilde_Unc, Dist, iK))
    names(vTheta_Unc) = ParNames
    cat(paste("\n------------------------------------------"))
    cat(paste("\n- Univariate GAS Sim -"))
    cat(paste("\n------------------------------------------"))
    cat("\n\nModel Specification:\t")
    cat(paste("\nT = ", iT))
    cat(paste("\nConditional distribution: ", Dist))
    cat(paste("\nScore scaling type: ", ScalingType))
    #
    cat(paste("\n------------------------------------------"))
    cat(paste("\nParameters:\n"))
    cat("vKappa:\n")
    print(vKappa)
    cat("mA:\n")
    print(mA)
    cat("mB:\n")
    print(mB)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nUnconditional Parameters:\n"))
    print(vTheta_Unc)
})
# show() method for mGASSim: prints the simulation setup (sample size,
# cross-sectional dimension, distribution, scaling), the GAS recursion
# parameters (vKappa, mA, mB) and the implied unconditional parameters.
setMethod("show", "mGASSim", function(object) {
    iT = object@ModelInfo$iT
    iN = object@ModelInfo$iN
    Dist = getDist(object)
    ScalingType = getScalingType(object)
    ParNames = FullNamesMulti(iN, Dist)
    iK = NumberParameters(Dist, iN)
    mA = object@ModelInfo$mA
    mB = object@ModelInfo$mB
    vKappa = object@ModelInfo$vKappa
    names(vKappa) = ParNames
    # unconditional (long-run) value of the reparameterized parameters
    vTheta_Tilde_Unc = solve(diag(iK) - mB) %*% vKappa
    vTheta_Unc = c(MapParameters_multi(vTheta_Tilde_Unc, Dist, iN, iK))
    names(vTheta_Unc) = ParNames
    cat(paste("\n------------------------------------------"))
    # bug fix: the banner previously read "Univariate GAS Sim" although this
    # is the multivariate (mGASSim) method; aligned with show,mGASFit.
    cat(paste("\n- Multivariate GAS Sim -"))
    cat(paste("\n------------------------------------------"))
    cat("\n\nModel Specification:\t")
    cat(paste("\nT = ", iT))
    cat(paste("\nN = ", iN))
    cat(paste("\nConditional distribution: ", Dist))
    cat(paste("\nScore scaling type: ", ScalingType))
    #
    cat(paste("\n------------------------------------------"))
    cat(paste("\nParameters:\n"))
    cat("vKappa:\n")
    print(vKappa)
    cat("mA:\n")
    print(mA)
    cat("mB:\n")
    print(mB)
    cat(paste("\n------------------------------------------"))
    cat(paste("\nUnconditional Parameters:\n"))
    print(vTheta_Unc)
})
# show() method for uGASFor: prints the forecast setup and the parameter
# point forecasts; for rolling forecasts the realized out-of-sample values
# are shown next to the forecasts. Long tables are truncated to head/tail.
setMethod("show", "uGASFor", function(object) {
    iH = object@Info$iH
    Dist = getDist(object)
    ScalingType = getScalingType(object)
    Roll = object@Info$Roll
    PointForecast = getForecast(object)
    if (Roll) {
        # one-step-ahead rolling forecast: pair each forecast with its realization
        PointForecast = cbind(PointForecast, realized = object@Data$vOut)
    }
    cat(paste("\n------------------------------------------"))
    cat(paste("\n- Univariate GAS Forecast -"))
    cat(paste("\n------------------------------------------"))
    cat("\n\nModel Specification")
    cat(paste("\nConditional distribution: ", Dist))
    cat(paste("\nScore scaling type: ", ScalingType))
    cat(paste("\nHorizon: ", iH))
    cat(paste("\nRolling forecast: ", Roll))
    #
    cat(paste("\n------------------------------------------"))
    cat(paste("\nParameters forecast:\n"))
    # show only the first/last 5 rows of long forecast tables
    if (nrow(PointForecast) > 10L ) {
        print(head(PointForecast, 5L))
        cat(paste("\n....................\n"))
        print(tail(PointForecast, 5L))
    } else {
        print(PointForecast)
    }
})
# show() method for mGASFor: multivariate counterpart of show,uGASFor;
# additionally reports the number of series iN.
setMethod("show", "mGASFor", function(object) {
    iH = object@Info$iH
    iN = object@Info$iN
    Dist = getDist(object)
    ScalingType = getScalingType(object)
    Roll = object@Info$Roll
    PointForecast = getForecast(object)
    if (Roll) {
        # one-step-ahead rolling forecast: pair each forecast with its realization
        PointForecast = cbind(PointForecast, realized = object@Data$vOut)
    }
    cat(paste("\n------------------------------------------"))
    cat(paste("\n- Multivariate GAS Forecast -"))
    cat(paste("\n------------------------------------------"))
    cat("\n\nModel Specification")
    cat(paste("\nConditional distribution: ", Dist))
    cat(paste("\nScore scaling type: ", ScalingType))
    cat(paste("\nHorizon: ", iH))
    cat(paste("\nNumber of series: ", iN))
    cat(paste("\nRolling forecast: ", Roll))
    #
    cat(paste("\n------------------------------------------"))
    cat(paste("\nParameters forecast:\n"))
    # show only the first/last 5 rows of long forecast tables
    if (nrow(PointForecast) > 10L) {
        print(head(PointForecast, 5L))
        cat(paste("\n....................\n"))
        print(tail(PointForecast, 5L))
    } else {
        print(PointForecast)
    }
})
# show() method for uGASRoll: prints the rolling-forecast specification,
# the (truncated) parameter forecasts and the elapsed computation time.
setMethod("show", "uGASRoll", function(object) {
    Dist = getDist(object)
    ScalingType = getScalingType(object)
    PointForecast = getForecast(object)
    elapsedTime = object@Info$elapsedTime
    cat(paste("\n------------------------------------------"))
    cat(paste("\n- Univariate GAS Rolling Forecast -"))
    cat(paste("\n------------------------------------------"))
    cat("\n\nModel Specification")
    cat(paste("\nConditional distribution: ", Dist))
    cat(paste("\nScore scaling type: ", ScalingType))
    #
    cat(paste("\n------------------------------------------"))
    cat(paste("\nParameters forecast:\n"))
    # show only the first/last 5 rows of long forecast tables
    if (nrow(PointForecast) > 10L) {
        print(head(PointForecast, 5L))
        cat(paste("\n....................\n"))
        print(tail(PointForecast, 5L))
    } else {
        print(PointForecast)
    }
    cat(paste("\n------------------------------------------"))
    cat(paste("\n\nElapsed time:", round(as.double(elapsedTime, units = "mins"), 2L), "mins"))
})
# show() method for mGASRoll: multivariate counterpart of show,uGASRoll.
setMethod("show", "mGASRoll", function(object) {
    Dist = getDist(object)
    ScalingType = getScalingType(object)
    PointForecast = getForecast(object)
    elapsedTime = object@Info$elapsedTime
    cat(paste("\n------------------------------------------"))
    cat(paste("\n- Multivariate GAS Rolling Forecast -"))
    cat(paste("\n------------------------------------------"))
    cat("\n\nModel Specification")
    cat(paste("\nConditional distribution: ", Dist))
    cat(paste("\nScore scaling type: ", ScalingType))
    #
    cat(paste("\n------------------------------------------"))
    cat(paste("\nParameters forecast:\n"))
    # show only the first/last 5 rows of long forecast tables
    if (nrow(PointForecast) > 10L) {
        print(head(PointForecast, 5L))
        cat(paste("\n....................\n"))
        print(tail(PointForecast, 5L))
    } else {
        print(PointForecast)
    }
    cat(paste("\n------------------------------------------"))
    cat(paste("\n\nElapsed time:", round(as.double(elapsedTime, units = "mins"), 2L), "mins"))
})
# plot() method for uGASFit.
# If `which` is NULL an interactive menu loops until the user enters 0;
# otherwise the single plot indexed by `which` is drawn and the loop exits.
# Plot types: 1 filtered parameters, 2 conditional moments, 3 PIT histogram,
# 4 observed series, 5 filtered mean vs realized data.
setMethod("plot", signature(x = "uGASFit", y = "missing"), function(x, which = NULL) {
    iK = x@ModelInfo$iK
    iT = x@ModelInfo$iT
    vY = x@Data$vY
    FilteredParameters = getFilteredParameters(x)[1:iT, , drop = FALSE]
    Moments = getMoments(x)[1:iT, , drop = FALSE]
    vU = pit(x)
    # use the xts index as the time axis when available, else 1..T
    if (is(vY, "xts")) {
        vDates = as.Date(index(vY))
    } else {
        vDates = 1:length(vY)
    }
    PlotType = 1L
    while (PlotType > 0L) {
        if (is.null(which)) {
            vMenu = PlotMenu(x)
            cat(paste("Print 1-",length(vMenu)," or 0 to exit", sep = ""))
            PlotType = menu(vMenu)
        } else {
            PlotType = which
        }
        if (PlotType == 1L) {
            PlotMultipleSeries(FilteredParameters, iK, iT, vDates)
        }
        if (PlotType == 2L) {
            PlotMultipleSeries(Moments, 4L, iT, vDates)
        }
        if (PlotType == 3L) {
            PlotPit(vU, x@Testing$PitTest$Hist)
        }
        if (PlotType == 4L) {
            PlotSingleSeries(vY, iT, vDates)
        }
        if (PlotType == 5L) {
            # first moment column is the conditional mean
            mRealVsFiltered = cbind(Moments[, 1L], vY)
            PlotForecastVsRealized_Univ(mRealVsFiltered, vDates, x)
        }
        if (!is.null(which)) {
            PlotType = 0L
        }
    }
})
# plot() method for mGASFit.
# Interactive (menu-driven) unless `which` is given. Plot types:
# 1 filtered parameters, 2 conditional means then covariances, 3 data.
setMethod("plot", signature(x = "mGASFit", y = "missing"), function(x, which = NULL) {
    iK = x@ModelInfo$iK
    iN = x@ModelInfo$iN
    iT = x@ModelInfo$iT
    # data is stored N x T; transpose to T x N for plotting
    mY = t(x@Data$mY)
    vSeriesName = colnames(mY)
    if (is.null(vSeriesName)) {
        vSeriesName = paste("series", 1:iN, sep = "")
    }
    # derive a time axis: xts index, parsable rownames, or plain 1..T
    if (is(mY, "xts")) {
        vDates = as.Date(index(mY))
    } else {
        vDates = rownames(mY)
        vDates = try(as.Date(vDates), silent = TRUE)
        if (is.null(vDates) || is(vDates, "try-error")) {
            vDates = 1:nrow(mY)
        }
    }
    PlotType = 1L
    while (PlotType > 0L) {
        if (is.null(which)) {
            cat(paste("Print 1-3 or 0 to exit"))
            PlotType = menu(PlotMenu(x))
        } else {
            PlotType = which
        }
        if (PlotType == 1L) {
            series2plot = getFilteredParameters(x)[1:iT, , drop = FALSE]
        }
        if (PlotType == 2L) {
            lMoments = getMoments(x)
            mMean = lMoments[["mean"]][1:iT, , drop = FALSE]
            dimnames(mMean) = list(vDates, paste(vSeriesName, "mean", sep = "."))
            aCov = lMoments[["cov"]][,, 1:iT, drop = FALSE]
            dimnames(aCov) = list(vSeriesName, vSeriesName, vDates)
            # flatten the N x N x T covariance array into a T x (pairs) matrix
            mCov = Array2Matrix(aCov, type = 2L)
        }
        if (PlotType == 3L) {
            series2plot = mY
        }
        if (PlotType == 1L) {
            PlotMultipleSeries(series2plot, iK, iT, vDates)
        }
        if (PlotType == 2L) {
            PlotMultipleSeries(mMean, iN, iT, vDates)
            foo = readline("Print enter to plot covariances\n:")
            PlotMultipleSeries(mCov, ncol(mCov), iT, vDates)
        }
        if (PlotType == 3L) {
            PlotMultipleSeries(series2plot, iN, iT, vDates)
        }
        if (!is.null(which)) {
            PlotType = 0L
        }
    }
})
# plot() method for uGASSim.
# Interactive (menu-driven) unless `which` is given. Plot types:
# 1 simulated parameter paths, 2 conditional moments, 3 simulated series.
setMethod("plot", signature(x = "uGASSim", y = "missing"), function(x, which = NULL) {
    iK = x@ModelInfo$iK
    iT = x@ModelInfo$iT
    vY = x@Data$vY
    # simulations have no calendar; plot against 1..T
    vDates = 1:iT
    PlotType = 1L
    while (PlotType > 0L) {
        if (is.null(which)) {
            cat(paste("Print 1-3 or 0 to exit"))
            PlotType = menu(PlotMenu(x))
        } else {
            PlotType = which
        }
        if (PlotType == 1L) {
            series2plot = getFilteredParameters(x)[1:iT, , drop = FALSE]
        }
        if (PlotType == 2L) {
            series2plot = getMoments(x)[1:iT, , drop = FALSE]
        }
        if (PlotType == 3L) {
            series2plot = vY
        }
        if (PlotType == 1L) {
            PlotMultipleSeries(series2plot, iK, iT, vDates)
        }
        if (PlotType == 2L) {
            PlotMultipleSeries(series2plot, iK, iT, vDates)
        }
        if (PlotType == 3L) {
            PlotSingleSeries(series2plot, iT, vDates)
        }
        if (!is.null(which)) {
            PlotType = 0L
        }
    }
})
# plot() method for mGASSim.
# Interactive (menu-driven) unless `which` is given. Plot types:
# 1 simulated parameter paths, 2 conditional means then covariances,
# 3 simulated series.
setMethod("plot", signature(x = "mGASSim", y = "missing"), function(x, which = NULL) {
    iK = x@ModelInfo$iK
    iT = x@ModelInfo$iT
    iN = x@ModelInfo$iN
    # data is stored N x T; transpose to T x N for plotting
    mY = t(x@Data$mY)
    # simulations have no calendar; plot against 1..T
    vDates = 1:iT
    vSeriesName = colnames(mY)
    PlotType = 1L
    while (PlotType > 0L) {
        if (is.null(which)) {
            cat(paste("Print 1-3 or 0 to exit"))
            PlotType = menu(PlotMenu(x))
        } else {
            PlotType = which
        }
        if (PlotType == 1L) {
            series2plot = getFilteredParameters(x)[1:iT, , drop = FALSE]
        }
        if (PlotType == 2L) {
            lMoments = getMoments(x)
            mMean = lMoments[["mean"]][1:iT, , drop = FALSE]
            dimnames(mMean) = list(vDates, paste(vSeriesName, "mean", sep = "."))
            aCov = lMoments[["cov"]][,, 1:iT, drop = FALSE]
            dimnames(aCov) = list(vSeriesName, vSeriesName, vDates)
            # flatten the N x N x T covariance array into a T x (pairs) matrix
            mCov = Array2Matrix(aCov, type = 2L)
        }
        if (PlotType == 3L) {
            series2plot = mY
        }
        if (PlotType == 1L) {
            PlotMultipleSeries(series2plot, iK, iT, vDates)
        }
        if (PlotType == 2L) {
            PlotMultipleSeries(mMean, iN, iT, vDates)
            foo = readline("Print enter to plot covariances\n:")
            PlotMultipleSeries(mCov, ncol(mCov), iT, vDates)
        }
        if (PlotType == 3L) {
            PlotMultipleSeries(series2plot, iN, iT, vDates)
        }
        if (!is.null(which)) {
            PlotType = 0L
        }
    }
})
# plot() method for uGASFor.
# Two menu sets depending on whether the object holds a multi-step forecast
# (!Roll: forecasts, confidence bands, with/without in-sample path, moments)
# or a one-step rolling forecast (Roll: forecasts, forecast vs realized,
# moments, log-scores). Interactive unless `which` is supplied.
setMethod("plot", signature(x = "uGASFor", y = "missing"), function(x, which = NULL) {
    iK = x@Info$iK
    vY = x@Data$vY
    iH = x@Info$iH
    iT = length(vY)
    Roll = x@Info$Roll
    vOut = x@Data$vOut
    FilteredParameters = x@Data$FilteredParameters
    FilteredParameters = FilteredParameters[-nrow(FilteredParameters), ] #remove one step ahead forecast
    ParametersForecast = getForecast(x)
    cBands = x@Bands
    Dist = getDist(x)
    vLS = LogScore(x)
    # build in-sample and out-of-sample time axes (calendar if data is xts)
    if (is(vY, "xts")) {
        vDates_is = as.Date(index(vY))
        if (Roll) {
            vDates_os = as.Date(index(vOut))
        } else {
            # extend the observed calendar by iH steps of the same spacing
            DiffTime = vDates_is[2L] - vDates_is[1L]
            vDates_os = seq(tail(vDates_is, 1L) + DiffTime, tail(vDates_is, 1L) + DiffTime * iH, by = DiffTime)
        }
        ParametersForecast = xts(ParametersForecast, vDates_os)
        FilteredParameters = xts(FilteredParameters, vDates_is)
    } else {
        vDates_is = 1:iT
        vDates_os = (iT + 1L):(iT + iH)
    }
    PlotType = 1L
    while (PlotType > 0L) {
        if (!Roll) {
            if (is.null(which)) {
                cat(paste("Print 1-6 or 0 to exit"))
                PlotType = menu(PlotMenu(x))
            } else {
                PlotType = which
            }
            if (PlotType == 1L) {
                PlotMultipleSeries(ParametersForecast, iK, iH, vDates_os)
            }
            if (PlotType == 2L) {
                PlotMultipleSeries_Bands(ParametersForecast, iK, iH, vDates_os, cBands)
            }
            if (PlotType == 3L) {
                PlotMultipleSeries_wis(FilteredParameters, ParametersForecast, iK, iH, vDates_os, vDates_is)
            }
            if (PlotType == 4L) {
                PlotMultipleSeries_Bands_wis(FilteredParameters, ParametersForecast, iK, iH, vDates_os,
                vDates_is, cBands)
            }
            if (PlotType == 5L) {
                Moments_is = getMoments(x)
                PlotMultipleSeries(Moments_is, 4L, iH, vDates_os)
            }
            if (PlotType == 6L) {
                Moments_os = getMoments(x)
                # recompute in-sample moments from the filtered parameter path
                Moments_is = EvalMoments_univ(t(FilteredParameters), Dist)
                colnames(Moments_is) = paste("M", 1:4, sep = "")
                PlotMultipleSeries_wis(Moments_is, Moments_os, 4L, iH, vDates_os, vDates_is)
            }
        } else {
            if (is.null(which)) {
                cat(paste("Print 1-4 or 0 to exit"))
                PlotType = menu(PlotMenu(x))
            } else {
                PlotType = which
            }
            if (PlotType == 1L) {
                PlotMultipleSeries(ParametersForecast, iK, iH, vDates_os)
            }
            if (PlotType == 2L) {
                Moments_os = getMoments(x)
                # first moment column is the conditional mean
                Mu = Moments_os[, 1L]
                mRealVsForecast = cbind(Mu, vOut)
                PlotForecastVsRealized_Univ(mRealVsForecast, vDates_os, x)
            }
            if (PlotType == 3L) {
                Moments_os = getMoments(x)
                PlotMultipleSeries(Moments_os, 4L, iH, vDates_os)
            }
            if (PlotType == 4L) {
                PlotSingleSeries(vLS, iH, vDates_os)
            }
        }
        if (!is.null(which)) {
            PlotType = 0L
        }
    }
})
# plot() method for mGASFor.
# Two menu sets depending on whether the object holds a multi-step forecast
# (!Roll) or a one-step rolling forecast (Roll). Interactive unless `which`
# is supplied.
setMethod("plot", signature(x = "mGASFor", y = "missing"), function(x, which = NULL) {
    iK = x@Info$iK
    iN = x@Info$iN
    mY = x@Data$mY
    mOut = x@Data$mOut
    iH = x@Info$iH
    iT = ncol(mY)
    Roll = x@Info$Roll
    vLS = LogScore(x)
    FilteredParameters = x@Data$FilteredParameters
    FilteredParameters = FilteredParameters[-nrow(FilteredParameters), ] #remove one step ahead forecast
    ParametersForecast = getForecast(x)
    cBands = x@Bands
    # build in-sample and out-of-sample time axes (calendar if data is xts)
    if (is(mY, "xts")) {
        vDates_is = as.Date(index(mY))
        if (Roll) {
            vDates_os = as.Date(index(mOut))
        } else {
            # extend the observed calendar by iH steps of the same spacing
            DiffTime = vDates_is[2L] - vDates_is[1L]
            vDates_os = seq(tail(vDates_is, 1L) + DiffTime, tail(vDates_is, 1L) + DiffTime * iH, by = DiffTime)
        }
        ParametersForecast = xts(ParametersForecast, vDates_os)
        FilteredParameters = xts(FilteredParameters, vDates_is)
    } else {
        vDates_is = 1:iT
        vDates_os = (iT + 1L):(iT + iH)
    }
    PlotType = 1L
    while (PlotType > 0L) {
        if (!Roll) {
            if (is.null(which)) {
                cat(paste("Print 1-4 or 0 to exit"))
                PlotType = menu(PlotMenu(x))
            } else {
                PlotType = which
            }
            if (PlotType == 1L) {
                PlotMultipleSeries(ParametersForecast, iK, iH, vDates_os)
            }
            if (PlotType == 2L) {
                PlotMultipleSeries_Bands(ParametersForecast, iK, iH, vDates_os, cBands)
            }
            if (PlotType == 3L) {
                PlotMultipleSeries_wis(FilteredParameters, ParametersForecast, iK, iH, vDates_os, vDates_is)
            }
            if (PlotType == 4L) {
                PlotMultipleSeries_Bands_wis(FilteredParameters, ParametersForecast, iK, iH, vDates_os,
                vDates_is, cBands)
            }
        } else {
            if (is.null(which)) {
                cat(paste("Print 1-4 or 0 to exit"))
                PlotType = menu(PlotMenu(x))
            } else {
                PlotType = which
            }
            if (PlotType == 1L) {
                # NOTE(review): iT (in-sample length) is passed here although
                # ParametersForecast and vDates_os have length iH — the !Roll
                # branch above passes iH; confirm PlotMultipleSeries' 3rd arg.
                PlotMultipleSeries(ParametersForecast, iK, iT, vDates_os)
            }
            if (PlotType == 2L) {
                Moments_os = getMoments(x)
                mForcasted = Moments_os[["mean"]]
                PlotForecastVsRealized_Multi(t(mOut), mForcasted, iN, vDates_os, x)
            }
            if (PlotType == 3L) {
                Moments_os = getMoments(x)
                mMean = Moments_os[["mean"]]
                colnames(mMean) = colnames(t(mOut))
                cCov = Moments_os[["cov"]]
                PlotMultipleSeries(mMean, iN, iH, vDates_os)
                foo = readline("Print enter to plot covariances\n:")
                PlotCovariances(cCov, iN, iH, vDates_os, colnames(t(mOut)))
            }
            if (PlotType == 4L) {
                PlotSingleSeries(vLS, iH, vDates_os)
            }
        }
        if (!is.null(which)) {
            PlotType = 0L
        }
    }
})
# plot() method for uGASRoll.
# Interactive (menu-driven) unless `which` is given. Plot types:
# 1 parameter forecasts, 2 forecast vs realized mean, 3 moments, 4 PIT.
setMethod("plot", signature(x = "uGASRoll", y = "missing"), function(x, which = NULL) {
    iK = x@Info$iK
    vY = x@Data$vY
    iH = x@Info$ForecastLength
    # last iH observations are the out-of-sample realizations
    vOut = tail(vY, iH)
    iT = length(vY)
    ParametersForecast = getForecast(x)
    vU = pit(x)
    if (is(vY, "xts")) {
        vDates_os = tail(as.Date(index(vY)), iH)
        ParametersForecast = xts(ParametersForecast, vDates_os)
    } else {
        vDates_os = 1:iH
    }
    PlotType = 1L
    while (PlotType > 0L) {
        if (is.null(which)) {
            cat(paste("Print 1-4 or 0 to exit"))
            PlotType = menu(PlotMenu(x))
        } else {
            PlotType = which
        }
        if (PlotType == 1L) {
            # NOTE(review): iT (full-sample length) is passed here although
            # ParametersForecast and vDates_os have length iH; the moments
            # plot below passes iH — confirm PlotMultipleSeries' 3rd arg.
            PlotMultipleSeries(ParametersForecast, iK, iT, vDates_os)
        }
        if (PlotType == 2L) {
            Moments_os = getMoments(x)
            # first moment column is the conditional mean
            Mu = Moments_os[, 1L]
            mRealVsForecast = cbind(Mu, vOut)
            PlotForecastVsRealized_Univ(mRealVsForecast, vDates_os, x)
        }
        if (PlotType == 3L) {
            Moments_os = getMoments(x)
            PlotMultipleSeries(Moments_os, 4L, iH, vDates_os)
        }
        if (PlotType == 4L) {
            PlotPit(vU, x@Testing$PitTest$Hist)
        }
        if (!is.null(which)) {
            PlotType = 0L
        }
    }
})
# plot() method for mGASRoll.
# Interactive (menu-driven) unless `which` is given. Plot types:
# 1 parameter forecasts, 2 forecast vs realized means, 3 means/covariances.
setMethod("plot", signature(x = "mGASRoll", y = "missing"), function(x, which = NULL) {
    iN = x@Info$iN
    iK = x@Info$iK
    mY = x@Data$mY
    iH = x@Info$ForecastLength
    # last iH columns of the N x T data matrix are the realizations
    mOut = t(tail(t(mY), iH))
    iT = ncol(mY)
    ParametersForecast = getForecast(x)
    if (is(mY, "xts")) {
        vDates_os = tail(as.Date(index(mY)), iH)
        ParametersForecast = xts(ParametersForecast, vDates_os)
    } else {
        vDates_os = 1:iH
    }
    PlotType = 1L
    while (PlotType > 0L) {
        if (is.null(which)) {
            cat(paste("Print 1-4 or 0 to exit"))
            PlotType = menu(PlotMenu(x))
        } else {
            PlotType = which
        }
        if (PlotType == 1L) {
            # NOTE(review): iT (full-sample length) is passed here although
            # ParametersForecast and vDates_os have length iH; the moments
            # plot below passes iH — confirm PlotMultipleSeries' 3rd arg.
            PlotMultipleSeries(ParametersForecast, iK, iT, vDates_os)
        }
        if (PlotType == 2L) {
            Moments_os = getMoments(x)
            mForcasted = Moments_os[["mean"]]
            PlotForecastVsRealized_Multi(t(mOut), mForcasted, iN, vDates_os, x)
        }
        if (PlotType == 3L) {
            Moments_os = getMoments(x)
            mMean = Moments_os[["mean"]]
            colnames(mMean) = colnames(t(mOut))
            cCov = Moments_os[["cov"]]
            PlotMultipleSeries(mMean, iN, iH, vDates_os)
            foo = readline("Print enter to plot covariances\n:")
            PlotCovariances(cCov, iN, iH, vDates_os, colnames(t(mOut)))
        }
        if (!is.null(which)) {
            PlotType = 0L
        }
    }
})
# Generic accessor for the filtered time-varying parameter path.
getFilteredParameters = function(object) {
    UseMethod("getFilteredParameters")
}
# Shared implementation: returns the filtered parameters as a T x K matrix
# (one column per distribution parameter, labelled via getParNames).
.getFilteredParameters <- function(object) {
    # Fit and Sim objects (both uni- and multivariate) store the filtered
    # path in the same GASDyn$mTheta slot; the original code tested the
    # four classes in two redundant branches assigning the identical slot.
    if (is(object, "uGASFit") || is(object, "mGASFit") ||
        is(object, "uGASSim") || is(object, "mGASSim"))
        mTheta = object@GASDyn$mTheta
    # stored K x T internally; expose as T x K
    mTheta = t(mTheta)
    parNames = getParNames(object)
    colnames(mTheta) = parNames
    return(mTheta)
}
setMethod("getFilteredParameters", signature(object = "uGASFit"), .getFilteredParameters)
setMethod("getFilteredParameters", signature(object = "mGASFit"), .getFilteredParameters)
setMethod("getFilteredParameters", signature(object = "uGASSim"), .getFilteredParameters)
setMethod("getFilteredParameters", signature(object = "mGASSim"), .getFilteredParameters)
# Generic accessor for the raw observations stored in a GAS object.
getObs = function(object) {
    UseMethod("getObs")
}
# Shared implementation: univariate classes keep their data in the vector
# slot Data$vY, multivariate classes in the matrix slot Data$mY. If the
# object matches none of the registered classes, `Data` is never assigned
# and the final return() errors, as in the original chain of if statements.
.getObs <- function(object) {
    for (sUniClass in c("uGASFit", "uGASSim", "uGASFor", "uGASRoll")) {
        if (is(object, sUniClass)) {
            Data = object@Data$vY
        }
    }
    for (sMultiClass in c("mGASFit", "mGASSim")) {
        if (is(object, sMultiClass)) {
            Data = object@Data$mY
        }
    }
    return(Data)
}
setMethod("getObs", signature(object = "uGASFit"), .getObs)
setMethod("getObs", signature(object = "mGASFit"), .getObs)
setMethod("getObs", signature(object = "uGASSim"), .getObs)
setMethod("getObs", signature(object = "mGASSim"), .getObs)
setMethod("getObs", signature(object = "uGASFor"), .getObs)
setMethod("getObs", signature(object = "uGASRoll"), .getObs)
# Generic accessor for the conditional moments implied by the filtered or
# forecast parameters.
getMoments = function(object) {
    UseMethod("getMoments")
}
# Shared implementation. The storage slot depends only on the object kind
# (Fit / Sim / Forecast-or-Roll); the original code tested each of the
# eight classes in its own branch.
.getMoments <- function(object) {
    if (is(object, "uGASFit") || is(object, "mGASFit")) {
        Moments = object@Estimates$Moments
    }
    if (is(object, "uGASSim") || is(object, "mGASSim")) {
        Moments = object@Data$Moments
    }
    if (is(object, "uGASFor") || is(object, "mGASFor") ||
        is(object, "uGASRoll") || is(object, "mGASRoll")) {
        Moments = object@Forecast$Moments
    }
    Dist = getDist(object)
    # label the four moment columns only for univariate distributions; in
    # the multivariate case Moments is a list (mean/cov) with no columns.
    # (&& replaces the original scalar use of &.)
    if (!is.null(Moments) && DistType(Dist) != "multivariate") {
        colnames(Moments) = paste("M", 1:4, sep = "")
    }
    return(Moments)
}
setMethod("getMoments", signature(object = "uGASFit"), .getMoments)
setMethod("getMoments", signature(object = "mGASFit"), .getMoments)
setMethod("getMoments", signature(object = "uGASSim"), .getMoments)
setMethod("getMoments", signature(object = "mGASSim"), .getMoments)
setMethod("getMoments", signature(object = "uGASFor"), .getMoments)
setMethod("getMoments", signature(object = "mGASFor"), .getMoments)
setMethod("getMoments", signature(object = "uGASRoll"), .getMoments)
setMethod("getMoments", signature(object = "mGASRoll"), .getMoments)
# coef() implementation shared across fitted, simulated and rolling objects.
#
# @param object  a uGASFit/mGASFit, uGASSim/mGASSim or uGASRoll/mGASRoll.
# @param do.list for Fit objects: return both the parameter list and the
#                inference table; for Roll objects: return the per-refit
#                coefficient list instead of a stacked matrix.
# @return estimates vector/matrix/list depending on class and do.list.
.getCoef <- function(object, do.list = FALSE) {
    # (|| replaces the original scalar use of | in the if conditions)
    if (is(object, "uGASFit") || is(object, "mGASFit")) {
        if (do.list) {
            ans = list(lCoef = object@Estimates$lParList, mCoef = object@Estimates$Inference)
        } else {
            ans = object@Estimates$Inference[, "Estimate"]
        }
    }
    if (is(object, "uGASSim") || is(object, "mGASSim")) {
        # simulations: return the true data-generating parameters
        ans = list(vKappa = object@ModelInfo$vKappa, mA = object@ModelInfo$mA, mB = object@ModelInfo$mB)
    }
    if (is(object, "uGASRoll") || is(object, "mGASRoll")) {
        ans = object@Forecast$Coef
        if (!do.list) {
            # stack the "Estimate" column of each refit into one matrix
            ans = as.matrix(do.call(rbind, lapply(ans, function(x) x$mCoef[, "Estimate"])))
        }
        rownames(ans) = paste("refit", 1:nrow(ans))
    }
    return(ans)
}
setMethod("coef", signature(object = "uGASFit"), .getCoef)
setMethod("coef", signature(object = "mGASFit"), .getCoef)
setMethod("coef", signature(object = "uGASSim"), .getCoef)
setMethod("coef", signature(object = "mGASSim"), .getCoef)
# Quantiles of the conditional distribution at every point in time.
#
# @param x     uGASFit / uGASSim (filtered parameters) or uGASRoll
#              (one-step rolling forecasts).
# @param probs probability levels.
# @return matrix of quantiles, one column per level ("q.<prob>").
.getQuantile <- function(x, probs = c(0.01, 0.05)) {
    # fitted and simulated objects use the filtered parameter path,
    # rolling objects the forecast path
    if (is(x, "uGASFit"))
        mTheta = getFilteredParameters(x)
    if (is(x, "uGASSim"))
        mTheta = getFilteredParameters(x)
    if (is(x, "uGASRoll"))
        mTheta = getForecast(x)
    mQuantile = Quantiles(t(mTheta), getDist(x), probs)
    colnames(mQuantile) = paste("q.", probs, sep = "")
    mQuantile
}
# Quantiles for uGASFor objects.
# Rolling forecasts evaluate the quantile function at each one-step-ahead
# parameter set; multi-step forecasts use the analytic quantile for the
# first step and empirical quantiles of the simulated draws afterwards.
#
# @param x     a uGASFor object.
# @param probs probability levels.
# @return iH x length(probs) matrix ("T+h" rows, "q.<prob>" columns).
.getQuantile_Sim <- function(x, probs = c(0.01, 0.05)) {
    bRoll = x@Info$Roll
    iH = x@Info$iH
    if (bRoll) {
        mTheta_tph = getForecast(x)
        mQuantile = matrix(NA, iH, length(probs), dimnames = list(paste("T+", 1:iH, sep = ""),
                                                                  paste("q.", probs, sep = "")))
        for (h in 1:iH) {
            mQuantile[h, ] = Quantiles(t(mTheta_tph[h, , drop = FALSE]), Dist = getDist(x), probs)
        }
    } else {
        mDraws = x@Draws
        # multi-step quantiles need simulated draws
        # (&& replaces the original scalar use of &)
        if (is.null(mDraws) && iH > 1) {
            stop("ReturnDraws = TRUE needs to be selected in the
           UniGASFor function for multistep ahead quantile evaluation.")
        }
        mQuantile = matrix(NA, iH, length(probs), dimnames = list(paste("T+", 1:iH, sep = ""),
                                                                  paste("q.", probs, sep = "")))
        ## one step ahead: analytic quantile at the forecast parameters
        vTheta_tp1 = getForecast(x)[1, ,drop = FALSE]
        mQuantile[1, ] = Quantiles(t(vTheta_tp1), Dist = getDist(x), probs)
        ## multi step ahead: empirical quantiles of the simulated draws
        if (iH > 1) {
            for (h in 2:iH) {
                mQuantile[h, ] = quantile(mDraws[, h], probs)
            }
        }
    }
    return(mQuantile)
}
# Expected Shortfall (ES) at each point in time, obtained by numerically
# integrating the quantile function over [0, prob] and dividing by prob.
#
# @param object uGASFit/uGASSim (filtered parameters) or uGASFor/uGASRoll
#               (forecast parameters).
# @param probs  tail probability levels.
# @return matrix of ES values, one column per level ("q.<prob>").
# NOTE(review): adaptIntegrate is from the cubature package (not base R) —
# presumably imported elsewhere in the package; the _Sim variant below uses
# stats::integrate instead. Confirm this asymmetry is intentional.
.getES <- function(object, probs = c(0.01, 0.05)) {
    if (is(object, "uGASFit"))
        mTheta = getFilteredParameters(object)
    if (is(object, "uGASSim"))
        mTheta = getFilteredParameters(object)
    if (is(object, "uGASFor"))
        mTheta = getForecast(object)
    if (is(object, "uGASRoll"))
        mTheta = getForecast(object)
    Dist = getDist(object)
    mQuantile = Quantiles(t(mTheta), Dist, probs)
    colnames(mQuantile) = paste("q.", probs, sep = "")
    mES = mQuantile
    for (i in 1:nrow(mES)) {
        for (j in 1:ncol(mES)) {
            # integral of the quantile function over the lower tail
            mES[i, j] = adaptIntegrate(Quantiles, lowerLimit = 1e-7, upperLimit = probs[j],
            mTheta = t(mTheta[i, , drop = FALSE]), Dist = Dist)$integral
        }
    }
    # normalize each column by its tail probability
    mES = t(t(mES) / probs)
    return(mES)
}
# Expected Shortfall for uGASFor objects.
# Rolling forecasts and the first step of a multi-step forecast integrate
# the quantile function over the lower tail; further steps use the mean of
# the simulated draws that fall below the corresponding quantile.
#
# @param object a uGASFor object.
# @param probs  tail probability levels.
# @return iH x length(probs) matrix of ES values.
.getES_Sim <- function(object, probs = c(0.01, 0.05)) {
    bRoll = object@Info$Roll
    # (the original assigned object@Info$iH twice; once is enough)
    iH = object@Info$iH
    Dist = getDist(object)
    if (bRoll) {
        mTheta_tph = getForecast(object)
        mQuantile = quantile(object, probs)
        mES = mQuantile
        # (removed an unused vTheta_tp1 assignment from this branch)
        for (h in 1:iH) {
            for (j in 1:ncol(mES)) {
                mES[h, j] = integrate(Quantiles, lower = 1e-7, upper = probs[j],
                mTheta = t(mTheta_tph[h, , drop = FALSE]), Dist = Dist)$value/probs[j]
            }
        }
    } else {
        mDraws = object@Draws
        # multi-step ES needs simulated draws
        # (&& replaces the original scalar use of &)
        if (is.null(mDraws) && iH > 1) {
            stop("ReturnDraws = TRUE needs to be selected in the
           UniGASFor function for multistep ahead quantile evaluation.")
        }
        mQuantile = quantile(object, probs)
        mES = mQuantile
        ## one step ahead: analytic via quantile-function integration
        vTheta_tp1 = getForecast(object)[1, ,drop = FALSE]
        for (j in 1:ncol(mES)) {
            mES[1, j] = integrate(Quantiles, lower = 1e-7, upper = probs[j],
            mTheta = t(vTheta_tp1), Dist = Dist)$value/probs[j]
        }
        ## multi step ahead: empirical tail mean of the simulated draws
        if (iH > 1) {
            for (h in 2:iH) {
                vDraws = mDraws[, h]
                for (j in 1:ncol(mES)) {
                    mES[h, j] = mean(vDraws[vDraws < mQuantile[h, j]])
                }
            }
        }
    }
    return(mES)
}
# quantile() dispatch: forecast objects use the simulation-aware variant,
# the remaining univariate classes the analytic one.
setMethod("quantile", signature(x = "uGASFit"), .getQuantile)
setMethod("quantile", signature(x = "uGASSim"), .getQuantile)
setMethod("quantile", signature(x = "uGASFor"), .getQuantile_Sim)
setMethod("quantile", signature(x = "uGASRoll"), .getQuantile)
# Generic for Expected Shortfall.
ES = function(object, ...) {
    UseMethod("ES")
}
# ES() dispatch mirrors quantile(): simulation-aware variant for forecasts.
setMethod("ES", signature(object = "uGASFit"), .getES)
setMethod("ES", signature(object = "uGASSim"), .getES)
setMethod("ES", signature(object = "uGASFor"), .getES_Sim)
setMethod("ES", signature(object = "uGASRoll"), .getES)
# Generic accessor for the probability integral transforms (PIT) of the
# observations under the estimated/forecast conditional distribution.
pit = function(object) {
    UseMethod("pit")
}
setMethod("pit", signature(object = "uGASFit"), function(object) object@Estimates$vU)
setMethod("pit", signature(object = "uGASFor"), function(object) object@Forecast$vU)
setMethod("pit", signature(object = "uGASRoll"), function(object) object@Forecast$vU)
# Generic accessor for the predictive log-scores of forecast objects.
LogScore = function(object) {
    UseMethod("LogScore")
}
setMethod("LogScore", signature(object = "uGASFor"), function(object) object@Forecast$vLS)
setMethod("LogScore", signature(object = "mGASFor"), function(object) object@Forecast$vLS)
setMethod("LogScore", signature(object = "uGASRoll"), function(object) object@Forecast$vLS)
setMethod("LogScore", signature(object = "mGASRoll"), function(object) object@Forecast$vLS)
# Generic accessor for the point forecasts of the time-varying parameters.
getForecast = function(object) {
    UseMethod("getForecast")
}
setMethod("getForecast", signature(object = "uGASFor"), function(object) return(object@Forecast$PointForecast))
setMethod("getForecast", signature(object = "mGASFor"), function(object) return(object@Forecast$PointForecast))
setMethod("getForecast", signature(object = "uGASRoll"), function(object) return(object@Forecast$PointForecast))
setMethod("getForecast", signature(object = "mGASRoll"), function(object) return(object@Forecast$PointForecast))
# Generic accessor for the "working" (tilde) parameter names of a spec.
getPwNames = function(object) {
    UseMethod("getPwNames")
}
setMethod("getPwNames", signature(object = "uGASSpec"), function(object) return(object@Spec$PwNames))
setMethod("getPwNames", signature(object = "mGASSpec"), function(object) return(object@Spec$PwNames))
# Generic for model residuals.
# NOTE(review): this redefines `residuals` at package level, masking
# stats::residuals — presumably intentional so setMethod attaches here.
residuals = function(object, ...) {
    UseMethod("residuals")
}
# Residuals of a fitted univariate GAS model: observation minus conditional
# mean; optionally standardized by the conditional standard deviation.
setMethod("residuals", signature(object = "uGASFit"), function(object, standardize = TRUE) {
    vY = object@Data$vY
    iT = length(vY)
    # first column = conditional mean, second = conditional variance
    mMoments = getMoments(object)[1:iT, ]
    vRes = vY - mMoments[, 1L]
    if (standardize) {
        vRes = vRes/sqrt(mMoments[, 2L])
    }
    return(vRes)
})
# Residuals of a fitted multivariate GAS model: observation minus
# conditional mean; optionally whitened per period via the inverse Cholesky
# factor of the conditional covariance.
setMethod("residuals", signature(object = "mGASFit"), function(object, standardize = TRUE) {
    mY = object@Data$mY
    iT = ncol(mY)
    lMoments = getMoments(object)
    mMean = lMoments$mean[1:iT, ]
    # data stored N x T; work in T x N
    mRes = t(mY) - mMean
    if (standardize) {
        aCov = lMoments$cov
        for (i in 1:iT) {
            # whiten with the period-i conditional covariance
            mRes[i, ] = solve(chol(aCov[,, i])) %*% t(mRes[i, ,drop = FALSE])
        }
    }
    return(mRes)
})
# Out-of-sample residuals of a univariate rolling forecast: realized value
# minus forecast mean; optionally standardized by the forecast volatility.
setMethod("residuals", signature(object = "uGASRoll"), function(object, standardize = TRUE) {
    iH = object@Info$ForecastLength
    # last iH observations are the out-of-sample realizations
    vY = tail(object@Data$vY, iH)
    mMoments = getMoments(object)
    vRes = vY - mMoments[, 1L]
    if (standardize) {
        vRes = vRes/sqrt(mMoments[, 2L])
    }
    return(vRes)
})
# Out-of-sample residuals of a multivariate rolling forecast; optionally
# whitened per period via the inverse Cholesky factor of the forecast
# covariance.
setMethod("residuals", signature(object = "mGASRoll"), function(object, standardize = TRUE) {
    iH = object@Info$ForecastLength
    # last iH periods of the N x T data matrix, transposed to iH x N
    mY = tail(t(object@Data$mY), iH)
    lMoments = getMoments(object)
    mMean = lMoments$mean
    mRes = mY - mMean
    if (standardize) {
        aCov = lMoments$cov
        for (i in 1:iH) {
            # whiten with the period-i forecast covariance
            mRes[i, ] = solve(chol(aCov[,, i])) %*% t(mRes[i, ,drop = FALSE])
        }
    }
    return(mRes)
})
# Generic accessor for the optimizer convergence flag of a fitted model.
convergence = function(object) {
    UseMethod("convergence")
}
setMethod("convergence", signature(object = "uGASFit"), function(object) return(object@ModelInfo$convergence))
setMethod("convergence", signature(object = "mGASFit"), function(object) return(object@ModelInfo$convergence))
|
c3fa7ffac715552409518febba7fc8c4a85c5c1b
|
bc3a58c0f3abd24f4f64f641152c09b79efefe38
|
/man/PCASNPSDS.Rd
|
9ca5556f4851854e47700297f0d01fdc5eb0c59c
|
[
"MIT"
] |
permissive
|
isglobal-brge/dsOmics
|
96aa2594cbe009f2899d99fdc5be43a96f50d6bf
|
78fee19320cdf360db7ec1aed2fb07ee4c533951
|
refs/heads/master
| 2023-04-07T09:23:17.202083
| 2023-03-15T09:31:40
| 2023-03-15T09:31:40
| 158,839,360
| 1
| 12
|
MIT
| 2021-02-02T10:21:06
| 2018-11-23T13:55:17
|
R
|
UTF-8
|
R
| false
| true
| 644
|
rd
|
PCASNPSDS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PCASNPSDS.R
\name{PCASNPSDS}
\alias{PCASNPSDS}
\title{Principal Component Analysis (PCA) on SNP genotype data}
\usage{
PCASNPSDS(gds, prune, ld.threshold)
}
\arguments{
\item{gds}{\code{GDS} object}
\item{prune}{\code{bool} \code{TRUE} to prune the GDS file using \code{SNPRelate::snpgdsLDpruning}}
\item{ld.threshold}{Threshold for the pruning (see \code{\link{snpgdsLDpruning}})}
}
\value{
\code{data frame} with the sample IDs and the principal component coordinates to be plotted.
}
\description{
Principal Component Analysis (PCA) on SNP genotype data
}
|
b2185e0a589a5b9f8d25d68074448ade913cc725
|
2145787ee6f08e741dc6e6d78364781d394f6c12
|
/hla3.R
|
f27a95b3fb5292e5c6102e3a8e8024fcfcc79af2
|
[] |
no_license
|
RonSchuyler/HLAEpitopes_R
|
5d61ea44cde9bd35d63651b9346df69a91a0c999
|
f4e7e187e90524c08365e409865c2b86cc0ebfa6
|
refs/heads/master
| 2020-06-07T08:32:04.745504
| 2019-07-24T18:00:30
| 2019-07-24T18:00:30
| 192,974,678
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 38,430
|
r
|
hla3.R
|
#
# 10/1/07
# Functions to read data files and create hashes.
#
# Count allele occurrences in an HLA genotype table (legacy/slow variant).
#
# Reads the CSV at `file_name`, keeps the subjects whose Dx column matches
# the pattern `dx` (so dx = "Control" covers both "Matched Control" and
# "Random Control"), and tallies the alleles found in the two columns
# "<loci>.1" and "<loci>.2".
#
# Returns a named list mapping allele name -> count.
get_allele_counts_slow <- function(loci="HLA.DRB1", dx="Control", file_name="../data/mhc.csv"){
  mhc_table <- read.table(file_name, header=TRUE, sep=',', as.is=TRUE, strip.white=TRUE);
  # rows of subjects whose diagnosis matches the dx pattern
  subject_rows <- grep(dx, mhc_table$Dx);
  countHash <- list();
  for(row_index in subject_rows){
    # each subject carries two allele copies, stored in <loci>.1 / <loci>.2
    for(copy in 1:2){
      locus_col <- which(names(mhc_table) == paste(loci, ".", copy, sep=''));
      allele <- as.character(mhc_table[row_index, locus_col]); # e.g. DRB1*0301
      # [[ ]] on a missing name yields NULL, so this initializes or increments
      current <- countHash[[allele]];
      countHash[[allele]] <- if(is.null(current)) 1 else current + 1;
    }
  }
  return(countHash);
}
# Get allele counts for RA dataset.
# See print_allele_counts_RA
# Count allele occurrences for one locus in the RA dataset.
#
# Like get_allele_counts_slow(), but iterates column-major and skips
# allele names shorter than 9 characters (partial typings such as
# "DRB1*02" are not specific enough to map to sequence data).
# Returns a list (hash) of alleleName -> count.
get_allele_counts <- function(loci="HLA.DRB1", dx="Control", file_name="../data/mhc.csv"){
	mhc_table <- read.table(file_name, header=TRUE, sep=',', as.is=TRUE, strip.white=TRUE);
	# grep: dx="Control" also matches "Matched Control"/"Random Control".
	rows <- grep(dx, mhc_table$Dx);
	countHash <- list();
	# The two allele columns for this locus are adjacent: <loci>.1, <loci>.2.
	locus1 <- paste(loci, ".", 1, sep='');
	locus1_col <- which(names(mhc_table) == locus1);
	for(col in (locus1_col:(locus1_col+1))){
		#print(sprintf("column %i",col));
		for(row_index in rows){
			allele <- as.character(mhc_table[row_index, col]);
			if(nchar(allele, type="chars") < 9){
				# not a complete allele name, maybe DRB1*02
				# short names are not specific enough to get sequence data
				next;
			}
			#print(sprintf("%i,%i: %s",row_index, col, allele));
			if(allele %in% names(countHash)){
				countHash[[allele]] <- countHash[[allele]] + 1;
			}
			else{
				countHash[[allele]] <- 1;
			}
		}
	}
	return(countHash);
}
# See print_allele_counts_copd
# control dx=0, affected dx=4
# Count allele occurrences for the COPD dataset.
#
# dx selects patients by GOLD stage: 0 = control, 4 = affected.
# loci "DR" uses columns 5-6, anything else ("DQ") uses columns 7-8.
# A '-' entry means "same allele as the previous column" (homozygote
# shorthand). Short (<9 char) allele names are skipped, as in
# get_allele_counts(). Returns a list (hash) of alleleName -> count.
# NOTE(review): grep(dx, mhc_table$GOLD) matches dx as a substring of
# the GOLD value; safe only while GOLD stages are single digits -- verify.
get_allele_counts_copd <- function(loci="DR", dx=0, file_name="../data/COPD_Cleaned_Up.csv"){
	#print(sprintf("get_a_c_copd: loci=%s, dx:%i, fn:%s",loci,dx,file_name));
	mhc_table <- read.table(file_name, header=TRUE, sep=',', as.is=TRUE, strip.white=TRUE);
	rows <- grep(dx, mhc_table$GOLD);
	countHash <- list();
	if(loci=="DR"){
		col1 <- 5;
	}
	else{
		col1 <- 7; # DQ
	}
	for(col in col1:(col1+1)){
		for(row_index in rows){
			allele <- as.character(mhc_table[row_index, col]);
			if(allele == '-'){
				# dash means same as previous column
				allele <- as.character(mhc_table[row_index, (col-1)]);
			}
			if(nchar(allele, type="chars") < 9){
				# not a complete allele name, maybe DRB1*02
				# short names are not specific enough to get sequence data
				next;
			}
			if( allele %in% names(countHash)){
				countHash[[allele]] <- countHash[[allele]] + 1;
			}
			else{
				countHash[[allele]] <- 1;
			}
		}
	}
	return(countHash);
}
# Print contents of affected and control count lists.
print_allele_counts <- function(aff, con){
alleles <- sort(union(names(aff), names(con)));
for(allele in alleles){
if(allele %in% names(aff)){
ac <- aff[[allele]];
}
else{
ac <- 0;
}
if(allele %in% names(con)){
cc <- con[[allele]];
}
else{
cc <- 0;
}
pv <- pvalue(ac,cc);
print(sprintf("%s %i %i p=%g",allele,ac,cc,pv));
#print(sprintf("%s %i %i",allele,ac,cc));
}
}
# Return a 2-column matrix from two lists.
two_lists_as_mat <- function(list1, list2){
allNames <- sort(union(names(list1), names(list2)));
#mat <- matrix(0, nrow=length(allNames), ncol=2);
mat <- c();
currentRow <- 0;
for(nm in allNames){
currentRow <- currentRow+1;
c1 <- 0;
c2 <- 0;
if(nm %in% names(list1)){
#mat[currentRow,1] <- list1[[nm]];
c1 <- list1[[nm]];
}
if(nm %in% names(list2)){
c2 <- list2[[nm]];
#mat[currentRow,2] <- list2[[nm]];
}
vec <- matrix(c(c1, c2), nrow=1);
rownames(vec) <- nm;
#names(vec) <- nm;
#names(mat[currentRow,]) <- nm;
mat <- rbind(mat, vec);
}
return(mat);
}
# Same results as:
#tmp<-p_correct(get_module_counts(get_polys(get_seq_mat(dataset="RA")),dataset="RA"))
# Per-allele affected/control counts and FDR-screened p-values, RA dataset.
#
# Loads counts for DRB1 (loci="DR") or DQB1, builds a 2-column
# Affected/Control matrix, prints the significant rows via p_correct(),
# and returns the count matrix.
print_allele_counts_RA <- function(loci="DR"){
	if(loci == "DR"){
		print("Allele counts for RA DR:");
		control <- get_allele_counts(loci="HLA.DRB1", dx="Control", file_name="../data/mhc.csv");
		affected <- get_allele_counts(loci="HLA.DRB1", dx="Affected", file_name="../data/mhc.csv");
	}
	else{
		print("Allele counts for RA DQ:");
		control <- get_allele_counts(loci="HLA.DQB1", dx="Control", file_name="../data/mhc.csv");
		affected <- get_allele_counts(loci="HLA.DQB1", dx="Affected", file_name="../data/mhc.csv");
	}
	#print_allele_counts(affected, control);
	allele_counts <- two_lists_as_mat(affected, control);
	p_correct(allele_counts);
	return(allele_counts);
}
# Per-allele affected/control counts and FDR-screened p-values, COPD dataset.
#
# COPD counterpart of print_allele_counts_RA(): GOLD stage 0 = control,
# stage 4 = affected. Returns the 2-column count matrix.
print_allele_counts_copd <- function(loci="DR"){
	if(loci == "DR"){
		print("Allele counts for copd DR:");
		control <- get_allele_counts_copd(loci="DR", dx=0, file_name="../data/COPD_Cleaned_Up.csv");
		affected <- get_allele_counts_copd(loci="DR", dx=4, file_name="../data/COPD_Cleaned_Up.csv");
	}
	else{
		print("Allele counts for copd DQ:");
		control <- get_allele_counts_copd(loci="DQ", dx=0, file_name="../data/COPD_Cleaned_Up.csv");
		affected <- get_allele_counts_copd(loci="DQ", dx=4, file_name="../data/COPD_Cleaned_Up.csv");
	}
	#print_allele_counts(affected, control);
	allele_counts <- two_lists_as_mat(affected, control);
	p_correct(allele_counts);
	return(allele_counts);
}
# Get a list (hash) of aligned 0-padded sequences keyed by alleleName,
# given countHashes from get_allele_counts. Actual counts are not used,
# the hash names are just used as a list of allele names to get.
# see get_hash_values_as_matrix()
# depricated. replaced with ParseAlignments.R:get_padded_seqs_from_alignments()
# DEPRECATED: aligned, 0-padded sequence lookup from AlleleImport.txt.
#
# Replaced by ParseAlignments.R:get_padded_seqs_from_alignments().
# The stop() on the first line makes everything below unreachable; the
# body is retained only as documentation of the old file format
# (CSV rows: _, alleleName, _, offset, sequence).
get_padded_seqs <- function(affectedCounts, controlCounts, file_name="../AlleleImport.txt"){
  stop("get_padded_seqs() from AlleleImport.txt is no longer supported.");
	#print(sprintf("get_padded_seqs: %s", file_name));
	alleleNames <- union(names(affectedCounts), names(controlCounts));
	fileHandle <- file(file_name, open="r");
	header_line <- readLines(fileHandle,1);
	unpadded <- list();
	maxLen <- 0;
	while(1){
		line <- readLines(fileHandle,1);
		if(length(line) == 0){
			break;
		}
		element <- unlist(strsplit(line,",",fixed=TRUE));
		element <- gsub("\"","",element,fixed=TRUE);
		if(element[2] %in% alleleNames){
			# if the allele name is one we are looking for, add it to the list.
			offset <- as.numeric(element[4]);
			seq <- unlist(strsplit(element[5],split=""));
			if(offset >= 0){
				seq <- c(rep(0,offset),seq);
			}
			else{
				seq <- seq[(1 + offset*(-1)):length(seq)];
			}
			# list (hash) indexed by alleleName, seq is aligned at start,
			# possibly with different lengths
			unpadded[[element[2]]] <- seq;
			if(length(seq) > maxLen){
				maxLen <- length(seq);
			}
		}
	}
	padded <- list();
	for(alleleName in names(unpadded)){
		# pad it and add it
		u_seq <- unpadded[[alleleName]];
		padded[[alleleName]] <- c(u_seq, rep(0,(maxLen-length(u_seq))));
	}
	close(fileHandle);
	return(padded);
}
# Similar to what as.matrix should do.
get_hash_values_as_matrix <- function(hash){
mat <- c();
for(key in sort(names(hash))){
#value <- matrix(unlist(strsplit(hash[[key]], split="")),nrow=1);
value <- hash[[key]];
value <- matrix(value,nrow=1);
rownames(value) <- key;
if(is.null(mat)){
mat <- matrix(value, nrow=1, ncol=length(value));
rownames(mat) <- key;
}
else{
mat <- rbind(mat, value);
}
}
return(mat);
}
# posV (position vector) set of positions to check
# controlCounts and affectedCounts are hashes of counts, keyed on allele name
# paddedSeqs is a hash of 0-padded sequences, keyed on allele name
# Example setup:
# posV <- pocket1B;
# controlCounts <- get_allele_counts_copd(loci="DR", dx=0, file_name="../data/COPD_Cleaned_Up.csv");
# affectedCounts <- get_allele_counts_copd(loci="DR", dx=4, file_name="../data/COPD_Cleaned_Up.csv");
# paddedSeqs <- get_padded_seqs(affectedCounts, controlCounts);
# OR
# controlCounts <- get_allele_counts(loci="HLA.DQB1", dx="Control", file_name="../data/mhc.csv");
# affectedCounts <- get_allele_counts(loci="HLA.DQB1", dx="Affected", file_name="../data/mhc.csv");
# paddedSeqs <- get_padded_seqs(affectedCounts, controlCounts);
# Count amino-acid "modules" at a set of sequence positions.
#
# For each allele present in either count hash, extracts the residues at
# posV from its padded sequence and concatenates them into a module
# string; allele counts are then summed per module for affected and
# control separately. Alleles with a 0 (padding) at any posV position
# are skipped. Returns list(affectedHash, controlHash, pvaluesVec),
# where pvaluesVec follows the sorted order of the module keys.
get_position_counts <- function(posV, controlCounts, affectedCounts, paddedSeqs, doPrint=FALSE){
	allNames <- union(names(controlCounts), names(affectedCounts));
	affected <- list();
	control <- list();
	posV <- sort(posV);
	for(allele in allNames){
		if(allele %in% names(paddedSeqs)){
			if(any(paddedSeqs[[allele]][posV] == 0)){
				# 0 is the padding character, meaning this allele doesn't
				# have complete sequence data, so we don't count it.
				next;
			}
			# module is the string of amino acids at the posV set of positions
			# for this allele. Multiple alleles may have the same module.
			module <- paste(paddedSeqs[[allele]][posV], collapse='');
			if(module %in% names(affected)){
				if(allele %in% names(affectedCounts)){
					affected[[module]] <- affected[[module]] + affectedCounts[[allele]];
				}
			}
			else{
				if(allele %in% names(affectedCounts)){
					affected[[module]] <- affectedCounts[[allele]];
				}
			}
			if(module %in% names(control)){
				if(allele %in% names(controlCounts)){
					control[[module]] <- control[[module]] + controlCounts[[allele]];
				}
			}
			else{
				if(allele %in% names(controlCounts)){
					control[[module]] <- controlCounts[[allele]];
				}
			}
		}
	}
	positionsString <- sprintf(paste(posV,collapse=','));
	loop_count <- 0;
	pvaluesVec <- c();
	#allInfoList <- c(); # list of all info, same order as pvaluesVec
	# m=module key for counts
	for(m in sort(union(names(control), names(affected)))){
		if(m %in% names(control)){
			count_c <- control[[m]];
		}
		else{
			count_c <- 0;
		}
		if(m %in% names(affected)){
			count_a <- affected[[m]];
		}
		else{
			count_a <- 0;
		}
		# pvalue() comes from hla2.R.
		pv <- pvalue(count_a, count_c);
		loop_count <- loop_count + 1;
		#allInfoList[[loop_count]] <- sprintf("(%s): %s %i:%i p=%g",
			#positionsString, m, count_a, count_c, pv);
		if(doPrint){
			print(sprintf("%s:%i:%i p=%g", m, count_a, count_c, pv));
		}
		pvaluesVec <- c(pvaluesVec, pv);
	}
	# NOTE: the 4-element return (with allInfoList) was removed; callers
	# must not index element [[4]].
	#return(list(affected, control, pvaluesVec, allInfoList));
	return(list(affected, control, pvaluesVec));
}
# Pocket residue positions for MHC I.
# Note that some positions may belong to more than one pocket.
# (Up to 84 is domain 1, 97 and up is domain 2.)
# (From HistoCheck website.)
pocketA <- c(5, 7, 59, 63, 66, 70, 99, 159, 163, 167, 171);
pocketB <- c(7, 9, 24, 25, 34, 45, 63, 66, 67, 70, 99);
pocketC <- c(9, 70, 73, 74, 97);
pocketD <- c(99, 114, 156, 159, 160);
pocketE <- c(97, 114, 133, 147, 152, 156);
pocketF <- c(77, 80, 81, 84, 116, 123, 143, 146, 147);
# NOTE(review): duplicate assignment of pocketE (identical value; harmless).
pocketE <- c(97, 114, 133, 147, 152, 156);
# Pocket residue positions for MHC II beta.
# From Fu: "Pocket 4 of the HLA-DR(a,B1*0401) Molecule Is A Major Determinant
# of T Cell Recognition of Peptide"
# NOTE(review): this Fu set is immediately overwritten by the Floudas
# set below -- only the second definitions take effect.
pocket1B <- c(85,86,89,90);
pocket4B <- c(13,70,71,74,78);
pocket6B <- c(11,13);
pocket7B <- c(28,47,61,67,71);
pocket9B <- c(9,57);
# Pocket residue positions for MHC II beta (DRB1) w/i 5A of peptide.
# From Floudas: "A Predictive Method for the Evaluation of Peptide Binding
# in Pocket 1 of HLA-DRB1 via Global Minimization of Energy Interactions"
# More positions that contact peptide or TCR are listed on HistoCheck,
# but not grouped into pockets: 30, 32, 37, 38, 56, 64, 65, 66, 68, 69, 77, 81
pocket1B <- c(82, 85, 86, 89, 90); # +82
pocket4B <- c(13, 26, 70, 71, 74, 78); # +26
pocket6B <- c(11, 13, 71); # +71
pocket7B <- c(28, 47, 61, 67, 71);
pocket9B <- c(9, 57, 60, 61); # +60, 61
# Additional positions for DQ?
# DQ 9:37
# DQ 4:28
# From Baas:"Peptide binding motifs and specificities for HLA-DQ molecules"
# DQ 1:86,87
# DQ 2:77
# DQ 3:74
# DQ 4:13,26,28,71,74
# DQ 5:70,71,74
# DQ 6:9,30,70
# DQ 7:30,47,67,70,71
# DQ 9:9,30,37,38,57,59?
# Concatenations retain duplicates for positions shared between pockets.
pocketResiduesClassI <- c(pocketA,pocketB,pocketC,pocketD,pocketE,pocketF);
pocketResiduesClassII <- c(pocket1B,pocket4B,pocket6B,pocket7B,pocket9B);
source("hla2.R");
# Report which of the requested positions are polymorphic in a dataset.
#
# Loads affected/control allele counts for the given loci/dataset,
# builds the padded sequence matrix, and returns the intersection of
# posV with the polymorphic positions found (get_polys, hla2.R).
test_subsets <- function(posV=pocketResiduesClassII,dataset="copd", loci="DR"){
	# loci="HLA.DRB1"
	# loci="HLA.DQB1"
	posV <- sort(posV);
	print("From positions (posV):");
	print(posV);
	if(dataset == "copd"){
		cc <- get_allele_counts_copd(dx=0, loci=loci); # control
		ac <- get_allele_counts_copd(dx=4, loci=loci); # affected
	}
	else{
		# RA dataset uses the full HLA column names.
		if(loci == "DR"){
			loci <- "HLA.DRB1";
		}
		else if(loci == "DQ"){
			loci <- "HLA.DQB1";
		}
		cc <- get_allele_counts(loci=loci, dx="Control");
		ac <- get_allele_counts(loci=loci, dx="Affected");
	}
	padded_seq_hash <- get_padded_seqs(ac, cc);
	padded_seq_mat <- get_hash_values_as_matrix(padded_seq_hash);
	# Find the polymorphic positions in this dataset.
	my_polys <- get_polys(padded_seq_mat);
	test_pos <- intersect(my_polys, posV);
	print("intersection of posV and polymorphic positions in this dataset:");
	print(test_pos);
	return(test_pos);
}
# N of X
# Return all possible combinations of n elements of vector x.
# The returned list will have choose(length(x),n) vectors.
# N of X: all combinations of n elements drawn from vector x.
#
# Returns a list of choose(length(x), n) vectors, in lexicographic
# order of the sorted input (each combination is itself sorted).
# Edge cases preserved from the original hand-rolled odometer loop:
#   - n of NULL / NA / length 0 / 0 means "all of x" (one combination);
#   - n > length(x) warns and is clamped to length(x).
# Reimplemented with utils::combn, which produces the same ordering.
nofx <- function(n, x){
	if(is.null(n) || is.na(n) || length(n)==0 || n==0){
		n <- length(x);
	}
	if(n > length(x)){
		warning(sprintf("nofx: n=%i > length(x)=%i", n, length(x)),
			immediate.=TRUE);
		n <- length(x);
	}
	x <- sort(x);
	# combn on a sorted vector enumerates combinations lexicographically,
	# matching the order of the original implementation.
	return(combn(x, n, simplify=FALSE));
}
# Test all combinations of n elements of the position vector posSet.
# Only polymorhic positions are used.
# posSet is optional. If it is supplied, the values actually used are the
# intersection of posSet and all polymorphic positions in this dataset.
# If posSet is not supplied, all polymorphic positions are used.
# Test every n-position combination of posSet for module association.
#
# Restricts posSet to polymorphic positions, enumerates all
# choose(length(posSet), n) combinations via nofx(), collects the
# per-module p-values from get_position_counts() for each combination,
# screens the pooled p-values at the given FDR (fdr_dep, hla2.R), and
# returns the pooled p-value vector. Counts may be passed in (ac/cc)
# to avoid re-reading the data files.
testSetsofN <- function(n, posSet=c(), dataset="copd", loci="DR", ac=c(), cc=c(), FDR=.1){
	if(is.null(ac) || is.null(cc)){
		if(dataset=="copd"){
			cc <- get_allele_counts_copd(dx=0, loci=loci);
			ac <- get_allele_counts_copd(dx=4, loci=loci);
		}
		else{
			if(loci=="DR"){
				loci <- "HLA.DRB1";
			}
			else if(loci=="DQ"){
				loci <- "HLA.DQB1";
			}
			cc <- get_allele_counts(loci=loci, dx="Control");
			ac <- get_allele_counts(loci=loci, dx="Affected");
		}
	}
	padded_seq_hash <- get_padded_seqs(ac, cc);
	padded_seq_mat <- get_hash_values_as_matrix(padded_seq_hash);
	# Find the polymorphic positions in this dataset.
	my_polys <- get_polys(padded_seq_mat);
	if(!is.null(posSet)){
		posSet <- intersect(my_polys, posSet);
	}
	else{
		posSet <- my_polys;
	}
	#print("print allele counts:");
	#print_allele_counts(ac,cc);
	setList <- nofx(n, posSet);
	#print("");
	allPvalues <- c();
	for(listI in 1:length(setList)){
		#print(sprintf("combination %i, %s", listI, paste(setList[[listI]], collapse=' ')));
		acl <- get_position_counts(posV=setList[[listI]], controlCounts=cc,
								   affectedCounts=ac, paddedSeqs=padded_seq_hash);
		allPvalues <- c(allPvalues, acl[[3]]);
	}
	isSig <- fdr_dep(allPvalues, FDR); # returns 3 column matrix
	sumSig <- sum(isSig[,3]==1);
	if(sumSig > 0){
		print(sprintf("n=%i: %i of %i significant p-values at fdr=%.3f", n, sumSig, nrow(isSig), FDR));
	}
	return(allPvalues);
}
# Test one pocket's positions for module-level association.
#
# Restricts posV to the positions polymorphic in this dataset, counts
# the amino-acid modules at those positions in affected vs. control,
# and reports modules significant at FDR 0.1 (fdr_index, hla2.R).
# Returns the per-module p-value vector.
#
# Fixes: (1) caller-supplied affectedCounts/controlCounts were silently
# ignored, leaving ac/cc undefined; (2) the significant-module report
# indexed acl[[4]], which get_position_counts() no longer returns --
# module names are now rebuilt from the returned count hashes, in the
# same sorted order used to build the p-value vector.
test_pocket <- function(posV, loci="DR", dataset="copd", affectedCounts=c(),
						controlCounts=c(), silent=TRUE){
	if(is.null(controlCounts) || is.null(affectedCounts)){
		if(dataset=="copd"){
			cc <- get_allele_counts_copd(dx=0, loci=loci);
			ac <- get_allele_counts_copd(dx=4, loci=loci);
		}
		else{
			if(loci=="DR"){
				loci <- "HLA.DRB1";
			}
			else if(loci == "DQ"){
				loci <- "HLA.DQB1";
			}
			cc <- get_allele_counts(loci=loci, dx="Control");
			ac <- get_allele_counts(loci=loci, dx="Affected");
		}
	}
	else{
		# Use the caller-supplied counts instead of re-reading the files.
		cc <- controlCounts;
		ac <- affectedCounts;
	}
	padded_seq_hash <- get_padded_seqs(ac, cc);
	padded_seq_mat <- get_hash_values_as_matrix(padded_seq_hash);
	# Only polymorphic positions are informative.
	my_polys <- get_polys(padded_seq_mat);
	posV <- intersect(posV, my_polys);
	acl <- get_position_counts(posV, controlCounts=cc, affectedCounts=ac,
							   paddedSeqs=padded_seq_hash,doPrint=!silent);
	pvalues <- acl[[3]];
	fdr_i <- fdr_index(pvalues, .1);
	if(!is.null(fdr_i)){
		print("Significant");
		# Module keys in the same sorted order used to build pvalues
		# (see get_position_counts: sort(union(control, affected))).
		modules <- sort(union(names(acl[[2]]), names(acl[[1]])));
		for(i in 1:length(fdr_i)){
			print(sprintf("%s p=%g", modules[fdr_i[i]], pvalues[fdr_i[i]]));
		}
	}
	else{
		print("not significant");
	}
	return(pvalues);
}
# Run testSetsofN for every combination size from 1 to maxN.
#
# Loads the counts once, restricts to polymorphic positions (optionally
# intersected with posV), clamps maxN to the number of usable positions,
# and returns the concatenated p-values from all runs.
wrapper <- function(loci="DR", dataset="copd", posV=c(), FDR=.1, maxN=5){
	allPV <- c();
	if(dataset=="copd"){
		cc <- get_allele_counts_copd(dx=0, loci=loci);
		ac <- get_allele_counts_copd(dx=4, loci=loci);
	}
	else{
		if(loci=="DR"){
			loci <- "HLA.DRB1";
		}
		else if(loci == "DQ"){
			loci <- "HLA.DQB1";
		}
		cc <- get_allele_counts(loci=loci, dx="Control");
		ac <- get_allele_counts(loci=loci, dx="Affected");
	}
	padded_seq_hash <- get_padded_seqs(ac, cc);
	padded_seq_mat <- get_hash_values_as_matrix(padded_seq_hash);
	# Find the polymorphic positions in this dataset.
	posSet <- get_polys(padded_seq_mat);
	if(!is.null(posV)){
		posSet <- intersect(posSet, posV);
	}
	if(is.null(maxN) || maxN > length(posSet)){
		maxN <- length(posSet);
	}
	for(n in 1:maxN){
		print(sprintf("n=%i",n));
		allPV <- c(allPV, testSetsofN(n, posSet,loci=loci,dataset=dataset, ac=ac, cc=cc, FDR=FDR));
	}
	return(allPV);
}
# Class II beta pockets in a list, paired with their pocket numbers,
# for iteration in eachPocket().
pocketList <- c();
pocketList[[1]] <- pocket1B;
pocketList[[2]] <- pocket4B;
pocketList[[3]] <- pocket6B;
pocketList[[4]] <- pocket7B;
pocketList[[5]] <- pocket9B;
pocketNum <- c(1,4,6,7,9);
# Run test_pocket() for every class II pocket and pool the p-values.
#
# Prints a per-pocket report, then screens the pooled p-values at
# FDR 0.15 (fdr, hla2.R). Returns the pooled p-value vector.
eachPocket <- function(loci="DR", dataset="copd", silent=TRUE){
	allPV <- c();
	for(i in 1:length(pocketList)){
		print(sprintf("Pocket %i:", pocketNum[i]));
		ps <- test_pocket(posV=pocketList[[i]],loci=loci,dataset=dataset,
						silent=silent);
		allPV <- c(allPV, ps);
	}
	fdr(allPV, .15);
	return(allPV);
}
# Driver: run eachPocket() on all four dataset/loci combinations.
# Side-effect only (console output); the per-run p-values are assigned
# to locals and discarded.
testp <- function(){
	print("copd DQ");
	copdq <- eachPocket(loci="DQ", dataset="copd", silent=FALSE);
	print("copd DR");
	copdr <- eachPocket(loci="DR", dataset="copd", silent=FALSE);
	print("RA DQ");
	radq <- eachPocket(loci="DQ", dataset="RA");
	print("RA DR");
	radr <- eachPocket(loci="DR", dataset="RA");
}
# Get the matrix of sequences for the given loci and dataset.
# Rows are sequences, columns are aligned, sequences are 0-padded.
# see also get_padded_seqs
# Get the aligned, 0-padded sequence matrix for a loci/dataset pair.
#
# Rows are alleles (rownames = allele names), columns are aligned
# sequence positions; 0 marks padding. See also get_padded_seqs().
get_seq_mat <- function(loci="DR", dataset="copd"){
	print(sprintf("%s %s", dataset, loci));
	if(dataset == "copd"){
		cc <- get_allele_counts_copd(dx=0, loci=loci); # control
		ac <- get_allele_counts_copd(dx=4, loci=loci); # affected
	}
	else{
		if(loci == "DR"){
			loci <- "HLA.DRB1";
		}
		else if(loci == "DQ"){
			loci <- "HLA.DQB1";
		}
		cc <- get_allele_counts(loci=loci, dx="Control");
		ac <- get_allele_counts(loci=loci, dx="Affected");
	}
	mat <- get_hash_values_as_matrix(get_padded_seqs(ac, cc));
	return(mat);
}
# Calculate p-values and test for significance with fdr.
# counts is a 2-column matrix
# counts may have more than two columns, but expects first 2 to be Affected and Control counts.
# See two_lists_as_mat() to convert 2 count lists into a 2-column matrix.
# counts <- get_module_counts(...);
# Does not actually correct p-values for multiple comparisons.
# Attach p-values and an FDR accept/reject flag to a count matrix.
#
# counts: matrix with Affected counts in column 1 and Control in column
# 2 (see two_lists_as_mat). Per-row p-values come from pvalue_ue2()
# (hla2.R) under the given expected group sizes; rows accepted by
# fdr_dep_index() at the given FDR get Accepted=1. Despite the name,
# p-values themselves are NOT adjusted. Returns the augmented matrix,
# optionally sorted by Affected-Control difference, and prints the
# accepted rows.
p_correct <- function(counts, printAccepted=TRUE, orderByDiff=TRUE, FDR=.05,
					  significantFigures=4,
					  n_affected=50, n_control=50){
	p_values <- rep(0, nrow(counts));
	Accepted <- rep(0, nrow(counts));
	for(i in 1:length(p_values)){
		p_values[i] <- signif(pvalue_ue2(counts[i,1], counts[i,2],
							n_affected=n_affected, n_control=n_control),
							significantFigures);
	}
	fdri <- fdr_dep_index(p_values, r=FDR);
	Accepted[fdri] <- 1;
	mat <- cbind(counts, "p-value"=p_values, Accepted);
	colnames(mat)[1] <- "Affected";
	colnames(mat)[2] <- "Control";
	if(orderByDiff && nrow(mat) > 1){
		sort_order <- sort((counts[,1]-counts[,2]), decreasing=TRUE, index.return=TRUE);
		mat <- mat[sort_order$ix,];
	}
	if(printAccepted){
		w <- which(mat[,4] == 1);
		print(mat[w,]);
	}
	return(mat);
}
# Risler amino acid distance matrix.
# Risler amino acid distance matrix.
# 20x20, indexed by position in the alphabet "ACDEFGHIKLMNPQRSTVWY".
# Only the strict upper triangle is populated; each row's leading
# rep(0, k) fills the lower triangle and the zero diagonal. Callers
# (see distance()) must therefore index as dm[min(i,j), max(i,j)].
aadm <- matrix(c(
rep(0,1),92,50,12,40,39,71,13,21,22,29,23,61,10,17,4,7,6,78,49,
rep(0,2),98,93,95,98,99,96,94,92,95,94,99,90,93,88,91,90,100,83,
rep(0,3),30,63,65,88,56,53,60,67,36,86,41,57,38,55,54,89,66,
rep(0,4),40,47,71,18,21,32,41,19,58,2,8,9,14,15,81,50,
rep(0,5),65,83,30,53,31,59,46,83,37,46,42,48,34,77,4,
rep(0,6),86,56,58,59,64,51,85,51,52,37,49,52,87,61,
rep(0,7),75,80,78,84,63,96,68,64,66,77,73,97,74,
rep(0,8),29,3,32,33,70,20,20,14,16,1,73,45,
rep(0,9),38,44,31,72,13,3,19,26,25,82,43,
rep(0,10),9,36,76,27,24,23,26,5,76,43,
rep(0,11),54,84,25,28,39,35,24,87,60,
rep(0,12),79,16,24,7,28,27,82,57,
rep(0,13),69,62,62,68,69,96,85,
rep(0,14),5,10,12,17,80,42,
rep(0,15),6,8,18,75,35,
rep(0,16),2,11,74,45,
rep(0,17),15,80,47,
rep(0,18),72,48,
rep(0,19),70,
rep(0,20)),
nrow=20, ncol=20, byrow=TRUE);
# Calculate the distance between the two strings using the distance matrix dm.
distance <- function(string1, string2, dm=aadm){
AAstring <- "ACDEFGHIKLMNPQRSTVWY";
AAvec <- unlist(strsplit(AAstring,split=''));
s1 <- unlist(strsplit(string1, split=''));
s2 <- unlist(strsplit(string2, split=''));
len <- min(length(s1), length(s2));
total_dist = 0;
for(pos in 1:len){
posi <- c(which(s1[pos]==AAvec), which(s2[pos]==AAvec));
r_i <- min(posi)
c_i <- max(posi);
#print(sprintf("%s,%s : %i", s1[pos], s2[pos], dm[r_i,c_i]));
total_dist <- total_dist + dm[r_i, c_i];
}
return(total_dist);
}
# Affected/Control module counts at a set of positions, as a matrix.
#
# Loads the allele counts for the loci/dataset pair, counts modules via
# get_position_counts(), and returns a 2-column matrix (rows = module
# strings, col 1 = affected count, col 2 = control count).
get_module_counts <- function(posV=pocket4B, dataset="copd", loci="DR"){
	# loci="HLA.DRB1"
	# loci="HLA.DQB1"
	posV <- sort(posV);
	print("From positions (posV):");
	print(posV);
	if(dataset == "copd"){
		cc <- get_allele_counts_copd(dx=0, loci=loci); # control
		ac <- get_allele_counts_copd(dx=4, loci=loci); # affected
	}
	else{
		if(loci == "DR"){
			loci <- "HLA.DRB1";
		}
		else if(loci == "DQ"){
			loci <- "HLA.DQB1";
		}
		cc <- get_allele_counts(loci=loci, dx="Control");
		ac <- get_allele_counts(loci=loci, dx="Affected");
	}
	padded_seq_hash <- get_padded_seqs(ac, cc);
	#padded_seq_mat <- get_hash_values_as_matrix(padded_seq_hash);
	pc <- get_position_counts(posV, cc, ac, padded_seq_hash, doPrint=FALSE);
	# pc is list(affected hash, control hash, pvaluesVec) -- the
	# allInfoList element mentioned below no longer exists.
	# pc is return(list(affected(hash), control(hash), pvaluesVec, allInfoList));
	module_counts <- two_lists_as_mat(pc[[1]], pc[[2]]);
	return(module_counts);
}
# Return a square, symmetric distance matrix.
pair_dists <- function(posV=pocket4B, dataset="copd", loci="DR"){
module_counts <- get_module_counts(posV, dataset, loci);
#for(row_1 in 1:(nrow(module_counts)-1)){
#for(row_2 in (1+row_1):nrow(module_counts)){
# Make a square matrix of zeros.
my_dm <- matrix(0, nrow=nrow(module_counts), ncol=nrow(module_counts));
for(row_1 in 1:nrow(module_counts)){
for(row_2 in 1:nrow(module_counts)){
#print(sprintf("row1:%i, row2:%i", row_1, row_2));
my_dist <- distance(rownames(module_counts)[row_1],
rownames(module_counts)[row_2]);
my_dm[row_1, row_2] <- my_dist;
print(sprintf("%s to %s: %i", rownames(module_counts)[row_1],
rownames(module_counts)[row_2], my_dist));
}
}
rownames(my_dm) <- rownames(module_counts);
colnames(my_dm) <- rownames(module_counts);
return(my_dm);
}
# Return the distance between the two clusters.
# l1 and l2 are lists, possibly singletons.
# Distance is average distance between all cluster members.
clust_dist <- function(l1, l2){
total_dist <- 0;
count <- 0;
if(length(l1) == 0 || length(l2)==0){
return(0);
}
for(i_1 in 1:length(l1)){
for(i_2 in 1:length(l2)){
count <- count + 1;
total_dist <- total_dist + distance(l1[[i_1]], l2[[i_2]]);
#print(sprintf("count:%i td:%.3f", count, total_dist));
}
}
#print(sprintf("final count:%i td:%.3f", count, total_dist));
return(total_dist / count);
}
# Return a vector of values from the list l1.
# Similar to names() function.
values <- function(l1){
vec <- c();
for(pos in 1:length(l1)){
#print(l1[[pos]]);
vec <- c(vec, paste(l1[[pos]], collapse=' '));
}
return(vec);
}
# cluster row names of counts
# counts is a 2-d matrix of module counts (from get_module_counts())
# thresh is the minimum distance to merge clusters.
# limit is the maximum number of mergers to perform.
cluster <- function(counts, thresh=15, limit=Inf, quiet=TRUE){
# Make a list of rownames.
# Each cluster is intially a singleton.
clusters <- c();
for(rn in rownames(counts)){
clusters <- c(clusters, list(rn));
}
minDist <- Inf;
#while(minDist < thresh){
loop_count <- 0;
while(TRUE){
loop_count <- loop_count+1;
# Find the two nearest clusters.
for(ind_1 in 1:(length(clusters)-1)){
for(ind_2 in (ind_1+1):length(clusters)){
#print(sprintf("%i %i : len:%i",ind_1, ind_2, length(clusters)));
d <- clust_dist(clusters[[ind_1]], clusters[[ind_2]]);
if(!quiet){
print(sprintf("%i %s, %i %s : %.3f", ind_1,
paste(clusters[[ind_1]], collapse=' '),
ind_2, paste(clusters[[ind_2]], collapse=' '), d));
}
if(d < minDist){
minDist <- d;
minDisti <- c(ind_1, ind_2);
}
}
}
#print(sprintf("minDist: %.3f", minDist));
if(minDist < thresh){
# Merge the two nearest clusters.
counts <- merge_counts(counts, minDisti);
clusters <- merge_clust(clusters, minDisti);
minDist <- Inf;
}
else{
break;
}
# Stop if the limit is reached, or if this is the only cluster.
if(loop_count >= limit || length(clusters)==1){
break;
}
}
rownames(counts) <- values(clusters);
return(counts);
}
# Merge counts.
# counts is a 2-d matrix.
# Returned matrix will have 1 less row than mat.
merge_counts <- function(mat, indices){
indices <- sort(indices);
#print(sprintf("merge_counts: %i, %i", indices[1], indices[2]));
#print(mat[indices[1],]);
#print(mat[indices[2],]);
newMat <- matrix(0, nrow=(nrow(mat)-1), ncol=ncol(mat));
rowCount <- 0;
for(matRow in 1:nrow(mat)){
#print(sprintf("matRow:%i",matRow));
if(matRow != indices[2]){
rowCount <- rowCount + 1;
newMat[rowCount,] <- mat[matRow,];
}
else{
newMat[indices[1],] <- newMat[indices[1],] + mat[matRow,];
}
#print(sprintf("to row:%i",rowCount));
#print(newMat);
}
return(newMat);
}
# Merge the cluster elements 1 and 2.
# clusters is a list, elements is a vector of 2 indices to merge.
merge_clust <- function(clusters, elements){
elements <- sort(elements);
#print(sprintf("merge_clust %i, %i", elements[1], elements[2]));
newClusts <- c();
for(clusti in 1:length(clusters)){
if(clusti == elements[1]){
newClusts <- c(newClusts, list(c(clusters[[elements[1]]], clusters[[elements[2]]])));
}
else if(clusti != elements[2]){
newClusts <- c(newClusts, list(clusters[[clusti]]));
}
}
return(newClusts);
}
# Return allele names containing the given module at the given set of positions.
# Either a sequnce matrix or a loci+dataset must be provided.
which_has_module <- function(module, posV, seq_mat=c(), loci=c(), dataset=c()){
# Make sure we have a sequence matrix.
if(is.null(seq_mat)){
seq_mat <- get_seq_mat(loci=loci, dataset=dataset);
}
# Break the module string in a character vector.
mod_vec <- unlist(strsplit(module,split=c(),fixed=TRUE));
# Break the position vector into numbers if it is characters.
if(is.character(posV)){
posV <- as.numeric(unlist(strsplit(posV, split=',', fixed=TRUE)));
}
#alleles <- c();
alleles <- "";
# Inspect each allele at the given position vector.
for(rowi in 1:nrow(seq_mat)){
#print(sprintf("look at row %i: %s",rowi,seq_mat[rowi,posV]));
if(length(posV) > 1){
if(identical(seq_mat[rowi,posV], mod_vec)){
#print(sprintf("allele: %s at row: %i", rownames(seq_mat)[rowi], rowi));
#print(rownames(seq_mat)[rowi]);
alleles <- paste(alleles, rownames(seq_mat)[rowi], sep=" ",
collapse='');
}
}
else{
if(seq_mat[rowi,posV] == mod_vec){
alleles <- paste(alleles, rownames(seq_mat)[rowi], sep=" ",
collapse='');
}
}
}
return(alleles);
}
# Print out all modules at the given position vector posV,
# with the allele(s) containing that module at that positon set.
# The module counts (mc) matrix must be named; see get_module_counts()
# If the module counts matrix is not provided it will be found using
# loci+dataset combination.
# Print, for each module at posV, the alleles carrying that module.
#
# mc is a named module-count matrix (see get_module_counts()); when
# omitted it is computed from the loci+dataset pair. Side-effect only.
which_alleles <- function(posV, mc=c(), seq_mat=c(), loci=c(), dataset=c()){
	if(is.null(mc)){
		mc <- get_module_counts(posV=posV, dataset=dataset, loci=loci);
	}
	for(module in rownames(mc)){
		# Get the allele names as a string.
		str <- which_has_module(module=module, posV=posV, seq_mat=seq_mat, loci=loci, dataset=dataset);
		print(sprintf("%s: %s", module, str));
	}
}
# Add 2 lists.
# Lists are hashes keyed on names. Values are numbers to be added.
# Return hash.
add2Lists <- function(l1, l2){
allNames <- union(names(l1), names(l2));
merged <- list();
for(thisName in allNames){
if(thisName %in% names(l1)){
l1value <- l1[[thisName]];
}
else{
l1value <- 0;
}
if(thisName %in% names(l2)){
l2value <- l2[[thisName]];
}
else{
l2value <- 0;
}
merged[[thisName]] <- l1value + l2value;
}
return(merged);
}
# Tabulate counts and p-values for a whitespace-separated allele list.
#
# alleleStr: allele names separated by whitespace (e.g. the strings
# produced by which_has_module()). alleleCounts: matrix with Affected
# in column 1, Control in column 2, rownames = allele names.
# Computes both the null (50:50) p-value and the p-value under the
# expected_aff:expected_con split (pvalue / pvalue_unequal, hla2.R).
# Returns a matrix with one row per allele.
alleleCountsUnequal <- function(alleleStr, alleleCounts, expected_aff, expected_con){
	alleleNames <- unlist(strsplit(alleleStr, " +",)); # split on whitespace
	allRows <- c();
	for(thisName in alleleNames){
		aff <- alleleCounts[thisName, 1];
		con <- alleleCounts[thisName, 2];
		p <- signif(pvalue(aff, con), 4);
		p_uneq <- signif(pvalue_unequal(aff, con, expected_aff, expected_con), 4);
		thisRow <- cbind(aff, con, p, p_uneq);
		allRows <- rbind(allRows, thisRow);
		#print(sprintf(%s\t%i\t%i\t%e\t%e), quote=FALSE);
	}
	rownames(allRows) <- alleleNames;
	colnames(allRows) <- c("Affected", "Control", " p null ", sprintf(" p %i:%i", expected_aff, expected_con));
	allRows;
}
# fixClusterCounts
# The function cluster is counting individuals multiple times.
# Use the orignial function to do the clustering, then use the group names
# of the returned clusters to do a real count of the patients in each group.
# countMatrix is the matrix of counts returned by the cluster function.
# Row names of countMatrix contain the clustered modules.
# pm is the dataMat of a patientMatrix.
# Look through pm and count the number of patients
# that have any of the modules for each cluster.
# posV is the vector of positions.
# seqHash is the hash of sequences for all alleles, keyed on allele name.
# Recount PATIENTS (not chromosomes) per cluster; see preamble above.
# countMatrix: output of cluster() (rownames = space-separated module
# groups). apm/cpm: affected/control patient matrices, two allele-name
# columns each. Returns a 2-column matrix (affected, control) with the
# same rownames as countMatrix. A patient is counted once per cluster
# containing either of their modules, so a patient can appear in more
# than one cluster.
fixClusterCounts <- function(countMatrix, apm, cpm, posV, seqHash){
	# Build a list of clusters where each cluster is represented by a vector
	# of module names from the white-space-split rownames of countMatrix.
	clusterNames <- rownames(countMatrix);
	clustersList <- list();
	for(groupNumber in 1:length(clusterNames)){
		clustersList[[groupNumber]] <-
			# split on whitespace
			unlist(strsplit(clusterNames[groupNumber], " +",));
	}
	affectedCounts <- rep(0, length(clustersList));
	controlCounts <- rep(0, length(clustersList));
	# Look at each row of the each patientMatrix and count patients with a
	# module in each cluster set.
	alleleNames <- names(seqHash);
	for(row_i in 1:(max(nrow(apm),nrow(cpm)))){
		# Get the modules for this affected patient.
		module1a <- c(); # Module 1 affected.
		module2a <- c(); # Module 2 affected.
		# Make sure we don't run off the end of the affected matrix.
		if(row_i <= nrow(apm)){
			# Module 1 affected.
			if( (!is.null(apm[row_i, 1])) && (apm[row_i, 1] %in% alleleNames) ){
				allele1a <- apm[row_i, 1];
				module1a <-paste(seqHash[[allele1a]][posV], collapse='');
			}
			# Module 2 affected.
			if( (!is.null(apm[row_i, 2])) && (apm[row_i, 2] %in% alleleNames) ){
				allele2a <- apm[row_i, 2];
				module2a <-paste(seqHash[[allele2a]][posV], collapse='');
			}
		}
		# Get the modules for this control patient.
		module1c <- c(); # Module 1 control.
		module2c <- c(); # Module 2 control.
		# Make sure we don't run off the end of the control matrix.
		if(row_i <= nrow(cpm)){
			if( (!is.null(cpm[row_i, 1])) && (cpm[row_i, 1] %in% alleleNames) ){
				allele1c <- cpm[row_i, 1];
				module1c <-paste(seqHash[[allele1c]][posV], collapse='');
			}
			if( (!is.null(cpm[row_i, 2])) && (cpm[row_i, 2] %in% alleleNames) ){
				allele2c <- cpm[row_i, 2];
				module2c <-paste(seqHash[[allele2c]][posV], collapse='');
			}
		}
		# Check each cluster to see if this patient should be counted here.
		# Patients may be counted in more than one cluster.
		for(groupNumber in 1:length(clustersList)){
			# If either module of this patient is in this cluster group,
			# add one to this cluster's patient count.
			if((!is.null(module1a) &&
				(module1a %in% clustersList[[groupNumber]])) ||
			   (!is.null(module2a) &&
				(module2a %in% clustersList[[groupNumber]])) ){
				affectedCounts[groupNumber] <- affectedCounts[groupNumber] + 1;
			}
			# Do the same thing for the control patientMatrix.
			if((!is.null(module1c) &&
				(module1c %in% clustersList[[groupNumber]])) ||
			   (!is.null(module2c) &&
				(module2c %in% clustersList[[groupNumber]])) ){
				controlCounts[groupNumber] <- controlCounts[groupNumber] + 1;
			}
		}
	} # for row_i in 1:max(nrow(apm),nrow(cpm))
	# We have counts. Now format like the return from the cluster function.
	counts <- cbind(affectedCounts, controlCounts);
	rownames(counts) <- clusterNames;
	return(counts);
}
# Logging.
# Open the file name for appending and add the supplied message.
# If firsttime=TRUE, overwrite existing file, otherwise append.
# Append a message line to a log file.
#
# timestamp=TRUE prefixes the message with Sys.time(); firsttime=TRUE
# truncates any existing file instead of appending; echo=TRUE also
# prints the message to the console (unquoted).
logToFile <- function(logfilename, logmessage, timestamp=TRUE, firsttime=FALSE, echo=FALSE){
	if(timestamp){
		logmessage <- sprintf("%s %s", Sys.time(), logmessage);
	}
	# "wt" truncates the file, "at" appends; both are text mode.
	mode <- if(firsttime) "wt" else "at";
	logfile <- file(logfilename, open=mode);
	writeLines(logmessage, con=logfile);
	if(echo){
		print(logmessage, quote=FALSE);
	}
	close(logfile);
}
|
6bc6f7be5822de9380598efec56c832279bb1480
|
439933a3fb21a29240ab4b04aebaced0569248be
|
/Mixed model post processing/orig/non SS/Cumulative Chinook Distribution.R
|
345b63c45a7e90e9c3901055d8bae60ee4369041
|
[] |
no_license
|
nwfsc-cb/spring-chinook-distribution
|
e47b5e39f5ce2ab8f20413085bc13249ef3bec37
|
5bff26b6fe5102a16a9c3f2c13d659b7e831e03e
|
refs/heads/master
| 2023-08-08T03:35:36.302066
| 2023-08-01T16:35:04
| 2023-08-01T16:35:04
| 128,123,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54,550
|
r
|
Cumulative Chinook Distribution.R
|
#### # MAKING SALMON DISTRIBUTION SURFACES
library(ggplot2)
library(reshape2)
library(dplyr)
# READ IN POSTERIOR FILE FROM MODEL FIT OF INTEREST:
# Absolute paths to the model-output and post-processing directories.
results.dir <- "/Users/ole.shelton/GitHub/Orca_Salmon/Output files/_Mixed Results"
code.dir <- "/Users/ole.shelton/GitHub/Orca_Salmon_Code/Mixed model post processing"
# NOTE(review): setwd() changes global state for the whole session.
setwd(results.dir)
#load("Binomial+Positive_output-1978-90 Troll_Rec_Treaty_E200_6year_vuln_int_seasonal.RData")
## Simpson diversity function
# Inverse Simpson diversity: the reciprocal of the probability that two
# random draws come from the same group.
# prop: numeric vector of group proportions (assumed to sum to 1).
inv.simp.div <- function(prop){
  simpson <- sum(prop * prop)
  1 / simpson
}
# Gini-Simpson index: the probability that two random draws come from
# different groups (1 minus the Simpson concentration).
# prop: numeric vector of group proportions (assumed to sum to 1).
gini.simp.div <- function(prop){
  concentration <- sum(prop * prop)
  1 - concentration
}
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
# Calculate number of outmigrant smolt from various regions....
# Reads the smolt/run-size table, keeps the "independent" populations, and
# expands hatchery releases by the assumed fraction of missing wild fish to
# get total (wild + hatchery) outmigrants, split into subyearling ("finger")
# and yearling release types.
smolt.dat <- read.csv("/Users/ole.shelton/GitHub/Orca_Salmon_DATA/Total Smolt and Run Info/_smolt_migrants.csv")
smolt.mod <- smolt.dat[smolt.dat$type == "independent",]
# Total outmigrants = median releases inflated by the missing-wild fraction.
smolt.mod$total.wild.hatch <- smolt.mod$total.releases.median * (1+smolt.mod$frac.wild.missing)
smolt.mod$finger <- smolt.mod$total.wild.hatch * (1 - smolt.mod$frac.yearling )
smolt.mod$yearling <- smolt.mod$total.wild.hatch * smolt.mod$frac.yearling
# Sum production by release region; n.mon.fing / n.mon.year carry the number
# of months each release type spends at large before first observation.
smolt.prod <- aggregate(smolt.mod[,c("finger","yearling")],
                    by=list(number=smolt.mod$location.number,region=smolt.mod$rel.region,
                            n.mon.fing = smolt.mod$n.month.finger ,n.mon.year = smolt.mod$n.month.yearling
                            ),sum)
smolt.prod <- smolt.prod[order(smolt.prod$number),]
#############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
#############################################################################################
### PROJECT SPATIAL DISTRIBUTION FOR EACH ORIGIN
#### DATA FITS
# mod <- Output$stanMod
# samp <- Output$pars
# dat.bin <- Output$raw.dat.bin
# dat.pos <- Output$raw.dat.pos
# cum_M2 <- Output$cum_M2
# spawn_loc <- Output$spawn_loc
###### Calculate Initial Monthly mortality rate
rel_yr <- data.frame(rel_id = 1:length(unique(dat.bin$release_year)),rel_year = sort(unique(dat.bin$release_year)))
nom.all <- sort(unique(dat.bin$year.reg))
THESE <- match(nom.all,dat.bin$year.reg)
nom <- data.frame( dat.bin[THESE,c("year.reg.idx","ocean.reg","release_year","loc.spawn.idx")],nom = nom.all)
nom$ocean.reg[nom$ocean.reg == "NWVI" | nom$ocean.reg == "SWVI"] <- "VI"
nom$ocean.reg[nom$ocean.reg == "SOR" | nom$ocean.reg == "COR" |nom$ocean.reg == "NOR"] <- "OR"
nom <- nom[order(nom$loc.spawn.idx),]
dat.start <- matrix(-99,length(unique(spawn_loc$loc.spawn.idx)),nrow(rel_yr))
colnames(dat.start) <- rel_yr$rel_year
rownames(dat.start) <- unique(nom$ocean.reg)
dat.mort.mean <- dat.start
dat.mort.sd <- dat.start
for(i in 1:max(dat.bin$year.reg.idx)){
X <- which(rownames(dat.start)==nom$ocean.reg[i])
Y <- which(colnames(dat.start)==nom$release_year[i])
dat.mort.mean[X,Y] <- mean(samp$rel_year_all[,nom$year.reg.idx[i]])
dat.mort.sd[X,Y] <- sd(samp$rel_year_all[,nom$year.reg.idx[i]])
}
juv.mort <- NULL
for(i in 1:nrow(dat.mort.mean)){
temp <- dat.mort.mean[i,]
juv.mort <- rbind(juv.mort,c(rownames(dat.mort.mean)[i],mean(temp[temp>0])))
}
juv.mort <- data.frame(juv.mort)
colnames(juv.mort) <- c("region","juv.mort")
juv.mort$juv.mort <- as.numeric(as.character(unlist(juv.mort$juv.mort)))
juv.mort$loc.spawn.idx <- 1:nrow(juv.mort)
#############################################################################################
sim.region <- aggregate(dat.bin$ocean.reg,
by=list(region = dat.bin$ocean.reg,
origin.idx = dat.bin$origin.idx,
loc.spawn.idx = dat.bin$loc.spawn.idx
#season.idx = dat.bin$season.idx
),length)[,1:3]
sim.region <- merge(sim.region,juv.mort[,c("loc.spawn.idx","juv.mort")],all=T,by="loc.spawn.idx")
sim.region <- merge(sim.region,smolt.prod)
sim.region <- sim.region[order(sim.region$number),]
sim.region$juv.mort.shared <- mean(exp(samp$log_rel_year_mu))
N.mod.int <- max(dat.bin$time)
LOC <- sort(unique(dat.bin$location))
AGE_MONTH_IDX <- 1:N.mod.int
SEASON_NAME <- c(c("spring","summer","fall"),rep(c("winter","spring","summer","fall"),4))
SEASON_IDX <- c(c(1,2,3),rep(c(1,1,2,3),4))
sim.dat <- expand.grid(age.month=AGE_MONTH_IDX,loc=LOC)
sim.dat$season.idx <- SEASON_IDX
sim.dat$season.name <-SEASON_NAME
sim.dat$age.year <- 0
sim.dat$age.year[sim.dat$age.month<=3]<-1
sim.dat$age.year[sim.dat$age.month >3 & sim.dat$age.month <= 7 ] <- 2
sim.dat$age.year[sim.dat$age.month >7 & sim.dat$age.month <= 11 ] <- 3
sim.dat$age.year[sim.dat$age.month >11 & sim.dat$age.month <= 15 ]<- 4
sim.dat$age.year[sim.dat$age.month >15 & sim.dat$age.month <= 19 ]<- 5
OUT <- sim.dat
# Deterministic projection: for each origin region, compute expected Chinook
# abundance at every (age-month, location) cell of sim.dat, on the log scale,
# then exponentiate.  Subyearling ("finger") and yearling releases are
# projected separately (they differ in release size and juvenile-mortality
# duration) and summed.  Commented-out terms are catchability/effort offsets
# retained from the likelihood but not needed for a distribution surface.
for(i in 1:nrow(sim.region)){
  finger <- rep(0,nrow(sim.dat))
  yearling <- rep(0,nrow(sim.dat))
  if(sim.region$finger[i] > 0){
  for(j in 1:nrow(sim.dat)){
    finger[j] <-
      log(sim.region$finger[i] * 1e6) -
      cum_M2[sim.dat$age.month[j]] - # fixed mortality for adults.
      sim.region$juv.mort[i] * sim.region$n.mon.fing[i] +     # Initial period before fish are observed
      # log_q_troll +         # effort offset
      # log_q_rec * effort_idx_rec[i] +     # effort offset
      # log_q_treaty * effort_idx_treaty[i] +     # effort offset
      # log_effort[i] +
      log(origin_loc[sim.dat$season.idx[j],sim.region$origin.idx[i],sim.dat$loc[j]]) +  # dispersion in the ocean term (sum to 1)
      log(1 - sum_prob[sim.region$loc.spawn.idx[i],sim.dat$age.year[j]])
      # log(vuln_mat[vuln_idx[i],age_vuln_idx[i]]) ;
  }
  }
  if(sim.region$yearling[i] > 0){
    for(j in 1:nrow(sim.dat)){
      yearling[j] <-
        log(sim.region$yearling[i] * 1e6) -
        cum_M2[sim.dat$age.month[j]] - # fixed mortality for adults.
        sim.region$juv.mort[i] * sim.region$n.mon.year[i] +     # Initial period before fish are observed
        # log_q_troll +         # effort offset
        # log_q_rec * effort_idx_rec[i] +     # effort offset
        # log_q_treaty * effort_idx_treaty[i] +     # effort offset
        # log_effort[i] +
        log(origin_loc[sim.dat$season.idx[j],sim.region$origin.idx[i],sim.dat$loc[j]]) +  # dispersion in the ocean term (sum to 1)
        log(1 - sum_prob[sim.region$loc.spawn.idx[i],sim.dat$age.year[j]])
        # log(vuln_mat[vuln_idx[i],age_vuln_idx[i]]) ;
    }
  }
  # One abundance column per origin region, named by region.
  OUT[,paste(sim.region$region[i])] <- exp(finger) + exp(yearling)
}
THESE <- match(sim.region$region,colnames(OUT))
OUT[,THESE] <- OUT[,THESE] / 1000
OUT$TOTAL <- rowSums(OUT[,THESE])
##################################################################################################################
##################################################################################################################
summer <- OUT[OUT$season.name =="summer",]
fall <- OUT[OUT$season.name =="fall",]
winter <- OUT[OUT$season.name =="winter",]
spring <- OUT[OUT$season.name =="spring",]
temp <- summer[summer$age.year == 5,]
plot(TOTAL~loc,data=temp,type="l")
temp <- fall[fall$age.year == 4,]
plot(TOTAL~loc,data=temp,type="l")
temp <- spring[spring$age.year == 4,]
plot(TOTAL~loc,data=temp,type="l")
OUT.LONG <- melt(OUT,id=c("loc","season.name","age.year","age.month","season.idx"))
OUT.TOT <- OUT.LONG[OUT.LONG$variable == "TOTAL",]
OUT.LONG <- OUT.LONG[OUT.LONG$variable != "TOTAL",]
#### MAKE CUMULATIVE DISTRIBUTION FOR ALL AREAS BASED ON MEAN SURVIVAL FOR EACH REGION, MEAN DISTRIBUTION, MEAN EVERYTHING.
age <- c(1,2,3,4)
for(i in 1:length(age)){
AGE <- age[i]
age.name <- AGE + 1
OUT.OLD <- aggregate(OUT.TOT$value[OUT.TOT$age.year>=AGE],
by=list(loc=OUT.TOT$loc[OUT.TOT$age.year>=AGE],
season=OUT.TOT$season.name[OUT.TOT$age.year>=AGE]),sum)
OUT.OLD <- data.frame(OUT.OLD)
colnames(OUT.OLD)[3] <- "chin"
OUT.OLD.REG <- aggregate(OUT.LONG$value[OUT.LONG$age.year>=AGE],
by=list(loc=OUT.LONG$loc[OUT.LONG$age.year>=AGE],
season=OUT.LONG$season.name[OUT.LONG$age.year>=AGE],
region=OUT.LONG$variable[OUT.LONG$age.year>=AGE]),sum)
OUT.OLD.REG <- data.frame(OUT.OLD.REG)
colnames(OUT.OLD.REG)[4] <- "chin"
### Calculate Evenness for each region and each season
total.by.reg <- aggregate(OUT.OLD.REG$chin,by=list(loc=OUT.OLD.REG$loc,season=OUT.OLD.REG$season),sum)
colnames(total.by.reg)[3] <- "TOT"
OUT.EVEN <- merge(OUT.OLD.REG,total.by.reg)
OUT.EVEN$prop <- OUT.EVEN$chin / OUT.EVEN$TOT
even.plot <- aggregate(OUT.EVEN$prop,by=list(loc=OUT.EVEN$loc,season=OUT.EVEN$season),gini.simp.div)
colnames(even.plot)[3] <- "gini.simp"
############################
### plots start here
############################
# Plots
p1 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="spring",], aes(x = loc, y = chin, fill = region)) +
geom_area() +
labs(x = "", y = paste("Chinook, age ",age.name,"+ (thousands)",sep="")) +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name) +
ggtitle("Spring Distribution") +
theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=0.5)) +
scale_fill_discrete(name="Origin")
p2 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="summer",], aes(x = loc, y = chin, fill = region)) +
geom_area() +
labs(x = "", y = paste("Chinook, age ",age.name,"+ (thousands)",sep="")) +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name) +
ggtitle("Summer Distribution") +
theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=0.5))+
scale_fill_discrete(name="Origin")
p3 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="fall",], aes(x = loc, y = chin, fill = region)) +
geom_area() +
labs(x = "", y = paste("Chinook, age ",age.name,"+ (thousands)",sep="")) +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name) +
ggtitle("Fall Distribution") +
theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=0.5)) +
scale_fill_discrete(name="Origin")
################################################################
#### MAKE PROPORTIONAL ORIGIN OF FISH IN EACH AREA
################################################################
q1 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="spring",], aes(x = loc, y = chin, fill = region)) +
geom_bar(position = "fill",stat = "identity") +
scale_y_continuous() +
labs(x = "", y = paste("Proportion",sep="")) +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name) +
ggtitle("Spring Distribution") +
theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=0.5)) +
scale_fill_discrete(name="Origin")
q2 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="summer",], aes(x = loc, y = chin, fill = region)) +
geom_bar(position = "fill",stat = "identity") +
scale_y_continuous() +
labs(x = "", y = paste("Proportion",sep="")) +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name) +
ggtitle("Summer Distribution") +
theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=0.5)) +
scale_fill_discrete(name="Origin")
q3 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="fall",], aes(x = loc, y = chin, fill = region)) +
geom_bar(position = "fill",stat = "identity") +
scale_y_continuous() +
labs(x = "", y = paste("Proportion",sep="")) +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name) +
ggtitle("Fall Distribution") +
theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=0.5)) +
scale_fill_discrete(name="Origin")
setwd("/Users/ole.shelton/GitHub/Orca_Salmon/Output plots/Mixed Model")
pdf(file= paste("Fall Chinook, age ",age.name,"+ abundance in space.pdf",sep=""),width=8,height=4.5)
print(p1)
print(p2)
print(p3)
print(q1)
print(q2)
print(q3)
par(mar=c(4,4,1,1))
x.lim <- c(1,17)
y.lim <- c(0,1)
plot(gini.simp ~ loc, data=even.plot[even.plot$season == "spring",],
axes=F,type="b",xlim=x.lim,ylim=y.lim,xlab="",ylab="",yaxs="i",lwd=2,pch=21,bg=1)
par(new=T)
plot(gini.simp ~ loc, data=even.plot[even.plot$season == "summer",],
axes=F,type="b",xlim=x.lim,ylim=y.lim,xlab="",ylab="",col=2,yaxs="i",lwd=2,pch=24,bg=2)
par(new=T)
plot(gini.simp ~ loc, data=even.plot[even.plot$season == "fall",],
axes=F,type="b",xlim=x.lim,ylim=y.lim,xlab="",ylab="",col=4,yaxs="i",lwd=2,pch=22,bg=4)
axis(1,1:nrow(LOCATIONS), labels=LOCATIONS$location.name,las=2)
axis(2,las=2)
title(ylab= "Gini-Simpson index")
box(bty="o",lwd=2)
title("Run Diversity in each ocean region")
legend(x=14,y=0.4,legend=c("Spring","Summer","Fall"),col=c(1,2,4),pch=c(21,24,22),lwd=2,pt.bg=c(1,2,4),bty="n")
dev.off()
}
################################################################
################################################################
################################################################
################################################################
##############################################################################################################
##############################################################################################################
##############################################################################################################
##############################################################################################################
# Repeat above but use single, coast wide average juv mort rate
##############################################################################################################
##############################################################################################################
##############################################################################################################
##############################################################################################################
#############################################################################################
##############################################################################################
#############################################################################################
### PROJECT SPATIAL DISTRIBUTION FOR EACH ORIGIN
###### Calculate Initial Monthly mortality rate
N.mod.int <- max(dat.bin$time)
LOC <- sort(unique(dat.bin$location))
AGE_MONTH_IDX <- 1:N.mod.int
SEASON_NAME <- c(c("spring","summer","fall"),rep(c("winter","spring","summer","fall"),4))
SEASON_IDX <- c(c(1,2,3),rep(c(1,1,2,3),4))
sim.dat <- expand.grid(age.month=AGE_MONTH_IDX,loc=LOC)
sim.dat$season.idx <- SEASON_IDX
sim.dat$season.name <-SEASON_NAME
sim.dat$age.year <- 0
sim.dat$age.year[sim.dat$age.month<=3]<-1
sim.dat$age.year[sim.dat$age.month >3 & sim.dat$age.month <= 7 ] <- 2
sim.dat$age.year[sim.dat$age.month >7 & sim.dat$age.month <= 11 ] <- 3
sim.dat$age.year[sim.dat$age.month >11 & sim.dat$age.month <= 15 ]<- 4
sim.dat$age.year[sim.dat$age.month >15 & sim.dat$age.month <= 19 ]<- 5
OUT <- sim.dat
for(i in 1:nrow(sim.region)){
finger <- rep(0,nrow(sim.dat))
yearling <- rep(0,nrow(sim.dat))
if(sim.region$finger[i] > 0){
for(j in 1:nrow(sim.dat)){
finger[j] <-
log(sim.region$finger[i] * 1e6) -
cum_M2[sim.dat$age.month[j]] - # fixed mortality for adults.
sim.region$juv.mort.shared[i] * sim.region$n.mon.fing[i] + # Initial period before fish are observed
# log_q_troll + # effort offset
# log_q_rec * effort_idx_rec[i] + # effort offset
# log_q_treaty * effort_idx_treaty[i] + # effort offset
# log_effort[i] +
log(origin_loc[sim.dat$season.idx[j],sim.region$origin.idx[i],sim.dat$loc[j]]) + # dispersion in the ocean term (sum to 1)
log(1 - sum_prob[sim.region$loc.spawn.idx[i],sim.dat$age.year[j]])
# log(vuln_mat[vuln_idx[i],age_vuln_idx[i]]) ;
}
}
if(sim.region$yearling[i] > 0){
for(j in 1:nrow(sim.dat)){
yearling[j] <-
log(sim.region$yearling[i] * 1e6) -
cum_M2[sim.dat$age.month[j]] - # fixed mortality for adults.
sim.region$juv.mort.shared[i] * sim.region$n.mon.year[i] + # Initial period before fish are observed
# log_q_troll + # effort offset
# log_q_rec * effort_idx_rec[i] + # effort offset
# log_q_treaty * effort_idx_treaty[i] + # effort offset
# log_effort[i] +
log(origin_loc[sim.dat$season.idx[j],sim.region$origin.idx[i],sim.dat$loc[j]]) + # dispersion in the ocean term (sum to 1)
log(1 - sum_prob[sim.region$loc.spawn.idx[i],sim.dat$age.year[j]])
# log(vuln_mat[vuln_idx[i],age_vuln_idx[i]]) ;
}
}
OUT[,paste(sim.region$region[i])] <- exp(finger) + exp(yearling)
}
THESE <- match(sim.region$region,colnames(OUT))
OUT[,THESE] <- OUT[,THESE] / 1000
OUT$TOTAL <- rowSums(OUT[,THESE])
##################################################################################################################
##################################################################################################################
summer <- OUT[OUT$season.name =="summer",]
fall <- OUT[OUT$season.name =="fall",]
winter <- OUT[OUT$season.name =="winter",]
spring <- OUT[OUT$season.name =="spring",]
temp <- summer[summer$age.year == 5,]
plot(TOTAL~loc,data=temp,type="l")
temp <- fall[fall$age.year == 4,]
plot(TOTAL~loc,data=temp,type="l")
temp <- spring[spring$age.year == 4,]
plot(TOTAL~loc,data=temp,type="l")
OUT.LONG <- melt(OUT,id=c("loc","season.name","age.year","age.month","season.idx"))
OUT.TOT <- OUT.LONG[OUT.LONG$variable == "TOTAL",]
OUT.LONG <- OUT.LONG[OUT.LONG$variable != "TOTAL",]
#### MAKE CUMULATIVE DISTRIBUTION FOR ALL AREAS BASED ON MEAN SURVIVAL FOR EACH REGION, MEAN DISTRIBUTION, MEAN EVERYTHING.
age <- c(1,2,3,4)
for(i in 1:length(age)){
AGE <- age[i]
age.name <- AGE + 1
OUT.OLD <- aggregate(OUT.TOT$value[OUT.TOT$age.year>=AGE],
by=list(loc=OUT.TOT$loc[OUT.TOT$age.year>=AGE],
season=OUT.TOT$season.name[OUT.TOT$age.year>=AGE]),sum)
OUT.OLD <- data.frame(OUT.OLD)
colnames(OUT.OLD)[3] <- "chin"
OUT.OLD.REG <- aggregate(OUT.LONG$value[OUT.LONG$age.year>=AGE],
by=list(loc=OUT.LONG$loc[OUT.LONG$age.year>=AGE],
season=OUT.LONG$season.name[OUT.LONG$age.year>=AGE],
region=OUT.LONG$variable[OUT.LONG$age.year>=AGE]),sum)
OUT.OLD.REG <- data.frame(OUT.OLD.REG)
colnames(OUT.OLD.REG)[4] <- "chin"
### Calculate Evenness for each region and each season
total.by.reg <- aggregate(OUT.OLD.REG$chin,by=list(loc=OUT.OLD.REG$loc,season=OUT.OLD.REG$season),sum)
colnames(total.by.reg)[3] <- "TOT"
OUT.EVEN <- merge(OUT.OLD.REG,total.by.reg)
OUT.EVEN$prop <- OUT.EVEN$chin / OUT.EVEN$TOT
even.plot <- aggregate(OUT.EVEN$prop,by=list(loc=OUT.EVEN$loc,season=OUT.EVEN$season),gini.simp.div)
colnames(even.plot)[3] <- "gini.simp"
# Start plots
p1 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="spring",], aes(x = loc, y = chin, fill = region)) +
geom_area() +
labs(x = "", y = paste("Chinook, age ",age.name,"+ (thousands)",sep="")) +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name) +
ggtitle("Spring Distribution") +
theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=0.5)) +
scale_fill_discrete(name="Origin")
p2 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="summer",], aes(x = loc, y = chin, fill = region)) +
geom_area() +
labs(x = "", y = paste("Chinook, age ",age.name,"+ (thousands)",sep="")) +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name) +
ggtitle("Summer Distribution") +
theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=0.5))+
scale_fill_discrete(name="Origin")
p3 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="fall",], aes(x = loc, y = chin, fill = region)) +
geom_area() +
labs(x = "", y = paste("Chinook, age ",age.name,"+ (thousands)",sep="")) +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name) +
ggtitle("Fall Distribution") +
theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=0.5)) +
scale_fill_discrete(name="Origin")
# Proportional contribution
q1 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="spring",], aes(x = loc, y = chin, fill = region)) +
geom_bar(position = "fill",stat = "identity") +
scale_y_continuous() +
labs(x = "", y = paste("Proportion",sep="")) +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name) +
ggtitle("Spring Distribution") +
theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=0.5)) +
scale_fill_discrete(name="Origin")
q2 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="summer",], aes(x = loc, y = chin, fill = region)) +
geom_bar(position = "fill",stat = "identity") +
scale_y_continuous() +
labs(x = "", y = paste("Proportion",sep="")) +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name) +
ggtitle("Summer Distribution") +
theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=0.5)) +
scale_fill_discrete(name="Origin")
q3 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="fall",], aes(x = loc, y = chin, fill = region)) +
geom_bar(position = "fill",stat = "identity") +
scale_y_continuous() +
labs(x = "", y = paste("Proportion",sep="")) +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name) +
ggtitle("Fall Distribution") +
theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=0.5)) +
scale_fill_discrete(name="Origin")
setwd("/Users/ole.shelton/GitHub/Orca_Salmon/Output plots/Mixed Model")
pdf(file= paste("Fall Chinook, age ",age.name,"+ abundance in space (shared mean mortality).pdf",sep=""),width=8,height=4.5)
print(p1)
print(p2)
print(p3)
print(q1)
print(q2)
print(q3)
par(mar=c(4,4,1,1))
x.lim <- c(1,17)
y.lim <- c(0,1)
plot(gini.simp ~ loc, data=even.plot[even.plot$season == "spring",],
axes=F,type="b",xlim=x.lim,ylim=y.lim,xlab="",ylab="",yaxs="i",lwd=2,pch=21,bg=1)
par(new=T)
plot(gini.simp ~ loc, data=even.plot[even.plot$season == "summer",],
axes=F,type="b",xlim=x.lim,ylim=y.lim,xlab="",ylab="",col=2,yaxs="i",lwd=2,pch=24,bg=2)
par(new=T)
plot(gini.simp ~ loc, data=even.plot[even.plot$season == "fall",],
axes=F,type="b",xlim=x.lim,ylim=y.lim,xlab="",ylab="",col=4,yaxs="i",lwd=2,pch=22,bg=4)
axis(1,1:nrow(LOCATIONS), labels=LOCATIONS$location.name,las=2)
axis(2,las=2)
title(ylab= "Gini-Simpson index")
title("Run Diversity in each ocean region")
box(bty="o",lwd=2)
legend(x=14,y=0.4,legend=c("Spring","Summer","Fall"),col=c(1,2,4),pch=c(21,24,22),lwd=2,pt.bg=c(1,2,4),bty="n")
dev.off()
}
#################################################################
#################################################################
#################################################################
#################################################################
# PROPAGATE UNCERTAINTY FOR PARAMETERS TO LOOK AT AMONG YEAR VARIATION FOR EACH ORIGIN AREA
# USE FIXED NUMBERS OF RELEASES FOR EACH AREA, THOUGH
N.rep <- 1000
N.MCMC <- dim(samp$log_q_troll)
THIS <- sample(1:N.MCMC,N.rep)
all.site <- list()
all.site.summary <- list()
# Cycle across origin regions:
# Propagate posterior parameter uncertainty: for each origin region, draw
# N.rep posterior samples (indices in THIS) and project abundance for every
# (age-month, season, location) row of sim.dat.  Per-replicate results go in
# all.site; mean/SD/quantile summaries go in all.site.summary.
for(i in 1:nrow(sim.region)){
  SITE  <- sim.region$region[i]
  TEMP.OUT <- matrix(0,nrow(sim.dat),N.rep)
  # Candidate juvenile-mortality series for this origin; OR and VI
  # sub-regions share a pooled mortality series.
  nom_reg <- nom[nom$ocean.reg == sim.region$region[i],]
  if(sim.region$region[i] %in% c("SOR","COR","NOR")){ nom_reg <- nom[nom$ocean.reg == "OR",]}
  if(sim.region$region[i] %in% c("SWVI","NWVI")){ nom_reg <- nom[nom$ocean.reg == "VI",]}
  for(j in 1:N.rep){
    finger   <- rep(0,nrow(sim.dat))
    yearling <- rep(0,nrow(sim.dat))
    # Draw one juvenile mortality rate (random release year) per replicate.
    juv.idx  <- sample(nom_reg$year.reg.idx,1)
    juv.mort <- samp$rel_year_all[THIS[j], juv.idx]
    if(sim.region$finger[i] > 0){
      for(k in 1:nrow(sim.dat)){
        finger[k] <-
          log(sim.region$finger[i] * 1e6) -
          cum_M2[sim.dat$age.month[k]] -                 # fixed mortality for adults
          juv.mort * sim.region$n.mon.fing[i] +          # initial period before fish are observed
          log(samp$origin_loc[THIS[j],sim.dat$season.idx[k],sim.region$origin.idx[i],sim.dat$loc[k]]) + # ocean dispersion (sums to 1)
          log(1 - samp$sum_prob[THIS[j],sim.region$loc.spawn.idx[i],sim.dat$age.year[k]])
      }
    }
    if(sim.region$yearling[i] > 0){
      for(k in 1:nrow(sim.dat)){
        yearling[k] <-
          log(sim.region$yearling[i] * 1e6) -
          cum_M2[sim.dat$age.month[k]] -                 # fixed mortality for adults
          # BUG FIX: original multiplied by n.mon.fing here; yearlings use the
          # yearling duration (matches the deterministic projections above).
          juv.mort * sim.region$n.mon.year[i] +
          log(samp$origin_loc[THIS[j],sim.dat$season.idx[k],sim.region$origin.idx[i],sim.dat$loc[k]]) + # ocean dispersion (sums to 1)
          log(1 - samp$sum_prob[THIS[j],sim.region$loc.spawn.idx[i],sim.dat$age.year[k]])
      }
    }
    TEMP.OUT[,j] <- exp(finger) + exp(yearling)
  }
  all.site[[SITE]] <- data.frame(cbind(sim.dat,TEMP.OUT))
  # Summarise replicates row-wise: mean, SD, and 9 posterior quantiles.
  temp <- data.frame(MEAN=apply(TEMP.OUT,1,mean),
                      SD=apply(TEMP.OUT,1,sd),
                      q=t(as.matrix(apply(TEMP.OUT,1,quantile,probs=c(0.025,0.05,0.1,0.25,0.5,0.75,0.9,0.95,0.975)))))
  colnames(temp)[3:ncol(temp)] <- c("q.025","q.05","q.10","q.25","q.50","q.75","q.90","q.95","q.975")
  all.site.summary[[SITE]] <- data.frame(cbind(sim.dat,temp))
}
##################################################################################################
# Calculate the total abundance
# Sum replicate-level abundances across all origin regions to get total
# Chinook per (row of sim.dat, replicate).  Replicate columns are the ones
# named X1..XN by data.frame().
# BUG FIX: the original used region 1 both as the initializer and as the
# first addend, double-counting the first origin region.
THESE <- grep("X",colnames(all.site[[1]]))
for(i in seq_along(all.site)){
  if(i == 1){
    all.site.sum <- all.site[[i]][, THESE]
  } else {
    all.site.sum <- all.site.sum + all.site[[i]][, THESE]
  }
}
total.chin <- data.frame(MEAN=apply(all.site.sum,1,mean),
SD=apply(all.site.sum,1,sd),
q=t(as.matrix(apply(all.site.sum,1,quantile,probs=c(0.025,0.05,0.1,0.25,0.5,0.75,0.9,0.95,0.975)))))
colnames(total.chin)[3:ncol(total.chin)] <- c("q.025","q.05","q.10","q.25","q.50","q.75","q.90","q.95","q.975")
total.chin <- data.frame(cbind(sim.dat,total.chin))
total.chin$CV <- total.chin$SD / total.chin$MEAN
### Plot functions Mean and Variability of Total Chinook Abundance
# Plot mean abundance (in thousands) by ocean region with a shaded
# 5th-95th percentile ribbon, using base graphics.
# temp     : summary data frame with columns loc, MEAN, q.05, q.95 (fish counts).
# TITLE.REG: region label used in the plot title.
# LOCATIONS: data frame of region names for the x-axis (location.name column).
# AGE, SEASON, i, j: used only to build the title (AGE[j]+1, SEASON[i]).
# k        : unused here — presumably kept for call-site symmetry; TODO confirm.
plot.mean <- function(temp,TITLE.REG,LOCATIONS,AGE,SEASON,i,j,k){
  par(mar=c(4,5,2,1))
  x.lim <- c(1,max(temp$loc))
  y.lim <- c(0,max(temp$q.95)/1000)
  # Build the ribbon polygon outline: lower bound left-to-right (a), then the
  # upper bound right-to-left (b), concatenated so the polygon closes.
  a <- temp[,c("loc","q.05")]; colnames(a)[2] <- "y"
  b <- temp[,c("loc","q.95")]; colnames(b)[2] <- "y"
  b <- b[order(b$loc,decreasing=T),]
  a <- rbind(a,b)
  a$y <- a$y / 1000
  # First plot establishes the coordinate system; the polygon is drawn over
  # it, then the mean line is re-drawn on top so it is not hidden.
  plot(MEAN/1000 ~loc,data=temp,xlim=x.lim,ylim=y.lim,type="l",axes=F,xlab="",ylab="", yaxs="i")
  par(new=T)
  polygon(x=a$loc,y=a$y,col=grey(0.7),border=F)
  par(new=T)
  plot(MEAN/1000 ~loc,data=temp,xlim=x.lim,ylim=y.lim,type="l",axes=F,xlab="",ylab="",lwd=2, yaxs="i")
  axis(1,las=2,at=1:max(temp$loc),LOCATIONS$location.name)
  axis(2,las=2)
  box(bty="o",lwd=2)
  title(ylab=paste("Chinook (thousands)"))
  title(main=paste(TITLE.REG,";age",AGE[j]+1,SEASON[i],"distribution"))
}
# Plot the coefficient of variation (SD/MEAN) of abundance by ocean region.
# Same interface as plot.mean; the uncertainty-ribbon code is present but
# deliberately commented out (only the CV line is drawn).
# temp     : summary data frame with columns loc, MEAN, SD, q.05, q.95.
# TITLE.REG: region label used in the plot title.
# LOCATIONS: data frame of region names for the x-axis (location.name column).
# AGE, SEASON, i, j: used only to build the title (AGE[j]+1, SEASON[i]).
# k        : unused here — presumably kept for call-site symmetry; TODO confirm.
plot.cv <- function(temp,TITLE.REG,LOCATIONS,AGE,SEASON,i,j,k){
  temp$CV <- temp$SD / temp$MEAN
  par(mar=c(4,5,2,1))
  x.lim <- c(1,max(temp$loc))
  y.lim <- c(0,round(max(temp$CV)*1.1,1))
  # NOTE(review): a and b build a ribbon polygon that is never drawn (the
  # polygon call below is commented out) — dead code carried over from
  # plot.mean.
  a <- temp[,c("loc","q.05")]; colnames(a)[2] <- "y"
  b <- temp[,c("loc","q.95")]; colnames(b)[2] <- "y"
  b <- b[order(b$loc,decreasing=T),]
  a <- rbind(a,b)
  a$y <- a$y / 1000
  plot(CV ~loc,data=temp,xlim=x.lim,ylim=y.lim,type="l",axes=F,xlab="",ylab="", yaxs="i")
  #par(new=T)
  # polygon(x=a$loc,y=a$y,col=grey(0.7),border=F)
  # par(new=T)
  #plot(MEAN/1000 ~loc,data=temp,xlim=x.lim,ylim=y.lim,type="l",axes=F,xlab="",ylab="",lwd=2, yaxs="i")
  axis(1,las=2,at=1:max(temp$loc),LOCATIONS$location.name)
  axis(2,las=2)
  box(bty="o",lwd=2)
  title(ylab=paste("Chinook CV"))
  title(main=paste(TITLE.REG,";age",AGE[j]+1,SEASON[i],"distribution"))
}
##################################################################################
########### MAKE PLOTS OF ABUNDANCE AND CV
##################################################################################
##### EACH REGION
# One PDF with, per origin region: three mean-abundance panels (one per
# season) followed by three CV panels, all at the single age in age.name.
season.name <- c("spring","summer","fall")
age.name <- c(3)
pdf("/Users/ole.shelton/GitHub/Orca_Salmon/Output plots/Mixed Model/Mean_CV_by_region.pdf",onefile = TRUE,height=3,width=8.5)
par(mfcol=c(1,3))
for(k in 1:nrow(sim.region)){
  # Mean abundance panels.
  for(i in seq_along(season.name)){
    for(j in seq_along(age.name)){
      temp <- all.site.summary[[sim.region$region[k]]]
      temp <- temp[temp$season.name == season.name[i] & temp$age.year == age.name[j],]
      plot.mean(temp,sim.region$region[k],LOCATIONS,age.name,season.name,i,j,k)
    }
  }
  # CV panels.
  # BUG FIX: the original inner loop ran over 1:length(AGE) — AGE being a
  # stale scalar left over from an earlier loop — while the subset index was
  # age.name[j].  Both lengths happen to be 1 here, but looping over age.name
  # makes the index and the loop agree.
  for(i in seq_along(season.name)){
    for(j in seq_along(age.name)){
      temp <- all.site.summary[[sim.region$region[k]]]
      temp <- temp[temp$season.name == season.name[i] & temp$age.year == age.name[j],]
      plot.cv(temp,sim.region$region[k],LOCATIONS,age.name,season.name,i,j,k)
    }
  }
}
dev.off()
######## TOTAL ABUNDANCE
# One PDF of total (all-origins-summed) Chinook: mean-abundance panels, then
# CV panels, by season.
# NOTE(review): the first inner loop iterates over age.name (length 1, from
# the block above) but subsets on AGE[j] (AGE is reassigned to c(2,3,4) here),
# so only AGE[1] = 2 is plotted while the title comes from age.name; the
# second loop uses age.name[j] for both.  The intended age set is ambiguous —
# confirm whether both loops should use age.name or AGE.
AGE <- c(2,3,4)
pdf("/Users/ole.shelton/GitHub/Orca_Salmon/Output plots/Mixed Model/Mean_CV_Sum.pdf",onefile = T,height=3,width=8.5)
par(mfcol=c(1,3))
for(i in 1:length(season.name)){
  for(j in 1:length(age.name)){
    temp <- total.chin
    temp <- temp[temp$season.name == season.name[i] & temp$age.year == AGE[j],]
    plot.mean(temp,"Total Chinook",LOCATIONS,age.name,season.name,i,j,k)
  }
}
for(i in 1:length(season.name)){
  for(j in 1:length(age.name)){
    temp <- total.chin
    temp <- temp[temp$season.name == season.name[i] & temp$age.year == age.name[j],]
    plot.cv(temp,"Total Chinook",LOCATIONS,age.name,season.name,i,j,k)
  }
}
dev.off()
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
##### PLOT FOR PUB ( stacked PROPORTIONAL CONTRIBUTION)
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
###################################################################################
region.area <- read.csv(file="/Users/ole.shelton/GitHub/Orca_Salmon_DATA/Region Area estimates/region_area_blake_7_2016.csv")
region.area <- merge(region.area,LOCATIONS)
## Use mean survivorship for
surv.fing <- mean(dat.mean.prop.fing[dat.mean.prop.fing>0])
surv.year <- mean(dat.mean.prop.year[dat.mean.prop.year>0])
#############################################################################################
### PROJECT SPATIAL DISTRIBUTION FOR EACH ORIGIN
###### Calculate Initial Monthly mortality rate
N.mod.int <- max(dat.bin$time)
LOC <- sort(unique(dat.bin$location))
AGE_MONTH_IDX <- 1:N.mod.int
SEASON_NAME <- c(c("spring","summer","fall"),rep(c("winter","spring","summer","fall"),4))
SEASON_IDX <- c(c(1,2,3),rep(c(1,1,2,3),4))
sim.dat <- expand.grid(age.month=AGE_MONTH_IDX,loc=LOC)
sim.dat$season.idx <- SEASON_IDX
sim.dat$season.name <-SEASON_NAME
sim.dat$age.year <- 0
sim.dat$age.year[sim.dat$age.month<=3]<-1
sim.dat$age.year[sim.dat$age.month >3 & sim.dat$age.month <= 7 ] <- 2
sim.dat$age.year[sim.dat$age.month >7 & sim.dat$age.month <= 11 ] <- 3
sim.dat$age.year[sim.dat$age.month >11 & sim.dat$age.month <= 15 ]<- 4
sim.dat$age.year[sim.dat$age.month >15 & sim.dat$age.month <= 19 ]<- 5
OUT <- sim.dat
for(i in 1:nrow(sim.region)){
finger <- rep(0,nrow(sim.dat))
yearling <- rep(0,nrow(sim.dat))
if(sim.region$finger[i] > 0){
for(j in 1:nrow(sim.dat)){
finger[j] <-
log(sim.region$finger[i] * 1e6) -
cum_M2[sim.dat$age.month[j]] + # fixed mortality for adults.
log(surv.fing) + # Initial period before fish are observed
# log_q_troll + # effort offset
# log_q_rec * effort_idx_rec[i] + # effort offset
# log_q_treaty * effort_idx_treaty[i] + # effort offset
# log_effort[i] +
log(origin_loc[sim.dat$season.idx[j],sim.region$origin.idx[i],sim.dat$loc[j]]) + # dispersion in the ocean term (sum to 1)
log(1 - sum_prob[sim.region$loc.spawn.idx[i],sim.dat$age.year[j]])
# log(vuln_mat[vuln_idx[i],age_vuln_idx[i]]) ;
}
}
if(sim.region$yearling[i] > 0){
for(j in 1:nrow(sim.dat)){
yearling[j] <-
log(sim.region$yearling[i] * 1e6) -
cum_M2[sim.dat$age.month[j]] + # fixed mortality for adults.
log(surv.year) + # Initial period before fish are observed
# log_q_troll + # effort offset
# log_q_rec * effort_idx_rec[i] + # effort offset
# log_q_treaty * effort_idx_treaty[i] + # effort offset
# log_effort[i] +
log(origin_loc[sim.dat$season.idx[j],sim.region$origin.idx[i],sim.dat$loc[j]]) + # dispersion in the ocean term (sum to 1)
log(1 - sum_prob[sim.region$loc.spawn.idx[i],sim.dat$age.year[j]])
# log(vuln_mat[vuln_idx[i],age_vuln_idx[i]]) ;
}
}
OUT[,paste(sim.region$region[i])] <- exp(finger) + exp(yearling)
}
# Column positions of the per-origin abundance columns within OUT.
THESE <- match(sim.region$region,colnames(OUT))
#OUT[,THESE] <- OUT[,THESE] / 1000
# Coastwide total abundance (all origins) for each row.
OUT$TOTAL <- rowSums(OUT[,THESE])
##################################################################################################################
##################################################################################################################
# Split by season and make quick diagnostic line plots of total abundance by
# location for selected ocean ages.
summer <- OUT[OUT$season.name =="summer",]
fall <- OUT[OUT$season.name =="fall",]
winter <- OUT[OUT$season.name =="winter",]
spring <- OUT[OUT$season.name =="spring",]
temp <- summer[summer$age.year == 5,]
plot(TOTAL~loc,data=temp,type="l")
temp <- fall[fall$age.year == 4,]
plot(TOTAL~loc,data=temp,type="l")
temp <- spring[spring$age.year == 4,]
plot(TOTAL~loc,data=temp,type="l")
# Long format (requires reshape2::melt): one row per (loc, season, age,
# origin) with abundance in `value`; TOTAL rows are kept separately.
OUT.LONG <- melt(OUT,id=c("loc","season.name","age.year","age.month","season.idx"))
OUT.TOT <- OUT.LONG[OUT.LONG$variable == "TOTAL",]
OUT.LONG <- OUT.LONG[OUT.LONG$variable != "TOTAL",]
#### MAKE CUMULATIVE DISTRIBUTION FOR ALL AREAS BASED ON MEAN SURVIVAL FOR EACH REGION, MEAN DISTRIBUTION, MEAN EVERYTHING.
age <- c(1,2,3,4)
#for(i in 1:length(age)){
# NOTE(review): the loop over ages is commented out; only i = 3 (fish age
# AGE=3+, plotted as age.name = 4) is summarized below.  Re-enable the loop
# to produce summaries for every age.
i=3
AGE <- age[i]
age.name <- AGE + 1
# Total abundance of fish of age >= AGE, by location and season.
OUT.OLD <- aggregate(OUT.TOT$value[OUT.TOT$age.year>=AGE],
by=list(loc=OUT.TOT$loc[OUT.TOT$age.year>=AGE],
season=OUT.TOT$season.name[OUT.TOT$age.year>=AGE]),sum)
OUT.OLD <- data.frame(OUT.OLD)
colnames(OUT.OLD)[3] <- "chin"
# Same summary, but broken out by origin region.
OUT.OLD.REG <- aggregate(OUT.LONG$value[OUT.LONG$age.year>=AGE],
by=list(loc=OUT.LONG$loc[OUT.LONG$age.year>=AGE],
season=OUT.LONG$season.name[OUT.LONG$age.year>=AGE],
region=OUT.LONG$variable[OUT.LONG$age.year>=AGE]),sum)
OUT.OLD.REG <- data.frame(OUT.OLD.REG)
colnames(OUT.OLD.REG)[4] <- "chin"
# Density = fish per km^2 of 10-200 m depth habitat in each location.
OUT.OLD.DENS<- merge(OUT.OLD.REG,region.area,by.x="loc",by.y="location.number")
OUT.OLD.DENS$dens <- OUT.OLD.DENS$chin / OUT.OLD.DENS$km2.10.200m
### Calculate Evenness for each region and each season
total.by.reg <- aggregate(OUT.OLD.REG$chin,by=list(loc=OUT.OLD.REG$loc,season=OUT.OLD.REG$season),sum)
colnames(total.by.reg)[3] <- "TOT"
total.by.reg <- merge(total.by.reg,region.area,by.x="loc",by.y="location.number")
total.by.reg$DENS <- total.by.reg$TOT / total.by.reg$km2.10.200m
# NOTE(review): TOT is rescaled to thousands BEFORE the proportion below is
# computed, so OUT.EVEN$prop sums to ~1000 per (loc, season), not 1.  Verify
# that gini.simp.div (defined elsewhere) normalizes internally.
total.by.reg$TOT <- total.by.reg$TOT /1000
OUT.EVEN <- merge(OUT.OLD.REG,total.by.reg)
OUT.EVEN$prop <- OUT.EVEN$chin / OUT.EVEN$TOT
even.plot <- aggregate(OUT.EVEN$prop,by=list(loc=OUT.EVEN$loc,season=OUT.EVEN$season),gini.simp.div)
colnames(even.plot)[3] <- "gini.simp"
# Convert per-origin abundance to thousands for plotting.
OUT.OLD.REG$chin <- OUT.OLD.REG$chin / 1000
# Proportional contribution
# NOTE(review): this first q1 and the first COLS palette are both overwritten
# below (q1 at the "Proportional contribution" section after the factor
# releveling; COLS by the second definition immediately following) -- they are
# dead code kept for reference.
q1 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="spring",], aes(x = loc, y = chin, fill = region)) +
geom_bar(position = "fill",stat = "identity") +
scale_y_continuous() +
labs(x = "", y = paste("Proportion",sep="")) +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name) +
ggtitle("Spring Distribution") +
theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=0.5)) +
scale_fill_discrete(name="Origin")
COLS <- c("SFB"="#d7191c",
"NCA"="#fdae61",
"SOR"="#ffffbf",
"COR"="#abd9e9",
"NOR"="#2c7bb6",
"COL" ="#d7191c",
"UPCOL"="#fdae61",
"WAC" = "#ffffbf",
"PUSO" ="#abd9e9",
"SGEO" ="#2c7bb6",
"SWVI" = "#d7191c")
# Active palette: one color per origin region, grouped warm->cool from
# California through British Columbia.
COLS <- c("SFB"= "#DAA520", #"#ffffcc",
"NCA"="#B8860B",
"SOR"="#3CB371",
"COR"="#228B22",
"NOR"="#006400",
"COL" ="#00BFFF",
"UPCOL"="#1E90FF",
"WAC" = "#0000FF",
"PUSO" ="#FF6347",
"SGEO" ="#DC143C",
"SWVI" = "#A52A2A")
# Fix the north-to-south stacking order of origins in the stacked bars.
OUT.OLD.REG$region <- factor(OUT.OLD.REG$region,
levels = c("SWVI", "SGEO","PUSO","WAC","UPCOL","COL",
"NOR","COR","SOR","NCA","SFB"))
OUT.OLD.DENS$region <- factor(OUT.OLD.DENS$region,
levels = c("SWVI", "SGEO","PUSO","WAC","UPCOL","COL",
"NOR","COR","SOR","NCA","SFB"))
# p + scale_colour_manual(values = COLS)
# scale_fill_brewer(palette = 12)
# Proportional contribution
# Panels a-c: proportional origin composition (stacked, normalized to 1)
# by location for each season; bars are flipped so locations run vertically.
q1 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="spring",], aes(x = loc, y = chin, fill = region )) +
geom_bar(position = "fill",stat = "identity") +
coord_flip() +
scale_y_continuous(expand = c(0, 0)) +
labs(x = "", y = "",title="a) Spring") +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name,expand = c(0, 0)) +
scale_fill_manual(values=COLS,name="Origin") +
theme_bw() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.9,vjust=0.5),plot.title = element_text(hjust=0,size=rel(0.9)),legend.position="none",
plot.margin=unit(c(0.1, 0.05, 0.05, 0.01), "lines"))
q1
q2 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="summer",], aes(x = loc, y = chin, fill = region )) +
geom_bar(position = "fill",stat = "identity") +
coord_flip() +
scale_y_continuous(expand = c(0, 0)) +
labs(x = "", y = "",title="b) Summer") +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name,expand = c(0, 0)) +
scale_fill_manual(values=COLS,name="Origin") +
theme_bw() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.9,vjust=0.5),plot.title = element_text(hjust=0,size=rel(0.9)),legend.position="none",
plot.margin=unit(c(0.1, 0.05, 0.05, 0.01), "lines"))
q3 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="fall",], aes(x = loc, y = chin, fill = region )) +
geom_bar(position = "fill",stat = "identity") +
coord_flip() +
scale_y_continuous(expand = c(0, 0)) +
labs(x = "", y = paste("Proportion",sep=""),title="c) Fall") +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name,expand = c(0, 0)) +
scale_fill_manual(values=COLS,name="Origin") +
theme_bw() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.9,vjust=0.5),plot.title = element_text(hjust=0,size=rel(0.9)),legend.position="none",
plot.margin=unit(c(0.1, 0.05, 0.05, 0.01), "lines"))
# Common x-limit (in thousands of fish) for the abundance panels, chosen by
# the age class being plotted.
if(age.name==3){LIM=c(0,4000)}
if(age.name==4){LIM=c(0,1500)}
# Panels d-f: total abundance (grey bars, thousands of fish) by location for
# each season.  Titles are drawn in white (placeholders for panel letters).
# NOTE(review): p1 and p2 share the title "e)"; p1 is presumably meant to be
# "d)" -- confirm against the figure caption.
p1 <- ggplot() +
#geom_bar(data=OUT.OLD.REG[OUT.OLD.REG$season=="spring",], aes(x = loc, y = chin, fill = region )) +
geom_bar(data=total.by.reg[total.by.reg$season == "spring",], aes(x = loc, y = TOT ),fill=grey(0.5),stat = "identity") +
#geom_line(data=total.by.reg[total.by.reg$season == "spring",], aes(x = loc, y = TOT ),color= "black") +
coord_flip() +
#labs(title = "New plot title")
labs(x = "",y="",title="e)") +
xlab(NULL) +#, y = paste("Chinook, age ",age.name,"+ (thousands)",sep="")) +
scale_x_continuous(expand=c(0.0,0),breaks = 1:nrow(LOCATIONS), labels= rep("",nrow(LOCATIONS))) + #rep("",nrow(LOCATIONS))) + #LOCATIONS$location.name) +
scale_y_continuous(expand=c(0,0),limits=LIM) +
scale_fill_manual(values=COLS,name="Origin") +
theme_bw() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.5,vjust=0.5),plot.title = element_text(hjust = 0,color="white",size=rel(0.9)),legend.position="none",
panel.border=element_rect(colour="black",size=1.5),
plot.margin=unit(c(0.1, 5, 0.05, 0.01), "lines"))
p1
p2 <- ggplot() +
geom_bar(data=OUT.OLD.REG[OUT.OLD.REG$season=="summer",], aes(x = loc, y = chin, fill = region ),stat = "identity") +
geom_bar(data=total.by.reg[total.by.reg$season == "summer",], aes(x = loc, y = TOT ),fill=grey(0.5),stat = "identity") +
#geom_line(data=total.by.reg[total.by.reg$season == "summer",], aes(x = loc, y = TOT ),color= "black") +
coord_flip() +
#labs(title = "New plot title")
labs(x = "",y="",title="e)") +
xlab(NULL) +#, y = paste("Chinook, age ",age.name,"+ (thousands)",sep="")) +
scale_x_continuous(expand=c(0.0,0),breaks = 1:nrow(LOCATIONS), labels= rep("",nrow(LOCATIONS))) + #rep("",nrow(LOCATIONS))) + #LOCATIONS$location.name) +
scale_y_continuous(expand=c(0,0),limits=LIM) +
# ggtitle("e)") +
scale_fill_manual(values=COLS,name="Origin") +
theme_bw() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.5,vjust=0.5),plot.title = element_text(hjust = 0,color="white",size=rel(0.9)),
panel.border=element_rect(colour="black",size=1.5),
plot.margin=unit(c(0.1, 0.0, 0.05, 0.01), "lines"),
legend.key.size = unit(0.4, "cm"))
p2
p3 <- ggplot() +
#geom_area(data=OUT.OLD.REG[OUT.OLD.REG$season=="fall",], aes(x = loc, y = chin, fill = region )) +
geom_bar(data=total.by.reg[total.by.reg$season == "fall",], aes(x = loc, y = TOT ),fill=grey(0.5),stat="identity") +
#geom_line(data=total.by.reg[total.by.reg$season == "fall",], aes(x = loc, y = TOT ),color= "black") +
coord_flip() +
labs(x = "", y = paste("Chinook, age ",age.name,"+ (1000s)",sep=""),title="f)") +
xlab(NULL) +
scale_x_continuous(expand=c(0.0,0),breaks = 1:nrow(LOCATIONS), labels= rep("",nrow(LOCATIONS))) + #rep("",nrow(LOCATIONS))) + #LOCATIONS$location.name) +
scale_y_continuous(expand=c(0,0),limits=LIM) +
scale_fill_manual(values=COLS,name="Origin") +
theme_bw() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.5,vjust=0.5),plot.title = element_text(hjust = 0,vjust=0,color="white",size=rel(0.9)),legend.position="none",
panel.border=element_rect(colour="black",size=1.5),
plot.margin=unit(c(0.1, 5, 0.05, 0.01), "lines"))
p3
######
# Write the composite 3x2 figure (abundance panels left, proportion panels
# right) to PDF.  quartz() is macOS-only; multiplot() is defined elsewhere.
quartz(file=paste("/Users/ole.shelton/GitHub/Orca_Salmon/Output plots/Mixed Model/Abundance plot ",age.name,"+.pdf",sep=""),dpi=600,height=8,width=7,type="pdf")
Layout= matrix(c(4,4,4,4,1,1,1,5,5,5,5,2,2,2,6,6,6,6,3,3,3),nrow=3,ncol=7,byrow=T)
QQ <- list(p1,p2,p3,q1,q2,q3)
multiplot(plotlist=QQ ,layout= Layout)
dev.off()
#######################
######################
#######################
######################
#######################
######################
# Same plot but with densities instead of total densities.
#######################
######################
#######################
######################
#######################
######################
# p + scale_colour_manual(values = COLS)
# scale_fill_brewer(palette = 12)
# Proportional contribution
# Panels a-c (density figure): identical proportional-composition panels to
# the abundance figure above (proportions are unchanged by the density
# rescaling), rebuilt here so the two figures are self-contained.
q1 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="spring",], aes(x = loc, y = chin, fill = region )) +
geom_bar(position = "fill",stat = "identity") +
coord_flip() +
scale_y_continuous(expand = c(0, 0)) +
labs(x = "", y = "",title="a) Spring") +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name,expand = c(0, 0)) +
scale_fill_manual(values=COLS,name="Origin") +
theme_bw() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.9,vjust=0.5),plot.title = element_text(hjust=0,size=rel(0.9)),legend.position="none",
plot.margin=unit(c(0.1, 0.05, 0.05, 0.01), "lines"))
q1
q2 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="summer",], aes(x = loc, y = chin, fill = region )) +
geom_bar(position = "fill",stat = "identity") +
coord_flip() +
scale_y_continuous(expand = c(0, 0)) +
labs(x = "", y = "",title="b) Summer") +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name,expand = c(0, 0)) +
scale_fill_manual(values=COLS,name="Origin") +
theme_bw() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.9,vjust=0.5),plot.title = element_text(hjust=0,size=rel(0.9)),legend.position="none",
plot.margin=unit(c(0.1, 0.05, 0.05, 0.01), "lines"))
q3 <- ggplot(OUT.OLD.REG[OUT.OLD.REG$season=="fall",], aes(x = loc, y = chin, fill = region )) +
geom_bar(position = "fill",stat = "identity") +
coord_flip() +
scale_y_continuous(expand = c(0, 0)) +
labs(x = "", y = paste("Proportion",sep=""),title="c) Fall") +
scale_x_continuous(breaks = 1:nrow(LOCATIONS), labels=LOCATIONS$location.name,expand = c(0, 0)) +
scale_fill_manual(values=COLS,name="Origin") +
theme_bw() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.9,vjust=0.5),plot.title = element_text(hjust=0,size=rel(0.9)),legend.position="none",
plot.margin=unit(c(0.1, 0.05, 0.05, 0.01), "lines"))
# x-limits for the density panels (fish per km^2), chosen by age class.
LIM = c(0,1000)
if(age.name==4){LIM=c(0,300)}
# Panels d-f (density figure): density of fish (TOT / habitat km^2) by
# location for each season.  Titles again drawn in white as placeholders;
# NOTE(review): p1 and p2 both use "e)" -- p1 is presumably meant to be "d)".
p1 <- ggplot() +
#geom_area(data=OUT.OLD.DENS[OUT.OLD.DENS$season=="spring",], aes(x = loc, y = dens, fill = region )) +
geom_bar(data=total.by.reg[total.by.reg$season == "spring",], aes(x = loc, y = DENS ),fill=grey(0.5),stat="identity") +
#geom_line(data=total.by.reg[total.by.reg$season == "spring",], aes(x = loc, y = DENS ),color= "black") +
coord_flip() +
#labs(title = "New plot title")
labs(x = "",y="",title="e)") + #, y = paste("Chinook, age ",age.name,"+ (thousands)",sep="")) +
xlab(NULL) +
#scale_x_continuous(breaks = 1:nrow(LOCATIONS),expand=c(0,0), labels= rep("",nrow(LOCATIONS))) + #LOCATIONS$location.name) +
scale_x_continuous(expand=c(0.0,0),breaks = 1:nrow(LOCATIONS), labels= rep("",nrow(LOCATIONS))) + #rep("",nrow(LOCATIONS))) + #LOCATIONS$location.name) +
scale_y_continuous(expand=c(0,0),limits=LIM) +
# ggtitle("e)") +
scale_fill_manual(values=COLS,name="Origin") +
theme_bw() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.5,vjust=0.5),plot.title = element_text(hjust = 0,color="white",size=rel(0.9)),legend.position="none",
panel.border=element_rect(colour="black",size=1.5),
plot.margin=unit(c(0.1, 5, 0.05, 0.01), "lines"))
p1
p2 <- ggplot() +
geom_bar(data=OUT.OLD.DENS[OUT.OLD.DENS$season=="summer",], aes(x = loc, y = dens, fill = region ),stat="identity") +
geom_bar(data=total.by.reg[total.by.reg$season == "summer",], aes(x = loc, y = DENS ),fill=grey(0.5),stat="identity") +
#geom_line(data=total.by.reg[total.by.reg$season == "summer",], aes(x = loc, y = DENS ),color= "black") +
coord_flip() +
#labs(title = "New plot title")
labs(x = "",y="",title="e)") +
xlab(NULL) +#, y = paste("Chinook, age ",age.name,"+ (thousands)",sep="")) +
scale_x_continuous(expand=c(0.0,0),breaks = 1:nrow(LOCATIONS), labels= rep("",nrow(LOCATIONS))) + #rep("",nrow(LOCATIONS))) + #LOCATIONS$location.name) +
scale_y_continuous(expand=c(0,0),limits=LIM) +
# ggtitle("e)") +
scale_fill_manual(values=COLS,name="Origin") +
theme_bw() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.5,vjust=0.5),plot.title = element_text(hjust = 0,color="white",size=rel(0.9)),
panel.border=element_rect(colour="black",size=1.5),
plot.margin=unit(c(0.1, 0.0, 0.05, 0.01), "lines"),
legend.key.size = unit(0.4, "cm"))
p2
p3 <- ggplot() +
geom_bar(data=total.by.reg[total.by.reg$season == "fall",], aes(x = loc, y = DENS ),fill=grey(0.5),stat="identity") +
#geom_line(data=total.by.reg[total.by.reg$season == "fall",], aes(x = loc, y = DENS ),color= "black") +
coord_flip() +
labs(x = "", y = paste("Chinook density (#/km2)",sep=""),title="f)") +
xlab(NULL) +
scale_x_continuous(expand=c(0.0,0),breaks = 1:nrow(LOCATIONS), labels= rep("",nrow(LOCATIONS))) + #rep("",nrow(LOCATIONS))) + #LOCATIONS$location.name) +
scale_y_continuous(expand=c(0,0),limits=LIM) +
scale_fill_manual(values=COLS,name="Origin") +
theme_bw() +
theme(axis.text.x = element_text(angle = 0, hjust = 0.5,vjust=0.5),plot.title = element_text(hjust = 0,vjust=0,color="white",size=rel(0.9)),legend.position="none",
panel.border=element_rect(colour="black",size=1.5),
plot.margin=unit(c(0.1, 5, 0.05, 0.01), "lines"))
p3
######
# Write the density figure to PDF (macOS quartz device; multiplot() is
# defined elsewhere).
quartz(file=paste("/Users/ole.shelton/GitHub/Orca_Salmon/Output plots/Mixed Model/Chinook Density plot ",age.name,"+.pdf",sep=""),dpi=600,height=8,width=7,type="pdf")
Layout= matrix(c(4,4,4,4,1,1,1,5,5,5,5,2,2,2,6,6,6,6,3,3,3),nrow=3,ncol=7,byrow=T)
QQ <- list(p1,p2,p3,q1,q2,q3)
multiplot(plotlist=QQ ,layout= Layout)
dev.off()
|
4ce12d5d341edd7bf76187cefc6f09da8d841931
|
bb72c39fdddaec1fe09483c61877980d86e49a00
|
/code/summary_functions.R
|
bd6b79227c28bd7a38660bb75cac69738c200dd7
|
[] |
no_license
|
adorph/fireandfrag_reptile_msom_example
|
acd717764631183ba419c6eb19bf08fc0e55685a
|
7407ee1854e14e59fd6c5819bb09a0a8a95c48a2
|
refs/heads/master
| 2022-12-24T15:02:32.988569
| 2020-09-22T04:03:49
| 2020-09-22T04:03:49
| 297,508,822
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,517
|
r
|
summary_functions.R
|
##
## This file contains summary functions used to evaluate model fit and how species respond to environmental
## correlates. Parts of these functions are based on code from Kéry M & Royle JA (2016) (Applied Hierarchical
## Modeling in Ecology: Analysis of distribution, abundance and species richness in R and BUGS: Volume 1).
##
## Inputs mean:
## data: data list compiled using the dataprep function
## model: jags model
## mcmc: jags.basic model
## extract: parameter value to extract from the MSOM model (e.g. lpsi)
##
## NOTE: The some functions require a csv containing a list of your sites (using site id) and the x
## and y coordinates for the sites. These need to be in the same order as they were in the dataprep matrix.
## NOTE: Some of the functions (e.g. cumulativeDet) will need to be updated for other datasets.
#Extract rows from results based on parameter name
# Pull the rows of a fitted model's summary table whose parameter names
# match the regular expression `extract` (e.g. "Nsite", "^lp\\[").
# Returns a data.table with the parameter names in a "parameter" column.
extract.par <- function(model, extract){
  summ <- setDT(as.data.frame(model$summary), keep.rownames = "parameter")
  dplyr::filter(summ, grepl(extract, parameter))
}
#Extract species richness with site Id
# Attach site ids and coordinates to the site-level species-richness
# estimates (Nsite) from the MSOM model.
# NOTE(review): reads ./site_coords.csv from the working directory, and the
# cbind() assumes its rows (after sorting by site) are in the same order as
# the Nsite rows -- see the ordering caveat in the file header.
extract.nsite <- function(model){
require(dplyr)
df <- read.csv("./site_coords.csv", header = T, sep = ",", dec = ".")
df <- (df %>%
arrange(site) %>%
select(site=site, xcoord, ycoord))
# Posterior summary rows for per-site richness (one row per site).
nsite <- extract.par(model, "Nsite")
# Side-by-side: site id + coordinates + richness posterior summary.
out <- cbind(df, nsite)
return(out)
}
#Check correlation between observed and mean estimated species richness
checkCor <- function(data, model){
  # Pearson correlation between posterior-mean site richness (Nsite) and
  # the observed richness (data$C from the dataprep list).
  est <- extract.par(model, "Nsite")
  est$C <- data$C
  cor(est$mean, est$C)
}
#Calculate and plot cumulative detection for the species over 50 days. Returns a plot and table containing the species
#and detection probabilities after a single day and after the 5 day sample period.
# Cumulative detection probability over 1-50 survey days for each species,
# from the species-level detection intercepts (lp) of the MSOM model.
# Returns a list with:
#   detPlot: faceted ggplot of cumulative detection curves (95% CRI ribbon),
#            with a dotted line at the 5-day deployment length.
#   detDays: per-species single-day and 5-day detection summaries.
# NOTE(review): the recode() below hard-codes this dataset's 8 species names
# and abbreviations; it must be updated for other datasets (see file header).
cumulativeDet <- function(data, model){
require(ggplot2)
require(dplyr)
# This function calculates probability detected when present and associated 95% CRIs for 1 to 50 days
# Cumulative detection after `day` occasions: 1 - (1 - p)^day, applied to
# the point estimate and both CRI bounds.
ddp50 <- function (x) {
day <- data.frame (day = seq(1, 50))
return (mutate (day, mean = 1-(1-x$est)^day, low = 1-(1-x$low)^day, upp = 1-(1-x$upp)^day))
}
spec <- data$species.list
ndf <- data.frame()
# One pass per species: pull the logit-scale mean and 95% CRI for lp[i],
# back-transform with plogis, and expand to the 50-day curve.
for(i in 1:length(spec)){
det <- extract.par(model, "^lp\\[")[i,2] #Extract mean
luci <- cbind(extract.par(model, "^lp\\[")[i,4], extract.par(model, "^lp\\[")[i,8]) #Extract 95% CRIs
ddp <- data.frame(cbind (est = plogis (det), ci = plogis (luci)))
names (ddp) = c("est", "low", "upp")
values <- ddp50(ddp)
values$species <- spec[i]
ndf <- rbind(ndf, values)
rm(det, luci, ddp, values)
}
# 5-day (deployment-length) cumulative detection, used for facet labels.
nddp <- ndf %>% filter(day %in% 5)
mns <- format(round(nddp$mean, 2), nsmall=2)
# Facet labels: species abbreviation + its 5-day cumulative detection.
ndf <- ndf %>% mutate(species, lab=recode(species,
"Eastern Three Lined Skink" = paste("ETS : mu(5) =", mns[1]),
"Pale flecked Garden Skink" = paste("PGS : mu(5) =", mns[2]),
"Robust Ctenotus" = paste("CTE : mu(5) =", mns[3]),
"Shrubland Morethia Skink" = paste("SMS : mu(5) =", mns[4]),
"South eastern Slider" = paste("SES : mu(5) =", mns[5]),
"Southern Grass Skink" = paste("SGS : mu(5) =", mns[6]),
"Southern Water Skink" = paste("SWS : mu(5) =", mns[7]),
"White's Skink" = paste("WHI : mu(5) =", mns[8]) ))
# Plot a graph of cumulative detection probability over 50 days with the mean detection probability after 5 days marked by
# a dotted line with the value printed above the graph.
aa <- ggplot(data=ndf, aes(x=day, y=mean, group=species))+
geom_line(size=1)+
geom_vline(xintercept=c(5), linetype="dotted")+ #change 20 to the deployment time for your cameras
geom_ribbon(aes(x=day, ymin=low, ymax=upp), color="NA", alpha=0.4, fill="grey")+
labs(x="Days", y="Probability detected when present")+
facet_wrap(.~ lab, scales="free_x", ncol=3, labeller=label_wrap_gen()) +
theme_classic()+
theme(axis.title=element_text(size=12))+
theme(axis.text=element_text(size=10, colour="black"))+
scale_x_continuous(limits=c(0,50), breaks=seq(0,50, 5), expand = c(0,0))+
scale_y_continuous(limits=c(0,1), breaks=seq(0, 1, .2), expand = c(0,0))
# Table: single-day (day 1) and 5-day cumulative detection per species.
oddp <- ndf %>% filter(day %in% 1)
sumdp <- cbind(oddp$species, dailyMean=oddp$mean, dailyLow=oddp$low, dailyHigh=oddp$upp,
cumMean = nddp$mean, cumLow=nddp$low, cumHigh=nddp$upp)
return(list(detPlot=aa, detDays=sumdp))
}
#See baseline estimates of species-specific occupancy and detection
# Baseline (intercept) estimates of species-specific occupancy and detection
# probability, back-transformed from the logit scale.
#
# Args:
#   data:  data list from the dataprep function (needs $nspec and
#          $spec.name.list).
#   model: fitted jagsUI model (needs $sims.list$lpsi and $sims.list$lp).
#
# Returns a named list (occupancy, detection); elements remain reachable
# positionally ([[1]], [[2]]) for existing callers.  Each element is a matrix
# with posterior mean, posterior SD, and the species name per observed
# species.  NOTE: cbind() with the character species names coerces the whole
# matrix to character, so callers must convert the first two columns back to
# numeric before doing arithmetic.
occ.det.sum <- function(data, model){
  nspec <- data$nspec
  # MCMC draws of the species-level logit-scale intercepts; columns beyond
  # nspec (data-augmentation species) are dropped.
  psi.occ <- plogis(model$sims.list$lpsi[, 1:nspec])
  p.det <- plogis(model$sims.list$lp[, 1:nspec])
  occ.matrix <- cbind(apply(psi.occ, 2, mean), apply(psi.occ, 2, sd), data$spec.name.list)
  det.matrix <- cbind(apply(p.det, 2, mean), apply(p.det, 2, sd), data$spec.name.list)
  list(occupancy = occ.matrix, detection = det.matrix)
}
#Create a list of estimated true occurrence for species at a site, with X and Y coords for that site for visualisation
# Estimated true occurrence (z) per species per site, with site coordinates
# attached for mapping.  Returns a long data frame with one row per
# (site, species) and columns for the posterior mean, sd, and median (q50).
# NOTE(review): the number of sites is hard-coded as 107 below, and the CSV
# rows (after sorting by site) are assumed to align with the rows of the z
# matrices -- TODO confirm/parameterize for other datasets.
spp.site <- function(data, model) {
df <- read.csv("./site_coords.csv", header = T, sep = ",", dec = ".")
df <- df[order(df$site),]
df <- (df %>% select(site=site, xcoord, ycoord))[1:107,]
# Posterior mean of z (site x species), labeled with species names.
spdf <- as.data.frame(model$mean$z)
colnames(spdf) <- data$species.list
spdf <- cbind(df, spdf)
# Posterior SD of z.
spsd <- as.data.frame(model$sd$z)
colnames(spsd) <- data$species.list
spsd <- cbind(df, spsd)
# Posterior median of z.
sp50 <- as.data.frame(model$q50$z)
colnames(sp50) <- data$species.list
sp50 <- cbind(df, sp50)
# Wide -> long: columns 4+ are the species columns in each table.
spdf <- spdf %>% pivot_longer(cols=4:ncol(spdf), names_to="Species", values_to = "mean")
spsd <- spsd %>% pivot_longer(cols=4:ncol(spsd), names_to="Species", values_to = "sd")
sp50 <- sp50 %>% pivot_longer(cols=4:ncol(sp50), names_to="Species", values_to = "q50")
# Join the three summaries on site/coords/species.
out <- merge(spdf, spsd, by=c("site", "xcoord", "ycoord", "Species"))
out <- merge(out, sp50, by=c("site", "xcoord", "ycoord", "Species"))
return(out)
}
#Plot observed number of species against estimated number of species
plotobs.est <- function(data, model){
  # Scatter of estimated site richness (posterior mean +/- SD from Nsite)
  # against observed richness (data$C), with a 1:1 reference line.
  rich <- extract.par(model, "Nsite")
  rich$C <- data$C
  ggplot(rich) +
    geom_abline(aes(intercept = 0, slope = 1)) +
    geom_errorbar(aes(ymin = mean - sd, ymax = mean + sd, x = C),
                  width = 0, color = "grey") +
    geom_point(aes(x = C, y = mean)) +
    coord_cartesian(xlim = c(0, 8), ylim = c(0, 8)) +
    labs(x = "Observed Number of Species", y = "Estimated Number of Species") +
    theme_bw() +
    theme(panel.grid = element_blank(),
          axis.text = element_text(size = 9),
          axis.title = element_text(size = 10))
}
#Plot estimated (Nsite) against observed species richness
# Base-graphics plot of estimated (finite-sample, Nocc.fs) vs. observed
# number of occupied sites per species, with a 1:1 line and grey 95% CRIs.
plotobs.sites <- function(data, model){
  ndf <- extract.par(model, "Nocc.fs")
  # BUG FIX: extract.par() returns a data.table, and data.table's `ndf[, 2]`
  # yields a one-column data.table rather than a vector, which plot()/max()
  # mishandle.  Use `[[ ]]` to extract plain vectors (col 2 = mean,
  # col 4 = 2.5% CRI, col 8 = 97.5% CRI of the summary table).
  est <- ndf[[2]]
  plot(data$obs.occ, est, xlab="Observed number of occupied sites",
       ylab = "Estimated version of quantity",
       ylim = c(0, max(est)), frame = F, pch = 16)
  abline(0, 1)
  segments(data$obs.occ, ndf[[4]], data$obs.occ, ndf[[8]], col = "grey")
}
# Community distribution of average occupancy and detection probability
# Community-level distributions of species occupancy and detection: draw
# species probabilities from the fitted community hyper-parameters
# (mu/sd on the logit scale) and plot both densities side by side, with the
# mean marked by a red line.
plotcomm.dist <- function(model){
  # FIX: restore the caller's graphics settings on exit instead of leaking
  # the mfrow change into subsequent plots.
  op <- par(mfrow = c(1,2)) # Fig. 11-16
  on.exit(par(op), add = TRUE)
  psi.sample <- plogis(rnorm(10^6, mean = model$mean$mu.lpsi, sd = model$mean$sd.lpsi))
  p.sample <- plogis(rnorm(10^6, mean = model$mean$mu.lp, sd = model$mean$sd.lp))
  hist(psi.sample, freq = F, breaks = 50, col = "grey", xlab = "Species occupancy probability", ylab = "Density", main = "")
  abline(v = mean(psi.sample), col = "red", lwd = 2)
  hist(p.sample, freq = F, breaks = 50, col = "grey", xlab = "Species detection probability", ylab = "Density", main = "")
  abline(v = mean(p.sample), col = "red", lwd = 2)
}
#Plot occupancy vs detection estimate
# Species-level detection estimate vs. occupancy estimate, with +/- 1 SD
# error bars in both directions.
plotocc.det <- function(data, model){
  occ.det <- occ.det.sum(data, model)
  occ <- setDT(as.data.frame(occ.det[[1]]), keep.rownames = "Species")
  colnames(occ) <- c("Species", "Mean.Occupancy", "SD.Occupancy", "id")
  det <- setDT(as.data.frame(occ.det[[2]]), keep.rownames = "Species")
  colnames(det) <- c("Species", "Mean.Detection", "SD.Detection", "id")
  occdet <- as.data.frame(merge(occ, det, by = c("Species", "id")))
  # BUG FIX: occ.det.sum() builds its matrices with cbind() of numbers and
  # character species names, which coerces everything to character; convert
  # the summary columns back to numeric so the aes() arithmetic below works.
  for (cl in c("Mean.Occupancy", "SD.Occupancy", "Mean.Detection", "SD.Detection")) {
    occdet[[cl]] <- as.numeric(occdet[[cl]])
  }
  g <- ggplot(occdet) +
    geom_errorbar(aes(ymin=(Mean.Detection-SD.Detection), ymax=(Mean.Detection+SD.Detection), x=Mean.Occupancy), width=0.0, color="grey") +
    geom_errorbarh(aes(xmin=Mean.Occupancy-SD.Occupancy, xmax=Mean.Occupancy+SD.Occupancy, y=Mean.Detection), height=0, color="grey") +
    geom_point(aes(x=Mean.Occupancy, y=Mean.Detection)) +
    theme_bw() +
    labs(y="Detection Estimate", x="Occupancy Estimate") +
    coord_cartesian(xlim=c(0,1), ylim=c(0,1)) +
    theme(panel.grid=element_blank(),
          axis.text=element_text(size=9),
          axis.title=element_text(size=10))
  return(g)
}
# Visualize covariate mean relationships for the average species
# Detection-covariate relationships (temperature and date, linear +
# quadratic on the logit scale) for the community-average species, with
# pointwise 95% credible bands computed from the MCMC draws.
plotcovar.relation <- function(data, model){
  # Prediction grids on the observed scale, standardized the same way the
  # covariates were standardized when the model was fit.
  o.temp <- seq(13, 40,, 500)
  o.date <- seq(1, 146,, 500)
  temp.pred <- as.matrix((o.temp - mean(data$temp)) / sd(data$temp))
  date.pred <- as.matrix((o.date - mean(data$date)) / sd(data$date))
  tmp <- model$sims.list           # grab MCMC samples
  nsamp <- length(tmp$mu.lp)       # number of mcmc samples
  # predC[grid point, draw, covariate]: community-mean ("C") detection.
  predC <- array(NA, dim = c(500, nsamp, 2))
  for(i in 1:nsamp){
    predC[,i,1] <- plogis(tmp$mu.lp[i] + tmp$mu.betalp1[i] * temp.pred + tmp$mu.betalp2[i] * (temp.pred^2))
    predC[,i,2] <- plogis(tmp$mu.lp[i] + tmp$mu.betalp3[i] * date.pred + tmp$mu.betalp4[i] * (date.pred^2))
  }
  # Posterior means and 95% CRIs across draws, then plot.
  pmC <- apply(predC, c(1,3), mean)
  criC <- apply(predC, c(1,3), function(x) quantile(x, prob = c(0.025, 0.975), na.rm=T))
  # FIX: restore graphics state on exit instead of leaking mfrow/mar.
  op <- par(mfrow=c(1,2), mar=c(4,4,1,1))
  on.exit(par(op), add = TRUE)
  plot(o.temp, pmC[,1], ylim=c(0,1), col = "blue", lwd = 3, type = 'l', lty = 1, frame = F, xlab = "Temp", ylab = "Community mean detection")
  matlines(o.temp, t(criC[,,1]), col = "grey", lty = 1)
  plot(o.date, pmC[,2], ylim=c(0,1), col = "blue", lwd = 3, type = 'l', lty = 1, frame = F, xlab = "Date", ylab = "Community mean detection")
  matlines(o.date, t(criC[,,2]), col = "grey", lty = 1)
}
#Plot regression coefficients
# Caterpillar plots of the species-level detection regression coefficients
# (temperature and date, linear and quadratic), with the community-mean
# hyper-parameter and its 95% CRI overlaid as red lines.  Intervals that
# exclude zero are colored blue.  Returns the assembled 2x2 grid.
plotreg.coef <- function(data, model, mcmc){
all20 <- as.matrix(mcmc)
str(all20) # look at the MCMC output  -- NOTE(review): debug print left in
pm <- apply(all20, 2, mean) # Get posterior means and 95% CRIs
pm1 <- setDT(as.data.frame(pm), keep.rownames = "parameter")
cri <- apply(all20, 2, function(x) quantile(x, prob = c(0.025, 0.975))) # CRIs
cri1 <- as.data.frame(cri)
setDT(cri1, keep.rownames = "CRI")
# One caterpillar panel: species intervals + community mean/CRI verticals.
# NOTE(review): the y-axis labels reference `tdet$spec` from the enclosing
# environment (not the `data` argument), so all four panels use the species
# order of tdet; this works because the species lists are identical, but it
# is fragile -- consider passing the labels in explicitly.
plotcoef <- function(data, mu.betalp, specno, mytitle, ylab){
g <- ggplot(data=data) +
geom_errorbarh(aes(xmin=`2.5%`, xmax=`97.5%`, y=id, color=sig1), size=1, height=0.01) +
geom_point(mapping=aes(x=pm, y=id)) +
geom_vline(aes(xintercept=0), color="black") +
geom_vline(aes(xintercept=mu.betalp[,2]), color="red") +
geom_vline(aes(xintercept=mu.betalp[,4]), linetype="dashed", color="red") +
geom_vline(aes(xintercept=mu.betalp[,8]), linetype="dashed", color="red") +
scale_color_manual(values=c("black", "blue")) +
scale_y_continuous(breaks=1:specno, labels=tdet$spec, trans = "reverse") +
coord_cartesian(xlim=c(-2.5,2.5)) +
labs(title=mytitle) +
theme_bw() +
theme(legend.position = "none",
panel.grid = element_blank(),
axis.title = element_blank(),
plot.title = element_text(size=10),
axis.text = element_text(size=9))
if(ylab==F){
g <- g +
theme(axis.text.y = element_blank())
}
return(g)
}
# Temp linear (Fig. 11-20 left)
tlpm <- filter(pm1, grepl("betalp1", parameter))
tlpm$id <- 1:data$nspec
tlpm$spec <- data$species.list
tlcri <- cri1 %>%
select(CRI, contains("betalp1")) %>%
pivot_longer(cols=-CRI, names_to="parameter", values_to="values") %>%
pivot_wider(names_from="CRI", values_from = "values")
tdet <- merge(tlpm, tlcri, by=c("parameter"))
# "Significant" if the 95% CRI excludes zero (both bounds same sign).
tdet$sig1 <- (tdet[,5] * tdet[,6]) > 0
mu.betalp1 <- extract.par(model, "mu.betalp1")
#Effects of temp (quadratic) on detection
tqpm <- filter(pm1, grepl("betalp2", parameter))
tqpm$id <- 1:data$nspec
tqpm$spec <- data$species.list
tqcri <- cri1 %>%
select(CRI, contains("betalp2")) %>%
pivot_longer(cols=-CRI, names_to="parameter", values_to="values") %>%
pivot_wider(names_from="CRI", values_from = "values")
tqdet <- merge(tqpm, tqcri, by=c("parameter"))
tqdet$sig1 <- (tqdet[,5] * tqdet[,6]) > 0
mu.betalp2 <- extract.par(model, "mu.betalp2")
# Effects of date (linear) on detection
dlpm <- filter(pm1, grepl("betalp3", parameter))
dlpm$id <- 1:data$nspec
dlpm$spec <- data$species.list
dlcri <- cri1 %>%
select(CRI, contains("betalp3")) %>%
pivot_longer(cols=-CRI, names_to="parameter", values_to="values") %>%
pivot_wider(names_from="CRI", values_from = "values")
dldet <- merge(dlpm, dlcri, by=c("parameter"))
dldet$sig1 <- (dldet[,5] * dldet[,6]) > 0
mu.betalp3 <- extract.par(model, "mu.betalp3")
#Effects of date (quadratic) on detection
dqpm <- filter(pm1, grepl("betalp4", parameter))
dqpm$id <- 1:data$nspec
dqpm$spec <- data$species.list
dqcri <- cri1 %>%
select(CRI, contains("betalp4")) %>%
pivot_longer(cols=-CRI, names_to="parameter", values_to="values") %>%
pivot_wider(names_from="CRI", values_from = "values")
dqdet <- merge(dqpm, dqcri, by=c("parameter"))
dqdet$sig1 <- (dqdet[,5] * dqdet[,6]) > 0
mu.betalp4 <- extract.par(model, "mu.betalp4")
# Assemble the four panels (date panels on top, temperature below) and add
# a shared x-axis label; requires cowplot (plot_grid) and gridExtra/grid.
p1 <- plotcoef(tdet, mu.betalp1, data$nspec, "Temperature (linear)", ylab=T)
p2 <- plotcoef(tqdet, mu.betalp2, data$nspec, "Temperature (quadratic)", ylab=F)
p3 <- plotcoef(dldet, mu.betalp3, data$nspec, "Date (linear)", ylab=T)
p4 <- plotcoef(dqdet, mu.betalp4, data$nspec, "Date (quadratic)", ylab=F)
pg <- plot_grid(p3, p4, p1, p2, rel_widths = c(1.6,1))
xlab <- textGrob("Parameter Estimates", gp=gpar(fontsize=10))
ga <- grid.arrange(arrangeGrob(pg, bottom=xlab))
return(ga)
}
# Predict detection for temperature and rainfall for each of the observed species
#
# plotdet.covar: plots each species' detection probability as a function of
# survey temperature and survey date, using posterior MEANS of the
# species-specific detection coefficients (intercept lp, linear/quadratic
# temperature betalp1/betalp2, linear/quadratic date betalp3/betalp4).
#
# Args:
#   data  - list; must carry temp, date (raw covariates used for the original
#           standardization), nspec, and spec.name.list (named list whose names
#           label the species) -- presumably the same list passed to the model;
#           TODO confirm against the model-fitting code.
#   model - fitted model object; NOTE(review): never referenced in this body,
#           apparently kept only for signature symmetry with sibling functions.
#   mcmc  - MCMC draws coercible via as.matrix() (columns = parameters).
#
# Returns: a ggplot object with two free-x facets ("Date", "Temperature").
# Side effects: prints str() of the sample matrix and sets base-graphics par()
# (the latter has no effect on the ggplot output).
#
# NOTE(review): relies on setDT() (data.table) and filter() (dplyr) being
# attached by the caller -- only ggplot2 and tidyr are require()'d here.
plotdet.covar <- function(data, model, mcmc){
require(ggplot2)
require(tidyr)
# Prediction grids: 500 evenly spaced points over the plotting ranges
# (13-40 degrees; day-of-season 1-146 -- presumably the observed ranges).
o.temp <- seq(13, 40,, 500)
o.date <- seq(1, 146,, 500)
# Standardize the grids with the ORIGINAL data's mean/sd so predictions are
# on the same scale the coefficients were estimated on.
temp.pred <- as.matrix((o.temp - mean(data$temp)) / sd(data$temp))
date.pred <- as.matrix((o.date - mean(data$date)) / sd(data$date))
all20 <- as.matrix(mcmc)
str(all20) # look at the MCMC output
pm <- apply(all20, 2, mean) # Get posterior means and 95% CRIs
# keep.rownames lifts parameter names into a "parameter" column for filtering.
pm1 <- setDT(as.data.frame(pm), keep.rownames = "parameter")
cri <- apply(all20, 2, function(x) quantile(x, prob = c(0.025, 0.975))) # CRIs
cri1 <- as.data.frame(cri)
# NOTE(review): cri2 is computed but never used below (CRI ribbons were
# presumably planned but not plotted).
cri2 <- cri1 %>% pivot_longer(cols=2:length(cri1), names_to = "parameter", values_to = "cri")
# Effects of temp (linear) on detection
# NOTE(review): base-graphics par() call -- has no effect on the ggplot below.
par(mfrow = c(2,2), cex.lab = 0.7, cex.axis = 0.7)
#lp
# Detection intercepts: names like "lp[1]", "lp[2]", ...
lppm <- filter(pm1, grepl("^lp\\[", parameter))
# Temp linear
tlpm <- filter(pm1, grepl("betalp1", parameter))
# Temp quadratic
tqpm <- filter(pm1, grepl("betalp2", parameter))
#Date linear
dlpm <- filter(pm1, grepl("betalp3", parameter))
# Date (quadratic) on detection
dqpm <- filter(pm1, grepl("betalp4", parameter))
# NOTE(review): third slice of dim 3 is allocated but never filled/used.
predS <- array(NA, dim = c(500, data$nspec, 3)) # covariate value x species x response, "S" for 'species'
# One row per species: intercept + temperature and date coefficients.
p.coef <- cbind(lp=lppm[,2], betalp1 =tlpm[,2], betalp2 = tqpm[,2], betalp3 = dlpm[,2], betalp4 = dqpm[,2])
for(i in 1:data$nspec){ # Loop over 16 observed species
predS[,i,1] <- plogis(p.coef[i,1] + p.coef[i,2] * temp.pred + p.coef[i,3] * temp.pred^2) # p ~ date
predS[,i,2] <- plogis(p.coef[i,1] + p.coef[i,4] * date.pred + p.coef[i,5] * date.pred^2) # p ~ duration
}
# Plots for detection probability and temperature and rainfall
# Long-format frames: one row per (grid value, species), labeled by covariate.
dtemp <- as.data.frame(cbind(o.temp, predS[,,1]))
cnames <- names(data$spec.name.list)
colnames(dtemp) <- c("Values", cnames)
dtemp$Variable <- c("Temperature")
dtemp <- dtemp %>%
select("Values", "Variable", names(data$spec.name.list)) %>%
pivot_longer(cols = -c("Values", "Variable"), names_to = "Species", values_to = "Detection Probability")
ddate <- as.data.frame(cbind(o.date, predS[,,2]))
colnames(ddate) <- c("Values", cnames)
ddate$Variable <- c("Date")
ddate <- ddate %>%
select("Values", "Variable", names(data$spec.name.list)) %>%
pivot_longer(cols = -c("Values", "Variable"), names_to = "Species", values_to = "Detection Probability")
ndat <- rbind(dtemp, ddate)
# Faceted curves: one panel per covariate, one colored line per species.
ggplot(data=ndat, group=Variable) +
geom_line(aes(x=Values, y=`Detection Probability`, color=Species), lwd=1) +
facet_wrap(. ~ Variable, scales="free_x", ncol=2, strip.position = "bottom") +
ylim(0,1) +
theme_bw() +
theme(strip.text = element_text(size = 9, margin = margin(0.0,0,0.0,0, "cm")),
strip.background = element_rect(fill="white", color=NA),
strip.placement = "outside",
axis.text = element_text(size=9),
axis.title.x=element_blank(),
axis.title.y=element_text(size=10),
legend.text = element_text(size=9),
legend.title = element_text(size=9),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.margin = unit(c(0.5,0.05,0,0), "cm"),
panel.spacing.x = unit(5, "mm"))
}
|
a092947224986120fa5cafa12f34cda4d04d90fa
|
d7f68113ba841857d68f2ac452bcda91fe373cf0
|
/Insight/Scripts/Imaging Data Import and Cleaning.R
|
27820d0a48c4b0b272be654ee339bdfedd8b23e0
|
[] |
no_license
|
ramyead/Insight-Project-kNOw-Care
|
bc731398d3a49ac803af08d70ca58beb9a38d09d
|
654c0203bc51036f671a14c17e259c8fa4f17b47
|
refs/heads/master
| 2021-01-17T11:58:33.172780
| 2017-06-26T00:45:01
| 2017-06-26T00:45:01
| 95,390,630
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 609
|
r
|
Imaging Data Import and Cleaning.R
|
# Import and clean the CMS "Outpatient Imaging Efficiency" hospital flat file,
# producing one row per hospital/state with one numeric column per measure.
pacman::p_load(stringr, ggplot2, car, effects, lme4, lmerTest, dplyr, reshape2, tidyr, sjPlot, nlme)

imaging <- read.csv("Data/Hospital_Revised_Flatfiles/Outpatient Imaging Efficiency - Hospital.csv")

# Lookup table: measure ID -> human-readable measure name.
imaging.n <- distinct(imaging, Measure.ID, Measure.Name) %>% as.data.frame()

# Keep identifiers plus the score; non-numeric entries (e.g. "Not Available")
# become NA through the as.numeric(as.character(...)) coercion -- expected.
imaging2 <- select(imaging, Hospital.Name, State, Measure.ID, Score) %>%
  mutate(Score = as.numeric(as.character(Score)))

# Unique row id stops spread() from collapsing duplicate
# hospital/state/measure combinations into list-columns.
imaging2$row <- seq_len(nrow(imaging2))

# Wide format: one column per Measure.ID. Missing cells are filled with 0
# (original used `fill = F`, a logical that coerced to 0 when summed) so they
# contribute nothing to the per-hospital sums below.
imaging2 <- spread(imaging2, Measure.ID, Score, fill = 0) %>% select(-row)

# Collapse the helper rows back to one row per hospital/state; ungroup so the
# result carries no residual dplyr grouping.
imaging.final <- group_by(imaging2, Hospital.Name, State) %>%
  summarise_all(sum) %>%
  ungroup()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.