blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
adb27ed5301483781f3d9b0a1b177ff5a796384a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/archivist/examples/shinySearchInRepo.Rd.R | bc3dc27a855fcdab5ac16cc5442f4e384469f9b2 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 351 | r | shinySearchInRepo.Rd.R | library(archivist)
### Name: shinySearchInLocalRepo
### Title: Shiny Based Live Search for an Artifact in a Repository Using
### Tags
### Aliases: shinySearchInLocalRepo
### ** Examples
## Not run:
##D # assuming that there is a 'repo' dir with a valid archivist repository
##D shinySearchInLocalRepo( repoDir = 'repo' )
## End(Not run)
|
ada64e4c5a044057c8f4f74b0cf3d8a63a3b4cc2 | 74f9fb10d3d6e2cc88ca5398d2e5d361a627d4ec | /R/TreatIncorrect.r | 502e11bcf1d8cc0dbccdd6217c9e586c913e4524 | [] | no_license | cran/TestDataImputation | cadb5ea8ebf2c704a36429c30f2c69d1d90dc495 | fcdba13b0b05d60612e7384fc306e1bdb2cccc32 | refs/heads/master | 2021-10-20T09:51:41.121988 | 2021-10-18T17:10:11 | 2021-10-18T17:10:11 | 65,485,437 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,830 | r | TreatIncorrect.r | #' Treat missing responses as incorrect (IN)
#' @description This function replaces all missing responses by zero (see Lord, 1974 <doi: 10.1111/j.1745-3984.1974.tb00996.x>;
#' Mislevy & Wu, 1996 <doi: 10.1002/j.2333-8504.1996.tb01708.x>; Pohl et al., 2014 <doi: 10.1177/0013164413504926>).
#' @param test.data Test data set (a data frame or a matrix) containing missing responses.
#' Missing values are coded as NA or other values (e.g., 8, 9).
#' @param Mvalue Missing response indicators in the data (e.g. "NA", "8", "9", etc.). Mvalue="NA" by default.
#' @return A data frame with all missing responses replaced by '0'.
#' @import stats
#' @examples
#' TreatIncorrect(test.data, Mvalue="NA")
#' @export
#' @references {
#' Lord, F. M. (1974).
#' " Quick estimates of the relative efficiency of two tests as a function of ability level."
#' Journal of Educational Measurement, 11(4), 247-254. doi: 10.1111/j.1745-3984.1974.tb00996.x.
#' }
#' @references {
#' Mislevy, R. J., & Wu, P. K. (1996).
#' " Missing responses and IRT ability estimation: Omits, choice, time limits, and adaptive testing. "
#' ETS Research Report Series, 1996(2), i-36. doi: 10.1002/j.2333-8504.1996.tb01708.x.
#' }
#' @references {
#' Pohl, S., Gräfe, L., & Rose, N. (2014).
#' "Dealing with omitted and not-reached items in competence tests evaluating approaches accounting for missing responses in item response theory models. "
#' Educational and Psychological Measurement, 74(3), 423-452. doi: 10.1177/0013164413504926.
#' }
TreatIncorrect <- function(test.data, Mvalue = "NA") {
  # Custom missing-response indicator (e.g. 8 or 9): recode those cells
  # to NA first. Any pre-existing NA cells are pooled in as well, so they
  # are zeroed in the next step too (matching the original behavior).
  if (Mvalue != "NA") {
    test.data[test.data == Mvalue] <- NA
  }
  # Score every missing response as incorrect (0).
  test.data[is.na(test.data)] <- 0
  as.data.frame(test.data)
}
|
b2b91a011533d6b3d0d6dff7f71db47b7e6b309f | a16e182224fd3e112f4701acd9874a89b4425361 | /plot4.R | c674e3e5bc6aa60d617e363d88c130a8b532a1ea | [] | no_license | coursera2016/ExData_Plotting1 | 5d46fec577fbd21d980931383c44f53a612fc5e2 | 57c1048e2aa401ea99e875d7655288546fc1d3ec | refs/heads/master | 2021-01-17T06:56:08.111244 | 2015-04-12T16:37:00 | 2015-04-12T16:37:00 | 33,817,692 | 0 | 0 | null | 2015-04-12T13:22:28 | 2015-04-12T13:22:28 | null | UTF-8 | R | false | false | 887 | r | plot4.R | dat <- read.csv2("household_power_consumption.txt", na.string="?", dec=".")
# Keep only the two target days (1-2 Feb 2007); dates are d/m/Y strings.
dat <- dat[dat$Date %in% c("1/2/2007", "2/2/2007"),]
# Combine the Date and Time columns into a single timestamp column.
datetime <- strptime(paste(dat$Date, dat$Time), "%d/%m/%Y %H:%M:%S")
dat <- cbind(dat, datetime)
# Open a 480x480 PNG device and lay out a 2x2 grid, filled column-wise.
png("plot4.png", width=480, height=480)
par(mfcol=c(2,2))
# Panel 1: global active power over time.
plot(dat$datetime, dat$Global_active_power, type="l", xlab="", ylab="Global Active Power")
# Panel 2: the three sub-metering series overlaid (black, red, blue).
plot(dat$datetime, dat$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(dat$datetime, dat$Sub_metering_2, type="l", col=2)
lines(dat$datetime, dat$Sub_metering_3, type="l", col=4)
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c(1,2,4),
lty=1, bty="n")
# Panels 3 and 4: voltage and global reactive power.
plot(dat$datetime, dat$Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(dat$datetime, dat$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off() |
62817f02149e45744533c278e69f6c0cf70c0cb9 | f3e914e8a3ccb1c4d73555321e3eaf52b59f52e0 | /R/3.4-practice.R | 615541a982de3ee8348bf360fb77683c6b90d6db | [] | no_license | youjia36313/learn_R | 08be35ebc032839e8c25466c63ae5a0292069855 | 674de3d09e0e7dfec2d3e164ffab98e0c40ca597 | refs/heads/master | 2020-09-15T19:39:00.136679 | 2019-11-23T06:37:41 | 2019-11-23T06:37:41 | 223,541,846 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | 3.4-practice.R | df<-read.csv("practice3-4-1.csv")
# Frequency table of the Answer column from the first practice file.
table(df$Answer)
# Second practice data set: export/import figures.
df1<-read.csv("practice3-4-2.csv")
df1
x<-df1$Export
y<-df1$Import
# Pearson correlation of exports vs imports; cor() is symmetric, so the
# cor(y, x) call on the next line gives the same value.
cor(x,y)
cor(y,x) |
bc1cf1231810a6d01693a622f537de5cc0ce9b86 | 442f9770e53101c4461c9031dfd69d3dfa69a757 | /man/newBlockInformation.Rd | b5a238aebb0ea38fb15e864961e8ef0c9c16ab91 | [] | no_license | cran/WGCNA | edaf87638c6cf0c9105dbb67637ebe059f598cb1 | 31f538c2f9d7d48f35f7098b4effe17331357d0d | refs/heads/master | 2023-01-25T02:34:33.041279 | 2023-01-18T11:10:05 | 2023-01-18T11:10:05 | 17,694,095 | 48 | 54 | null | 2019-08-17T13:25:00 | 2014-03-13T03:47:40 | R | UTF-8 | R | false | false | 1,447 | rd | newBlockInformation.Rd | \name{newBlockInformation}
\alias{newBlockInformation}
\alias{BlockInformation}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Create a list holding information about dividing data into blocks
}
\description{
This function creates a list storing information about dividing data into blocks, as well as about possibly
excluding genes or samples with excessive numbers of missing data.
}
\usage{
newBlockInformation(blocks, goodSamplesAndGenes)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{blocks}{
A vector giving block labels. It is assumed to be a numeric vector whose block labels are
consecutive integers starting at 1.
}
\item{goodSamplesAndGenes}{
A list returned by \code{\link{goodSamplesGenes}} or \code{\link{goodSamplesGenesMS}}.
}
}
\value{
A list with \code{class} attribute set to \code{BlockInformation}, with the following components:
\item{blocks}{A copy of the input \code{blocks}.}
\item{blockGenes}{A list with one component per block, giving the indices of elements in \code{block} whose
value is the same.}
\item{goodSamplesAndGenes}{A copy of input \code{goodSamplesAndGenes}.}
\item{nGGenes}{Number of `good' genes in \code{goodSamplesAndGenes}.}
\item{gBlocks}{The input \code{blocks} restricted to `good' genes in \code{goodSamplesAndGenes}.}
}
\author{
Peter Langfelder
}
\seealso{
\code{\link{goodSamplesGenes}}, \code{\link{goodSamplesGenesMS}}.
}
\keyword{misc}
|
ae648a3177cdb5951b56334ed0262323802f59a1 | 934b913fd45c0f26ba88afcf356a8324cd7bde69 | /analysis/02_explore_prey_categories_harbour.R | 19a40272676c88b6af8d1f842f57dea79cfaa198 | [] | no_license | adbpatagonia/SealDietAnalysis | 07ac508d9c0b16dd707a4fc2052970be58a544a8 | f7126aaf23ee03d70ed2fade58d32969c5ddec71 | refs/heads/master | 2022-01-28T09:39:15.416591 | 2019-07-18T13:49:22 | 2019-07-18T13:49:22 | 116,158,718 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,057 | r | 02_explore_prey_categories_harbour.R | ## ADB July 25, 2017.
## The intent of this script is to
## explore which prey species to include in prey categories
## load libraries ----
library(dplyr)
## read data ----
load('interimsteps/diet.rdata')
preysp <- read.csv('data/tlkp_preysp.csv',header = T)
## subsetting parameters ----
# mammalsp <- c(1,2,5,6)
# nafodiet <- c('2H','2J','3K')
# dietby <- c('mmspcode','year','nafo')
## define prey categories ----
## NOTE: the assignments below are order-dependent; later assignments
## overwrite earlier ones, and the is.na(preycat) guards on some lines
## deliberately avoid overwriting categories that are already set.
preysp <- preysp[order(preysp$preycode, decreasing = T),]
preysp$preycat <- NA
preysp$preycat <- ifelse(between(preysp$preycode, 9971, 9999) | preysp$prey == 'Reserved', 'donotuse', preysp$preycat)
preysp$preycat <- ifelse(preysp$prey != 'Reserved' & between(preysp$preycode, 9600, 9697), 'bird', preysp$preycat)
#preysp[grepl("Pandalus", preysp$prey.scientific.name),'preycat'] <- 'shrimp pandalus'
#preysp[grepl("Shrimp", preysp$prey) & !grepl("Pandalus", preysp$prey.scientific.name), 'preycat'] <- 'shrimp non-pandalid'
#preysp[grepl("shrimp", preysp$preycat),'preycat'] <- 'shrimp'
preysp$preycat <- ifelse(is.na(preysp$preycat) & between(preysp$preycode, 1100, 9000) , 'invertebrate', preysp$preycat)
preysp$preycat <- ifelse(is.na(preysp$preycat) & between(preysp$preycode, 9801, 9835) , 'invertebrate', preysp$preycat)
#preysp$preycat <- ifelse(between(preysp$preycode, 7925, 7989) | preysp$preycode == 7951, 'mysid', preysp$preycat)
preysp$preycat <- ifelse(between(preysp$preycode, 7991, 8017) , 'euphasiid', preysp$preycat)
#preysp[grepl("Hyperiid", preysp$prey), 'preycat'] <- 'hyperiid amphipod'
#preysp$preycat <- ifelse(between(preysp$preycode, 7023, 7739) , 'gammarid amphipod', preysp$preycat)
preysp[grepl("Crab", preysp$prey), 'preycat'] <- 'crab'
preysp$preycat <- ifelse(is.na(preysp$preycat) & between(preysp$preycode, 9851, 9852) , 'invertebrate', preysp$preycat)
#preysp[grepl("Gad", preysp$prey) | grepl("Cod", preysp$prey) & preysp$preycode != 451 & preysp$preycode != 438, 'preycat'] <- 'gadoid'
preysp[grepl("Squid", preysp$prey), 'preycat'] <- 'squid'
preysp$preycat <- ifelse(between(preysp$preycode, 1000, 1099) , 'marine mammal', preysp$preycat)
## Codes below 1000 are fish in general; specific species/groups are then
## assigned by exact code or by scientific-name pattern, overriding 'fish'.
preysp[which(preysp$preycode < 1000),'preycat'] <- 'fish'
preysp[which(preysp$preycode == 187),'preycat'] <- 'capelin'
preysp[which(preysp$preycode == 451),'preycat'] <- 'arctic cod'
preysp[which(preysp$preycode == 438),'preycat'] <- 'atlantic cod'
preysp[which(preysp$preycode == 439),'preycat'] <- 'greenland cod'
#preysp[which(preysp$preycode == 892),'preycat'] <- 'greenland halibut'
preysp$preycat <- ifelse(between(preysp$preycode, 149, 150) , 'atlantic herring', preysp$preycat)
#preysp$preycat <- ifelse(between(preysp$preycode, 17, 122) , 'chondrichthyes', preysp$preycat)
preysp[grepl("Ammodyt", preysp$prey.scientific.name), 'preycat'] <- 'sandlance'
preysp[grepl("Sebast", preysp$prey.scientific.name), 'preycat'] <- 'redfish'
preysp$preycat <- ifelse(between(preysp$preycode, 808, 832) , 'sculpin', preysp$preycat)
preysp$preycat <- ifelse(between(preysp$preycode, 887, 909) , 'flatfish', preysp$preycat)
#preysp$preycat <- ifelse(between(preysp$preycode, 882, 909) & preysp$preycode != 892, 'flatfish', preysp$preycat)
## Build a lookup of the categories actually used, in the display order
## given by `cats` (commented entries are categories not currently used).
preycat <- data.frame(preycat = unique(preysp$preycat), stringsAsFactors = F)
cats <- c(
  "arctic cod",
  "atlantic cod",
  "capelin",
  "atlantic herring",
  "redfish",
  "greenland cod",
  "sculpin",
  "flatfish",
  "sandlance",
  "fish",
  # "squid",
  # "shrimp pandalus",
  # "shrimp non-pandalid",
  # "shrimp",
  # "crab",
  # "mysid",
  # "hyperiid amphipod",
  "squid",
  "crab",
  "euphasiid",
  "invertebrate"
)
preycat <- preycat %>%
  slice(match(cats, preycat))
## Descending plotting order: first category gets the highest rank.
preycat$order <- nrow(preycat):1
## add prey categories to diet data and eliminate non-prey items ----
diet <- merge(diet, preysp[,c('preycode','preycat')], by = 'preycode')
exclude <- c('donotuse', 'marine mammal', 'bird')
diet <- (diet[which(!diet$preycat %in% exclude),])
save(diet, preycat, file = 'interimsteps/diet_categories_harbour.rdata')
|
f824c7e294286dda938b68cdf732957ac6359cf3 | eab78e955aaee69c14d206c8e5bd76cf246a2372 | /R/parse.R | 50ea98964c81bd6ff91ee922e3cc7106e6a1f0fb | [
"MIT"
] | permissive | rstudio/connectapi | 3ad96af4e7935035658bf09aa8082cae4c68ffb5 | 427ac1fe2eb72d45f6048c376ec94b6c545faf8d | refs/heads/main | 2023-08-08T11:55:20.698746 | 2023-07-10T16:36:27 | 2023-07-10T16:36:27 | 167,249,814 | 34 | 17 | NOASSERTION | 2023-08-03T13:26:37 | 2019-01-23T20:32:57 | R | UTF-8 | R | false | false | 4,744 | r | parse.R | # because format(NULL, "%Y-%m") == "NULL"
safe_format <- function(expr, ...) {
  # format(NULL, ...) would return the string "NULL"; pass NULL through.
  if (is.null(expr)) {
    return(NULL)
  }
  format(expr, ...)
}
# Render a date-time as RFC 3339 text: ISO 8601 with five fractional
# digits and a colon inserted into the numeric offset ("+0000" -> "+00:00").
datetime_to_rfc3339 <- function(input) {
  stamp <- format(input, format = "%Y-%m-%dT%H:%M:%OS5%z")
  n_chars <- nchar(stamp)
  head_part <- substr(stamp, 0, n_chars - 2)
  tail_part <- substr(stamp, n_chars - 1, n_chars)
  paste0(head_part, ":", tail_part)
}
# Format a date-time as a "YYYY-MM-DDTHH:MM:SSZ" string. Character input
# is assumed to already be a formatted timestamp and is returned as-is
# (upstream TODO: confirm such strings actually match this format).
# NULL input yields NULL via safe_format().
make_timestamp <- function(input) {
  if (!is.character(input)) {
    input <- safe_format(input, "%Y-%m-%dT%H:%M:%SZ")
  }
  input
}
# Rewrite ISO-8601 "dateTtimeZ" strings as "date timeZ" (the "T"
# separator becomes a space). Non-character input is returned untouched.
swap_timestamp_format <- function(.col) {
  if (!is.character(.col)) {
    return(.col)
  }
  iso_pattern <- "([0-9]{4}-[0-9]{2}-[0-9]{2})T([0-9]{2}:[0-9]{2}:[0-9]{2}\\.*[0-9]*Z)"
  gsub(iso_pattern, "\\1 \\2", .col)
}
# Apply ensure_column() once per named default supplied via ..., threading
# the data frame through each call. The argument names give the target
# column names; the argument values give each column's default/type.
ensure_columns <- function(.data, ...) {
  defaults <- rlang::list2(...)
  col_names <- names(defaults)
  Reduce(
    function(acc, i) ensure_column(acc, defaults[[i]], col_names[[i]]),
    seq_along(defaults),
    init = .data
  )
}
# Guarantee that `data` has a column `name` compatible with `default`.
#
# `default` must be length 1; its type defines the column's target type.
# A missing column is created by repeating `default` for every row. An
# existing column is first normalised (timestamp "T" separator swapped
# out), then routed through special-case coercions that vctrs::vec_cast
# cannot perform (double/character -> datetime, numeric -> fs_bytes,
# numeric -> integer64) before the final vec_cast to `default`'s type.
ensure_column <- function(data, default, name) {
  stopifnot(length(default) == 1)
  col <- data[[name]]
  # Suppress "experimental" warnings raised by helpers within this scope.
  scoped_experimental_silence()
  if (rlang::is_null(col)) {
    # Column absent: fabricate one `default` per row, then cast.
    col <- vctrs::vec_rep(default, nrow(data))
    col <- vctrs::vec_cast(col, default)
  } else {
    col <- swap_timestamp_format(col)
    if (vctrs::vec_is(default, NA_datetime_) && !vctrs::vec_is(col, NA_datetime_)) {
      # manual fix because vctrs::vec_cast cannot cast double -> datetime or char -> datetime
      col <- coerce_datetime(col, default, name = name)
    }
    if (inherits(default, "fs_bytes") && !inherits(col, "fs_bytes")) {
      col <- coerce_fsbytes(col, default)
    }
    if (inherits(default, "integer64") && !inherits(col, "integer64")) {
      col <- bit64::as.integer64(col)
    }
    col <- vctrs::vec_cast(col, default)
  }
  data[[name]] <- col
  data
}
# Parse API payloads into a tibble, then guarantee the typed columns
# given via ... exist (missing ones are added with their default type).
parse_connectapi_typed <- function(data, ...) {
  parsed <- parse_connectapi(data)
  ensure_columns(parsed, ...)
}
# Flatten a list of API result records into a tibble.
#
# Each record becomes one row. Within a record, every field is pulled
# with purrr::pluck so an absent value gets a default (NULL for
# list-valued fields, NA otherwise), and any multi-element value is
# wrapped in a list so it fits into a single cell (list-column).
parse_connectapi <- function(data) {
  tibble::as_tibble(purrr::map_df(
    data,
    function(x) {
      purrr::map(
        .x = x,
        .f = function(y) {
          if (is.list(y)) {
            # empty list object gets null
            prep <- purrr::pluck(y, .default = NULL)
          } else {
            # otherwise NA
            prep <- purrr::pluck(y, .default = NA)
          }
          if (length(prep) > 1) {
            prep <- list(prep)
          }
          return(prep)
        }
      )
    }
  ))
}
#' @export
vec_cast.fs_bytes.integer <- function(x, to, ...) {
  # Integer byte counts convert directly; flag the cast as experimental.
  warn_experimental("vec_cast.fs_bytes")
  fs::as_fs_bytes(x)
}
#' @export
vec_cast.fs_bytes.default <- function(x, to, ...) {
  # Fall back to vctrs' standard incompatible-cast error for other types.
  vctrs::vec_default_cast(x = x, to = to)
}
#' Cast to fs_bytes
#'
#' \lifecycle{deprecated}
#' This is a temporary placeholder because the functionality
#' does not exist yet in the `fs` package. Do not build dependencies
#' on `connectapi::vec-cast.fs_bytes`, as it will be removed without
#' warning in a future release.
#'
#' @param x Vectors to cast
#' @param to Type to cast to. If `NULL`, `x` will be returned as is
#' @param ... Dots for future extensions and should be empty
#'
#' @return A vector the same length as `x` with the same type as `to`, or an
#' error if the cast is not possible.
#'
#' @export
vec_cast.fs_bytes <- function(x, to, ...) {
  warn_experimental("vec_cast.fs_bytes")
  # S3 generic: dispatch on class(x) to the .integer/.default methods above.
  UseMethod("vec_cast.fs_bytes")
}
# Coerce numeric input to fs::fs_bytes; any other type is a hard error
# signalled through vctrs' incompatible-cast condition.
coerce_fsbytes <- function(x, to, ...) {
  if (!is.numeric(x)) {
    vctrs::stop_incompatible_cast(x = x, to = to, x_arg = "x", to_arg = "to")
  }
  fs::as_fs_bytes(x)
}
# name - optional. Must be named, the name of the variable / column being converted
# Coerce `x` to a datetime in the timezone carried by `to`. Accepts
# numeric (seconds since epoch), character (parseable by as.POSIXct),
# POSIXct (passed through), or all-NA logical input (mapped to the
# package's NA datetime constant); anything else raises an
# incompatible-cast error labelled with `name` (default label "x").
coerce_datetime <- function(x, to, ...) {
  tmp_name <- rlang::dots_list(...)[["name"]]
  if (is.null(tmp_name) || is.na(tmp_name) || !is.character(tmp_name)) {
    tmp_name <- "x"
  }
  if (is.numeric(x)) {
    vctrs::new_datetime(as.double(x), tzone = tzone(to))
  } else if (is.character(x)) {
    as.POSIXct(x, tz = tzone(to))
  } else if (inherits(x, "POSIXct")) {
    x
  } else if (all(is.logical(x) & is.na(x)) && length(is.logical(x) & is.na(x)) > 0) {
    # A non-empty vector that is entirely logical NA (e.g. a column of
    # missing values that never got a concrete type).
    NA_datetime_
  } else {
    vctrs::stop_incompatible_cast(x = x, to = to, x_arg = tmp_name, to_arg = "to")
  }
}
vec_cast.POSIXct.double <- function(x, to, ...) {
  # Doubles are seconds since the epoch; build a POSIXct in `to`'s zone.
  warn_experimental("vec_cast.POSIXct.double")
  vctrs::new_datetime(x, tzone = tzone(to))
}
vec_cast.POSIXct.character <- function(x, to, ...) {
  # Parse timestamp strings in the timezone carried by `to`.
  as.POSIXct(x, tz = tzone(to))
}
# Return the first element of x's "tzone" attribute, or "" when absent.
#
# Bug fix: the previous body, `attr(x, "tzone")[[1]] %||% ""`, errored
# ("subscript out of bounds") for objects with no tzone attribute,
# because `NULL[[1]]` fails before `%||%` can supply the fallback.
# Extracting the attribute first makes the intended "" fallback reachable
# (and drops the `%||%` dependency for this helper).
tzone <- function(x) {
  tz <- attr(x, "tzone")
  if (is.null(tz)) {
    ""
  } else {
    tz[[1]]
  }
}
# Build a POSIXct vector directly from seconds-since-epoch values.
# Integer input is widened to double; a NULL tzone collapses to "".
new_datetime <- function(x = double(), tzone = "") {
  if (is.null(tzone)) {
    tzone <- ""
  }
  if (is.integer(x)) {
    x <- as.double(x)
  }
  stopifnot(is.double(x), is.character(tzone))
  structure(x, tzone = tzone, class = c("POSIXct", "POSIXt"))
}
|
6f36d0f53391645b82b6365c3d158d77801fbbf2 | e2b59ecade22df23781a7f6d2bd6bb559d4b01bc | /R/summary.R | 23549b4d4c0e5a4bbd6f5e45b6c0bcc0760a2320 | [
"MIT"
] | permissive | artedison/ensRadaptor | 2838d1740b5add8b1f348b14a1cc576e571bb789 | bf3911a29812120cfe3965148a9e21f98e350212 | refs/heads/master | 2021-08-20T08:01:24.766312 | 2020-12-07T01:09:18 | 2020-12-07T01:09:18 | 232,164,615 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,740 | r | summary.R | #' summary input files
#'
#' this funciton summary exist content in i01 and i02 file
#' output exist species and reaction
#' print imporant parameters
#' these information can be used for modify ens.def file
#'
#' @param i01 string. the locaiton of i01. must be provided
#' @param i02 string. the locaiton of i02. must be provided
#' @return list. list of information contains in input files
#' @export
#' @import stringr magrittr
summary_input<-function(i01=NULL,i02=NULL){
if(is.null(i01)||is.null(i02)){
stop("please provide path to both i01 and i02 files")
}
list.exi=vector(mode="list")
#i01
nexpt=change_para("nexpt",NA,infile=i01,outfile=NA,type="show")
ntxpt=change_para("ntxpt",NA,infile=i01,outfile=NA,type="show")
nspec=change_para("nspec",NA,infile=i01,outfile=NA,type="show")
nreac=change_para("nreac",NA,infile=i01,outfile=NA,type="show")
ntime=change_para("ntime",NA,infile=i01,outfile=NA,type="show")
lines=readLines(i01)
lenlines=length(lines)
lines %>% str_which(string=.,pattern="^\\s*namespec") %>%
add(.,1) %>%
extract(lines,.) %>%
str_trim(string=.,side="both") %>%
str_split(string=.,pattern="\\s+") %>%
unlist(.) -> species
list.exi[["species"]]=species
lines %>% str_which(string=.,pattern="^\\s*namereac") %>%
add(.,1) %>%
extract(lines,.) %>%
str_trim(string=.,side="both") -> reactions
list.exi[["reactions"]]=reactions
lines %>% str_which(string=.,pattern="^\\s*namereac") %>%
add(.,2) %>%
extract(lines,.) %>%
str_extract(.,pattern="^\\s+\\d+\\s+\\d+\\s") %>%
str_trim(string=.,side="both") %>%
str_split(string=.,pattern="\\s+") %>%
unlist(.) %>%
as.numeric(.) %>%
max(.) -> npart
list.exi[["npart_x"]]=npart
print(paste0("npart_x: ",npart))
##the table for range in total and initial condition for species
ind_spec=str_which(string=lines,pattern="\\#\\s+Species\\s+control\\-\\s+and\\s+\\\\Theta\\-variables")
ind_reac=str_which(string=lines,pattern="#\\s+Reaction\\s+control\\-\\s+and\\s+\\\\Theta\\-variables")
indbound=c(ind_spec,ind_reac,lenlines)
ind_theta=str_which(string=lines,pattern="\\s+1\\s+1\\s*$")
indloc=c(0,1,3)
blocknamevec=c("namespec","namereac")
for(blocknamei in seq(length(blocknamevec))){
blockname=blocknamevec[blocknamei]
ind_blockname=str_which(string=lines,pattern=blockname)
ind_blockname=ind_blockname[ind_blockname>indbound[blocknamei]&ind_blockname<indbound[blocknamei+1]]
ind_blockname=c(ind_blockname,indbound[blocknamei])
listpara=vector(mode="list")
matpara_name=c()
for(indblockele in seq(length(ind_blockname)-1)){
blockstarti=ind_blockname[indblockele]
blockendi=ind_blockname[indblockele+1]
name=str_trim(lines[blockstarti+1],side="both")
cat(paste0(name,"\n"))
ind_theta_seles=ind_theta[ind_theta>blockstarti&ind_theta<blockendi]
for(ind_theta_sele in ind_theta_seles){
valvec=c()
for(indloci in seq(length(indloc))){
indlocele=indloc[indloci]
lines[ind_theta_sele+indlocele] %>% str_trim(string=.,side="both") %>%
str_split(string=.,pattern="\\s+",simplify=TRUE) %>%
extract(.,3) %>% as.numeric(.) -> val
valvec=c(valvec,val)
}
cat(paste0(valvec[1],"\t",valvec[2],"\t",valvec[3],"\n"))
listpara[[length(listpara)+1]]=valvec
matpara_name=c(matpara_name,name)
}
}
matpara=as.data.frame(Reduce("rbind",listpara))
matpara=cbind(matpara_name,matpara)
rownames(matpara)=NULL
colnames(matpara)=c("name","low","high","ini_guess")
list.exi[[blockname]]=matpara
}
#i02
## the structure of experiment data
lines=readLines(i02)
exp_ind=str_which(string=lines,pattern="FF\\s+iexpt")
exp_ind=c(exp_ind,length(lines))
specname_ind=str_which(string=lines,pattern="FFF\\s+namespec")
time_ind=str_which(string=lines,pattern="\\s*ndtin_txpt")
exp_blocks=sapply(seq(length(exp_ind)-1),function(x){
thisblock=exp_ind[x]
nextblock=exp_ind[x+1]
specname_block=specname_ind[specname_ind>thisblock&specname_ind<nextblock]
time_block=time_ind[time_ind>thisblock&time_ind<nextblock]
(specname_block+1) %>% extract(lines,.) %>%
str_trim(string=.,side="both") %>%
str_split(string=.,pattern="\\s+") %>%
unlist() -> specs
(time_block+1) %>% extract(lines,.) %>%
str_trim(string=.,side="both") %>%
str_split(string=.,pattern="\\s+") %>%
unlist(.) %>% as.numeric(.) -> timepointnum
names(timepointnum)=specs
print(paste0("BLOCK: ",x))
print(timepointnum)
timepointnum
})
print(paste0("ALL:",sum(unlist(exp_blocks))))
return(list.exi)
}
#' Reaction summary
#'
#' Reads the reaction list file at `path`, prints the enzymes, compounds
#' and reactions it contains, and warns about apparent duplicates in the
#' input equation file.
#'
#' @param path string. Path to the reaction list file. Must be provided.
#' @param enzpattern string. Regex used to recognise enzyme entities
#'   among species names. Must be provided.
#' @return list. Enzyme information: enz, compounds, reacs, and reac.type
#'   (unique non-enzyme substrate/product combinations with their names).
#' @export
#' @import stringr
summary_reac<-function(path=NULL,enzpattern=NULL){
  if(is.null(path)){
    stop("please provide the input path")
  }
  if(is.null(enzpattern)){
    stop("please provide the searching pattern for enzyme")
  }
  list.reac.addon=read_reac(path)
  # All species mentioned as substrate (x[[2]]) or product (x[[3]]).
  specs=unique(unlist(sapply(list.reac.addon,function(x){
    c(x[[2]],x[[3]])
  })))
  # Split species into enzymes vs. compounds via the enzyme name pattern.
  enzind=str_detect(string=specs,pattern=enzpattern)
  enz=unique(specs[enzind])
  print(paste0("enzymes: ",length(enz)))
  print(enz)
  compounds=unique(specs[!enzind])
  print(paste0("compounds: ",length(compounds)))
  print(compounds)
  reacts=unlist(sapply(list.reac.addon,function(x){
    x[[1]]
  }))
  print(paste0("reactions: ",length(reacts)))
  print(reacts)
  # Warn on duplicated reaction strings.
  reacsdup=reacts[duplicated(reacts)]
  if(length(reacsdup)!=0){
    print("duplicated reactions:")
    print(unique(reacsdup))
  }
  # NOTE(review): `specs` was de-duplicated above, so every count in this
  # table is 1 and the >2 branch appears unreachable — confirm intent.
  tabenz=table(specs[enzind])
  if(max(tabenz)>2){
    print("duplicated enzymes:")
    print(names(tabenz[tabenz>2]))
  }
  # Group reactions that share the same non-enzyme substrate/product
  # sets. NOTE: the sapply below is used for its side effects — it
  # mutates reac.type in the enclosing scope via <<-.
  reac.type=vector(mode="list")
  temp=sapply(list.reac.addon,function(x){
    sub=x[["subs"]]
    prod=x[["prods"]]
    name=x[["name"]]
    enzy=unique(c(sub[str_detect(string=sub,pattern=enzpattern)],prod[str_detect(string=prod,pattern=enzpattern)]))
    sub=sub[!str_detect(string=sub,pattern=enzpattern)]
    prod=prod[!str_detect(string=prod,pattern=enzpattern)]
    # Does an existing group have the same substrate and product sets?
    flag=sapply(reac.type,function(y){
      setequal(sub,y[["sub"]])&&setequal(prod,y[["prod"]])
    })
    ind=NULL
    if(length(flag)!=0){
      ind=which(flag)
    }
    if(length(ind)!=0){
      # Existing group: prepend this reaction's name to it.
      reac.type[[ind]][["name"]]<<-c(name,reac.type[[ind]][["name"]])
    }else{
      # New group: record substrates, products, name and enzymes.
      temp.list=list(sub=sub,prod=prod,name=name,enzy=enzy)
      reac.type[[length(reac.type)+1]]<<-temp.list
    }
  })
  list.res=list(enz=enz,compounds=compounds,reacs=reacts,reac.type=reac.type)
  return(list.res)
}
#' Summary plots for o02 output
#'
#' Produces summary figures for an o02 structure (chi^2 vs sweep, chi^2
#' histogram, acceptance-rate histogram, step-size histogram) and prints
#' the matching summary statistics.
#'
#' @param o02.data list. The structure from o02.reader. Must be provided.
#' @param dir.res string. The directory where figures are written. Must be provided.
#' @param addonname string. Suffix inserted into output file names. Default "".
#' @param linethick bool. Whether thicker lines should be drawn in the figure. Default FALSE.
#' @return No return value; called for its plots and printed output.
#' @export
summary_o02<-function(o02.data=NULL,dir.res=NULL,addonname="",linethick=FALSE){
  if(is.null(o02.data)){
    stop("please provide the input path")
  }
  if(is.null(dir.res)){
    stop("please provide the output result path")
  }
  ##number of recorded sweeps in the run
  print(paste0("length: ",length(o02.data[["ids"]])))
  ##chi^2 (stored chisq scaled by 2) as a function of sweep index
  tab=cbind(o02.data[["ids"]],o02.data[["chisq"]]*2)
  draw_sweep(tab,ylab="chi^2",
          loci=paste0(dir.res,"chi2-sweep.",addonname,".pdf"),
          linethick=linethick
        )
  ##histogram of chi^2
  tab=o02.data[["chisq"]]*2
  dim(tab)=c(length(o02.data[["chisq"]]),1)
  draw_hist(tab,loci=paste0(dir.res,"chi2.distr.",addonname,".pdf"),xlab="chi^2")
  print(paste0("mean: ",mean(tab[,1])))
  ##acceptance rates: drop negatives and cap values at 1 before plotting
  vec=unlist(o02.data[["raccp"]])
  tab=vec
  tab=tab[tab>=0]
  tab[tab>1]=1
  dim(tab)=c(length(tab),1)
  draw_hist(tab,loci=paste0(dir.res,"acceptance.rate.distr.",addonname,".pdf"),xlab="acceprate")
  print("acceptance rate:")
  print(summary(vec))
  ##step size
  vec=unlist(o02.data[["fstp"]])
  tab=vec
  dim(tab)=c(length(vec),1)
  draw_hist(tab,loci=paste0(dir.res,"footstepsize.rate.distr.",addonname,".pdf"),xlab="stepsize")
  print("stepsize:")
  print(summary(vec))
}
|
55a377fe20e0299b318c92d5d0d911e9596952f1 | 5d21e75528088be4156b6c4872a1bc7f60ac760d | /src/wrangle.R | 7c5e1b4bfffe1f36a242876f3f0805dc311a32db | [] | no_license | joesdesk/poverty-and-education | 095a889f46e660cf91f40aa0571617f9fa3c316a | 8434774f8313753e50f00113c803eb2e5ccdf042 | refs/heads/master | 2021-08-30T12:01:32.513007 | 2017-12-17T20:46:49 | 2017-12-17T20:46:49 | 112,970,509 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,395 | r | wrangle.R | #! /usr/bin/env Rscript
# Wrangle the downloaded data files.
# Usage: Rscript wrangle.R <data/poverty.xls> <data/education.xls> <results/socioeconomic.csv>
# Arguments:
# <data/poverty.xls> file location of the poverty spreadsheet
# <data/education.xls> file location of the education spreadsheet
# <results/socioeconomic.csv> file location of the wrangled data csv
# Author: Jomar Sastrillo
# Date: December 10, 2017
# Extract positional command-line arguments
args <- commandArgs(trailingOnly = TRUE)
poverty_xls <- args[1]
education_xls <- args[2]
socioeconomic_csv <- args[3]
# Load required libraries
library(tidyverse)
library(readxl)
# Read data into data frames (skip the spreadsheets' header rows)
education <- read_excel(education_xls, skip = 4, col_names = T)
poverty <- read_excel(poverty_xls, skip = 3, col_names = T)
# Extract and rename the columns of interest
education <- education %>%
  select(fips = "FIPS Code",
         state = "State",
         area_name = "Area name",
         higher_ed_proportion = "Percent of adults with a bachelor's degree or higher, 2011-2015")
poverty <- poverty %>%
  select(fips = "FIPStxt",
         poverty_proportion = "PCTPOVALL_2015")
# We will join the data by the county identification code (FIPS)
socioeconomic <- full_join(education, poverty, by = "fips")
# Write the final dataset to file
write_csv(socioeconomic, socioeconomic_csv, na = "", append = FALSE)
|
b8045736ae88bffe00023732fe59e862eda678ab | 6b579170717f7671b1b06f6e814a602d127c601b | /man/kappa4alShort.Rd | e014f988b3de0fd1b354ea47eef67f522763758d | [] | no_license | mtloots/alR | d8e699c7dadaf9a7abff4db0a21b46522319a069 | 1fc6a3b30ee177b0e22dcdb5ecebae6bfc567091 | refs/heads/master | 2021-04-22T06:46:51.423304 | 2018-03-15T09:25:14 | 2018-03-15T09:25:14 | 59,803,019 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,326 | rd | kappa4alShort.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kappa4alShort.R
\name{kappa4alShort}
\alias{kappa4alShort}
\title{Sigmoidal curve fitting.}
\usage{
kappa4alShort(formula, data = list(), xin, q1, q2, ...)
}
\arguments{
\item{formula}{An LHS ~ RHS formula, specifying the linear model to be estimated.}
\item{data}{A data.frame which contains the variables in \code{formula}.}
\item{xin}{Numeric vector of length 3 containing initial values, for \eqn{\sigma}, \eqn{h}, and \eqn{k}.}
\item{q1, q2}{Numeric vectors, for the lower and upper bounds of the intervals over which arc lengths are to be computed.}
\item{...}{Arguments to be passed on to the outer control list of \code{\link{constrOptim.nl}}.}
}
\value{
kappa4alShort: A list with the following components:
\itemize{
\item coefficients: A vector of estimated coefficients.
\item error: The value of the objective function.
}
}
\description{
A framework for arc length fitting of the four-parameter kappa sigmoidal function.
}
\details{
A shortened version of \code{\link{kappa4al}}.
}
\examples{
k <- kappa4tc(-4, 0, 1)$par
x <- seq(qkappa4(0, 4, 0.4, -4, k), qkappa4(0.7, 4, 0.4, -4, k), length.out=100)
y <- sapply(x, function(i) pkappa4(i, 4, 0.4, -4, k))
kappa4alShort(y~x, xin=c(0.1, -3, -0.1), q1=c(0.1, 0.5), q2=c(0.5, 0.9))
}
|
2e37030ed812d96a84bcc8117a53d25961385f20 | 29585dff702209dd446c0ab52ceea046c58e384e | /spoccutils/tests/testthat.R | 9fa95d5685f31674feeea23fb5df803147f6bbf9 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 45 | r | testthat.R | library('testthat')
# Run the package's testthat suite.
test_check("spoccutils")
|
480eb2841dcb5270f2b15dccc1eed3cffc545c98 | e4ce9bc2d66320c50fab21ab3a4a6720ae9021fa | /R/graph_theme.R | a3534f2a09b872126159316b4ff0cad61d4c7a13 | [
"CC-BY-4.0",
"MIT"
] | permissive | UofTCoders/eeb430.2017.Python | 53a236870fe45c0df1666d4a90838af1c3c91383 | d2f8c12ca284aef871eec0d510ef8e0a38e4ca64 | refs/heads/master | 2021-05-15T14:36:47.744625 | 2017-12-13T04:24:34 | 2017-12-13T04:24:34 | 107,316,813 | 3 | 6 | null | 2017-12-13T04:21:50 | 2017-10-17T19:51:13 | R | UTF-8 | R | false | false | 499 | r | graph_theme.R | library(ggthemes)
# Shared ggplot2 styling for the team's figures: black axis lines, Times
# text, white panel with black border and no grid, centred 14-pt title,
# plus ggthemes' colour-blind-safe discrete colour scale. Returns a list
# that can be added directly to a ggplot object.
team_theme <- function() {
  base_theme <- theme(
    axis.line = element_line(color = "black"),
    text = element_text(size = 8, family = "Times"),
    panel.background = element_rect(fill = 'white', colour = 'black'),
    panel.grid.minor = element_blank(),
    panel.grid.major = element_blank(),
    plot.title = element_text(colour = "black", size = 14, hjust = 0.5),
    legend.text = element_text(size = 12, family = "Times")
  )
  list(base_theme, scale_colour_colorblind())
}
|
70ea9ef6b4efa47b5f14049ca1c84cfad637e688 | ec82a7ee25fbe48e20fd84a3bfdff013f699c90d | /PassiveRamp 1.2018-2.2019/ignorePlotFun.R | 82b1a3e2f021474973e77951ab1036737f7198d6 | [] | no_license | gpagnier/rampAnalysis | 6084e1ba062ef80ba270e82f0f21915de2485176 | 2abe03f0c5190a5525dcdc07772ffe303b90d2fb | refs/heads/master | 2020-03-19T00:32:05.667733 | 2019-04-26T17:48:00 | 2019-04-26T17:48:00 | 135,487,989 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,345 | r | ignorePlotFun.R | #d is filtered data frame
#Do not use trialType with it
#' Plot the propensity to "ignore" as a function of time within a trial.
#'
#' @param data Trial-level data frame (already filtered upstream) with at
#'     least the columns used below: trialNumber, binsTime, gambleDelay,
#'     oddsCond, ignoreRT, response, uniqueid, trialType.
#' @param orig If TRUE, pool trials across participants per time bin; if
#'     FALSE, compute per-participant percentages first and plot their mean.
#' @param eb Error bars for the orig=FALSE plot: 'sem' or 'std'
#'     (default FALSE = no error bars).
#' @param line If TRUE, overlay a least-squares trend line.
#' @param ylimit y-axis limits for the plot.
#' @param title Extra text appended to the plot title.
#' @param trialType Optionally restrict to 'gambleLeft' (plotted purple) or
#'     'gambleRight' (red); any other value keeps all trials (black).
ignorePlot<-function(data,orig=TRUE,eb=FALSE,line=FALSE,ylimit=c(0,100),title="",trialType=""){
  color='black'
  if(trialType=='gambleLeft'){
    data<-filter(data,trialType=='gambleLeft')
    color='purple'
  }else if(trialType=='gambleRight'){
    data<-filter(data,trialType=='gambleRight')
    color='red'
  }
  # Pooled per-time-bin summary; zero-delay and catch trials are excluded.
  d2fun<-filter(data,gambleDelay!=0,oddsCond!='catch') %>%
    group_by(binsTime) %>%
    summarise(trials=length(trialNumber),
              # ignoreRT != 0 marks trials where an "ignore" response occurred
              ignoreCount=sum(ignoreRT!=0),
              # NOTE(review): this branch counts fail|gamble while the
              # per-participant branch below counts fail|ignore -- confirm
              # which definition is intended (the column is unused afterwards).
              didNotIgnore=sum(response=="fail"|response=="gamble"),
              percentageIgnored=round(ignoreCount/trials*100),
              # median RT among the nonzero ignore RTs
              medianRT=median(setdiff(ignoreRT,0)))
  d2fun$seconds<-d2fun$binsTime
  if(orig){
    plot(d2fun$seconds,d2fun$percentageIgnored,xlim = c(0,4),ylim = ylimit,
         main=paste("Ignore propensity",title, "n =",toString(length(data$ignoreRT[data$ignoreRT!=0])),
                    "ignored trials;",toString(length(unique(data$uniqueid))),"participants"),
         xlab="Seconds into trial",ylab="Percentage Ignored",pch=19,col=color)
    if(line){
      abline(lm(d2fun$percentageIgnored~d2fun$seconds))
    }
    # NOTE(review): this summary() result is discarded (not printed/returned).
    summary(lm(d2fun$percentageIgnored~d2fun$seconds))
  }else{
    # Per-participant, per-time-bin summary (catch trials NOT excluded here).
    d2pfun<-filter(data,gambleDelay!=0) %>%
      group_by(binsTime,uniqueid) %>%
      summarise(trials=length(trialNumber),
                ignoreCount=sum(ignoreRT!=0),
                didNotIgnore=sum(response=="fail"|response=="ignore"),
                percentageIgnored=round(ignoreCount/trials*100),
                medianRT=median(setdiff(ignoreRT,0)),
                semRT=sd(setdiff(ignoreRT,0))/sqrt(length(setdiff(ignoreRT,0))))
    d2pfun$seconds<-d2pfun$binsTime
    # Collapse the per-participant percentages into per-bin summary statistics.
    dTestfun<-d2pfun %>%
      group_by(seconds) %>%
      summarise(meanPercentageIgnored=mean(percentageIgnored),
                medianPercentageIgnored=median(percentageIgnored),
                sdPercentageIgnored=sd(percentageIgnored),
                stdPercentageIgnored=std.error(percentageIgnored))
    # NOTE(review): the title says "Gamble propensity" and n counts gamble
    # responses, although the y values are percentages ignored -- confirm.
    plot(dTestfun$seconds,dTestfun$meanPercentageIgnored,xlim = c(0,4),ylim = ylimit,
         main=paste("Gamble propensity",title,";", "n =",toString(length(data$response[data$response=='gamble'])),
                    "trials;",toString(length(unique(data$uniqueid))),"participants"),
         xlab="Seconds into trial",ylab="Percentage Ignored",pch=19,bty='l',col=color)
    # NOTE(review): this fits the POOLED d2fun data inside the per-participant
    # branch and discards the result; dTestfun was probably intended.
    summary(lm(d2fun$percentageIgnored~d2fun$seconds))
    if(line){
      abline(lm(dTestfun$meanPercentageIgnored~dTestfun$seconds))
    }
    # Error bars: +/- one standard error ('sem') or one SD ('std') per bin.
    if(eb=='sem'){
      for(i in 1:length(dTestfun$seconds)){
        arrows(as.numeric(dTestfun$seconds[i]),as.numeric(dTestfun[i,'meanPercentageIgnored']+(as.numeric(dTestfun[i,'stdPercentageIgnored']))),as.numeric(dTestfun$seconds[i]),as.numeric(dTestfun[i,'meanPercentageIgnored']-(as.numeric(dTestfun[i,'stdPercentageIgnored']))),length=0.05, angle=90, code=3)
      }
    }else if(eb=='std'){
      for(i in 1:length(dTestfun$seconds)){
        arrows(as.numeric(dTestfun$seconds[i]),as.numeric(dTestfun[i,'meanPercentageIgnored']+(as.numeric(dTestfun[i,'sdPercentageIgnored']))),as.numeric(dTestfun$seconds[i]),as.numeric(dTestfun[i,'meanPercentageIgnored']-(as.numeric(dTestfun[i,'sdPercentageIgnored']))),length=0.05, angle=90, code=3)
      }
    }
  }
}
|
e5631e85b74cbe9f320e6cf713bc16d0908b9e5b | d488095d94b00bc42d355f9b34c276772936b803 | /man/spuds-package.Rd | be4811094f52e3f4d71559d9a599410d95d06b17 | [] | no_license | DavidHofmeyr/spuds | 5415186be67182ef806b74b0a607ea75ca552edf | ecc9247178a20ae543987dc6631067b52486dd8b | refs/heads/master | 2020-04-13T16:04:33.139774 | 2019-01-06T07:39:08 | 2019-01-06T07:39:08 | 163,311,906 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,068 | rd | spuds-package.Rd | \name{spuds-package}
\alias{spuds-package}
\docType{package}
\title{
\packageTitle{spuds}
}
\description{
\packageDescription{spuds}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{spuds}
\packageIndices{spuds}
This package provides an implementation of the SPUDS algorithm introduced in Hofmeyr, D. (2017) "Improving spectral clustering using the asymptotic value of the normalised cut". The main function provided in the package is spuds(). The only mandatory argument is a data matrix, all optional arguments are given the default values used in the paper. Details of optional arguments can be seen using the function documentation. A function to simulate data sets like those used in the paper is also provided (spuds_datagen(ndata, nclust, ndimensions)). Publicly available benchmark data sets are also included. Use data(package = 'spuds') to see a list of included data sets.
}
\references{
Hofmeyr, D. (2017) Improving spectral clustering using the asymptotic value of the normalised cut, \emph{ArXiv preprint}, ArXiv 1703.09975.
}
\keyword{ package }
|
00056a8316dc7c25cb63d53b4740e197b6b4a527 | fd4dcf8b7fbda761c095a0dd6c9c32129a2b9091 | /man/cumulative_value.plot.Rd | aad3776f39726e3f76d66918d49b6dede33b3020 | [
"MIT"
] | permissive | NicolasMatrices-v2/GeoLift | 921faeee3f4847057820df0c07b1995a43327d08 | 95cc7fefa7cced149753396b8b148bb3862acfe0 | refs/heads/main | 2023-07-26T01:24:32.090515 | 2023-07-05T08:03:02 | 2023-07-05T08:03:02 | 659,745,433 | 0 | 0 | MIT | 2023-06-29T23:03:46 | 2023-06-28T13:19:12 | null | UTF-8 | R | false | true | 1,883 | rd | cumulative_value.plot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{cumulative_value.plot}
\alias{cumulative_value.plot}
\title{Plot the accumulated lift effect.}
\usage{
cumulative_value.plot(
data,
treatment_locations,
treatment_start_period,
treatment_end_period,
location_id = "location",
time_id = "time",
Y_id = "Y",
treatment_end_date = NULL,
frequency = "daily",
plot_start_date = NULL,
title = "",
subtitle = "",
notes = "",
...
)
}
\arguments{
\item{data}{DataFrame that GeoLift will use to determine a result.
Should be the output of \code{GeoDataRead}.}
\item{treatment_locations}{Vector of locations where the treatment was applied.}
\item{treatment_start_period}{Integer representing period where test started.}
\item{treatment_end_period}{Integer representing period where test finished.}
\item{location_id}{Name of the location variable (String).}
\item{time_id}{Name of the time variable (String).}
\item{Y_id}{Name of the outcome variable (String).}
\item{treatment_end_date}{Character that represents a date in year-month-day format.}
\item{frequency}{Character that represents periodicity of time stamps. Can be either
weekly or daily. Defaults to daily.}
\item{plot_start_date}{Character that represents initial date of plot in year-month-day format.}
\item{title}{Character for the title of the plot. NULL by default.}
\item{subtitle}{Character for the subtitle of the plot. NULL by default.}
\item{notes}{String to add notes to the plot. Empty by default.}
\item{...}{additional arguments}
}
\value{
A ggplot object that shows the accumulated lift per time period.
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}}
Plot the accumulated lift effect.
}
|
b5310c815ed4f385af91d736bbb9e98d666ef90a | 0d43c45303c6b9b3ac79113c1b7fdc71a3a7e697 | /R/doTEA.R | b26f95dda00b3266b557e341493a89d34721174d | [] | no_license | HappyLiPei/tea | b3f590bac389beb919809cda417250d66e351d0f | 26d59f69592a0e31644f29707baa4f3c6dde7424 | refs/heads/master | 2021-04-03T05:09:17.597599 | 2015-05-23T14:48:23 | 2015-05-23T14:48:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,735 | r | doTEA.R | #' Build the requested table, including any requisite tables that led up to this table.
#' Build one table in the database, including any prerequisite tables
#' leading up to it, via the compiled `build_one_table` routine.
#'
#' @param input_table Name of the table to build; defaults to the currently
#'     active table recorded in `teaenv`.
doTable <- function(input_table = teaenv$active_tab) {
  target <- as.character(input_table)
  .C("build_one_table", target)
}
#' Re-apply the pre-edit queries to a table.
#'
#' The pre-edits are stored as a list of queries, so they can be applied at
#' any time to any table to make sure it is still clean. Delegates to the
#' compiled `do_preedits` routine.
#'
#' @param input_table The table to act on; defaults to the active table in `teaenv`.
doPreedits <- function(input_table = teaenv$active_tab) {
  target <- as.character(input_table)
  .C("do_preedits", target)
}
#' Read a text file into a database table.
#'
#' Resolves settings from the spec's "input" section, calls the compiled
#' `text_in` routine to load the file, optionally runs the pre-edits, records
#' the resulting table (or its "view" twin, when one exists) as the active
#' table, and snapshots a pristine copy as ORIG<table>.
#'
#' @param input_file The name of the text file to read in. Make sure the path is
#'     correct and you have permissions to read the file. If NULL, taken from
#'     the spec's "input file" key.
#' @param output_table NOTE(review): currently unused -- the table name is
#'     always read from the spec's "output table" key; confirm whether this
#'     argument should override it.
#' @param types,primary.key,indices Currently unused placeholders.
#' @param overwrite If the key is present and "yes", then I will delete and
#'     re-insert the table if need be. NOTE(review): the overwrite check below
#'     is commented out, so this value is fetched but not acted on here --
#'     presumably the C routine reads it from the spec; confirm.
#' @param do_preedits If not NULL, run the pre-edits once immediately after
#'     reading into the database, modifying the data in place.
doInput <- function(input_file=NULL,output_table=NULL,types=NULL,primary.key=NULL,
    indices=NULL,overwrite=NULL, do_preedits=NULL){
    if(is.null(teaenv$con)) stop("You need to have a spec file read in!")
    con <- teaenv$con #for the attach-averse
    if (teaenv$verbosity > 0)
        print(dbGetQuery(con,"select * from keys"))
    # Fall back to the spec for any setting the caller did not supply.
    if(is.null(input_file)) input_file <- teaGetKey("input","input file")
    if(is.null(input_file)) stop("I couldn't find the \"input file\" key in the \"input\" section of the spec.")
    if(is.null(overwrite)) overwrite <- teaGetKey("input","overwrite")
    #if(overwrite!="yes") return("No overwrite");
    tbl <- teaGetKey("input", "output table")
    print(paste("Reading text file '", input_file, "' into output table '", tbl, "'."))
    # The C routine reads its file/table settings from the spec itself.
    .C("text_in")
    if(is.null(tbl)) stop("I need an 'output table' name in the input section of the spec.")
    #Pre-edits are here for now, at read-in, on the raw data table.
    if (!is.null(do_preedits)) doPreedits(tbl)
    # Prefer the "view<tbl>" twin when one exists; otherwise use the raw table.
    if (dbExistsTable(teaenv$con, paste("view",tbl,sep=""))){
        teaenv$active_tab <- paste("view",tbl,sep="")
    } else {
        teaenv$active_tab <- tbl
    }
    # Keep a pristine copy of the freshly-read data as ORIG<tbl>.
    dbGetQuery(con,
        paste("drop table if exists",paste("ORIG",tbl,sep="")))
    dbGetQuery(con,
        paste("create table",paste("ORIG",tbl,sep=""),
        "as select * from", teaenv$active_tab))
}
#' Flag records that are at disclosure ("fingerprint") risk.
#'
#' Unsupplied arguments are looked up in the spec's "fingerprint" section.
#' When no flag key is available every record is flagged (with a warning);
#' otherwise Rflag.SQL flags records by key/frequency/combination thresholds.
#' Side effect: sets teaenv$overlay to "vflags".
#'
#' @param flag.key Variable(s) to base the flagging on; spec key
#'     "fingerprint/key" when NULL.
#' @param frequency Frequency threshold. NOTE(review): when a flag key is in
#'     use this is still overridden from the spec, as in the original code.
#' @param combinations Combination threshold (likewise overridden by the spec).
#' @param id Primary-key column; spec key "input/primary key" when NULL.
#' @param geolist Geography variables; spec key "fingerprint/geo" when NULL.
#' @param input_table Table to act on; resolved by getInputTable() when NULL.
doFingerprint <- function(flag.key=NULL,frequency=1,combinations=1,id=NULL,geolist=NULL,
    input_table=NULL){
    if(is.null(teaenv$con)) stop("You need to have a spec file read in!")
    con <- teaenv$con
    viewtbl <- getInputTable("fingerprint", input_table)
    if(is.null(flag.key)) flag.key <- teaGetKey("fingerprint","key")
    if(is.null(flag.key)) warning("No fingerprint key for flagging, so flagging everyone!")
    if(is.null(geolist)) geolist <- list(teaGetKey("fingerprint","geo"))
    # FIX: the id argument was previously ignored (unconditionally overwritten
    # from the spec); only consult the spec when the caller supplied nothing.
    if(is.null(id)) id <- teaGetKey("input","primary key")
    # FIX: scalar condition, so use the short-circuiting ||.
    if(is.null(flag.key) || length(flag.key)==0){
        Rflag.SQL(con,viewtbl,NULL,0,id=id,geolist=geolist,all=TRUE)
    } else {
        # NOTE(review): these still override the frequency/combinations
        # arguments with spec values (original behavior); confirm whether
        # caller-supplied values should take precedence here too.
        frequency <- teaGetKey("fingerprint","frequency")
        combinations <- teaGetKey("fingerprint","combinations")
        Rflag.SQL(con,viewtbl,flag.key,frequency,combthresh=combinations,
            id=id,geolist=geolist,vflagrtn=TRUE,verbose=TRUE)
    }
    teaenv$overlay <- "vflags"
}
#' Run the multiple-imputation step via the compiled `impute` routine.
#'
#' The R-side database connection is closed before calling into C (so the
#' C code gets its own access to the database) and re-opened afterwards
#' from teaenv$dbname.
#'
#' @param tag Imputation tag to run. Not currently supported: a non-NULL
#'     value triggers a warning and all tags are imputed.
#' @param autofill Passed to the C routine as an integer flag.
doMImpute <- function(tag=NULL, autofill=0){
    # Earlier R-side model handling, kept for reference:
    # rmodel <- TEAGetKey("impute", "%%/Rmodel", tag)
    # mod <- NULL
    # if (!is.null(rmodel)){
    #   mod <- get(rmodel)$model
    #   #est <- estimateRapopModel(list(), mod)
    # }
    # dbname <- dbGetInfo(teaenv$con)$dbname
    # Close our handle so the C routine can open the database itself.
    dbDisconnect(teaenv$con)
    active_tab <- "ignored on input; for output"
    if (!is.null(tag))
        warning("Imputing certain tags is currently not implemented (and not documented). Imputing all tags")
    .C("impute", as.character(active_tab), as.integer(autofill))
    # Reconnect for subsequent R-side work.
    teaenv$con <- dbConnect(dbDriver("SQLite"),teaenv$dbname)
    # NOTE(review): the .C return value is discarded, so `active_tab` here is
    # still the placeholder string; confirm whether the active table name was
    # meant to be taken from the .C result instead.
    teaenv$active_tab <- active_tab #active_tab may have changed
}
#' Run the consistency-edit step via the compiled `edit` routine.
#'
#' @param autofill Passed to the C routine as an integer flag.
doEdit <- function(autofill = 0) {
  # Placeholder string; the C routine treats this argument as an output slot.
  table_slot <- "ignored on input; for output"
  .C("edit", as.character(table_slot), as.integer(autofill))
}
# Shared mutable state for the do* functions above: the functions read and
# write fields such as `con` (DB connection), `active_tab`, `overlay`,
# `verbosity`, and `dbname` on this environment.
teaenv <- new.env()
#' Perform regression on a data set and generate
#' synthetic data conforming to consistency rules.
#'
#' Splits the input table into domains defined by the spec's "Regression/by"
#' variables, models/synthesizes each domain with RegEditSyn, saves the raw
#' results to updates.RData, and writes the synthesized columns back to the
#' output table.
#'
#' @param model.spec a list of modeling options, specified as:
#' \enumerate{
#' \item [[*]][[1]] = an R formula (either as a formula object or a character),
#' giving the initial model to fit on the data.
#' \item [[*]][[2]] = a length 1 character containing the name of a supported regression function
#' \item [[*]][[3]] = a character vector containing the optional right-hand-side variables
#' }
#' Each [[*]] element represents a new LHS variable for modeling. The order of model
#' fitting is decided by the ordering of the list. When NULL (the usual case),
#' the specification is built from the spec's "Regression/models" section.
#' NOTE(review): when a non-NULL model.spec is supplied, `flags`,
#' `consistency`, and `LHSs` are never defined below and the later calls
#' would fail -- confirm the intended usage of that path.
#' @param by domain-defining variables. NOTE(review): currently unused; the
#' domains always come from the spec's "Regression/by" key.
#' @param ncore The number of threads to use in processing.
#' @param bayes If non-NULL, treated as a flag (> 0) for Bayesian updating.
#' @param input_table Table to model; resolved via getInputTable() when NULL.
#' @param verbose If TRUE, ask RegEditSyn to print progress.
#' @return "Regression Complete"
#' @author Rolando Rodriguez \email{rolando.a.rodriguez@@census.gov}
doRegression <- function(model.spec=NULL,by=NULL,ncore=NULL,bayes=NULL, input_table=NULL,
    verbose=NULL){
    if(is.null(teaenv$con)) stop("You need to have a config file read in!")
    if(is.null(teaenv$overlay)) stop("You need to tell me which overlay to use!")
    all.vars <- character(0) #keep track of all variables needed to reduce data.frame size
    key1 <- "Regression"
    by.vars <- teaGetKey(key1,"by")
    all.vars <- c(all.vars,by.vars)
    if(is.null(ncore)) ncore <- teaGetKey(key1,"ncore")
    if(is.null(ncore)) ncore <- 1
    if(is.null(bayes)) bayes <- teaGetKey(key1,"bayes")
    if(!is.null(bayes)) bayes <- bayes > 0
    if(is.null(bayes)) bayes <- 0
    # FIX: the original sequence ended with `if(!is.null(verbose)) verbose <- TRUE`,
    # which always fired (verbose could no longer be NULL at that point), so
    # verbose was unconditionally TRUE. Now an explicit argument is honored,
    # and a NULL argument defaults to TRUE iff the spec has a "verbose" key.
    if(is.null(verbose)) verbose <- !is.null(teaGetKey(key1,"verbose"))
    if(is.null(model.spec)){
        LHSs <- teaGetKey(key1,"models", is_sub=TRUE)
        if(is.null(LHSs)) stop("You need to have at least one model specification!!")
        all.vars <- c(all.vars,LHSs)
        model.spec <- vector("list",length(LHSs))
        names(model.spec) <- LHSs
        #using overlay, create 'flags' input for RegEditSyn
        flags <- vector("list",2)
        flags[[1]] <- dbGetQuery(teaenv$con,paste("select * from",teaenv$overlay))
        flags[[2]] <- vector("list",length(LHSs))
        names(flags[[2]]) <- LHSs
        consistency <- NULL
        edit.subsets <- list()
        checks <- list()
        for(lhs in LHSs){
            checks[[lhs]] <- NA
            key2 <- paste(key1,"models",lhs,sep="/")
            flags[[2]][[lhs]] <- teaGetKey(key2,"flag")
            edit.subset <- teaGetKey(key2,"edit_subset")
            check <- teaGetKey(key2,"check")
            # FIX: misplaced parenthesis -- was length(check>0) / length(edit.subset>0),
            # i.e. the length of a logical vector; same effect, but clearly
            # intended as a length test.
            if(length(check) > 0) checks[[lhs]] <- check
            if(length(edit.subset) > 0) edit.subsets[[lhs]] <- edit.subset
            predictors <- teaGetKey(key2,"predictors")
            if(!identical(predictors,"1")) all.vars <- c(all.vars,predictors)
            predictors <- paste(lhs,"~",paste(predictors,collapse="+"))
            model.spec[[lhs]] <- list(predictors,teaGetKey(key2,"model"))
        }
        print("Here are the checks")
        print(checks)
        if(length(checks) == length(LHSs)){
            consistency <- checks
            edit.vars <- dbGetQuery(teaenv$con,"select name from variables")[,"name"]
            # NOTE(review): uses `edit.subset` from the last loop iteration,
            # not the accumulated `edit.subsets`; confirm intent.
            if(length(edit.subset)>0) edit.vars <- edit.subset
            all.vars <- c(all.vars,edit.vars)
        }
    }
    #need to do all.vars set for the case when model.spec is specific in the parameters
    all.vars <- unique(all.vars)
    viewtbl <- getInputTable(key1, input_table)
    if(length(by.vars)>0){
        domains <- dbGetQuery(teaenv$con,paste("select distinct",paste(by.vars,collapse=",")," from",viewtbl))[by.vars]
        #want each list element of domains to be a data frame with ONE
        #combination of the by variables
        domains <- split(domains,seq_len(nrow(domains)))
    }else{
        #will generate "where 1=1" in the by query below
        #will thus select all records
        by.vars <- "1"
        domains <- "1"
    }
    # NOTE(review): doFingerprint reads this spec key as "primary key" (with
    # a space); confirm which spelling the spec actually uses.
    primary_key <- teaGetKey("input","primary_key")
    # Model and synthesize a single domain; returns the synthesized data, or
    # the raw domain data when synthesis fails.
    by.f <- function(domain){
        con.tmp <- dbConnect(dbDriver("SQLite"),dbGetInfo(teaenv$con)$dbname)
        # FIX: dbDisconnect(con.tmp) used to sit after the return() calls and
        # was never reached, leaking one connection per domain; on.exit runs
        # on every exit path.
        on.exit(dbDisconnect(con.tmp), add=TRUE)
        print(paste("Domain:",paste(domain,collapse=",")))
        #BK hack to deal with still more text/numeric issues:
        # FIX: guard the quoting -- the no-"by" path passes the plain string
        # "1", for which the two-dimensional indexing would error.
        if(is.data.frame(domain)) domain[,] <- paste('"' , domain[,] , '"', sep="")
        #query to select data in domain for all the variables needed for modeling
        query <- paste("select",paste(c(primary_key,all.vars),collapse=","),
            "from",viewtbl,"where",
            paste(by.vars,domain,sep="=",collapse=" and "))
        data <- try(dbGetQuery(con.tmp,query))
        if(inherits(data,"try-error")) print(data)
        syn <- try(RegEditSyn(model.spec,data,flags,id=primary_key,
            consistency=consistency, bayes=bayes,ncore=1,verbose=verbose),silent=TRUE)
        if(inherits(syn,"try-error")){
            print(paste("Domain",paste(domain,collapse=","),"didn't work for synthesis"))
            print(syn)
            return(data)
        }
        syn
    }
    updates <- lapply(domains,by.f)
    save(updates,file="updates.RData")
    # Push the synthesized LHS columns back into the output table.
    lapply(updates,UpdateTablefromDF,tbl=teaGetKey("input", "output table"),
        con=teaenv$con,cols=LHSs,match.key=primary_key,
        ncommit=10000,verbose=TRUE)
    teaenv$active_tab <- viewtbl
    return("Regression Complete")
}
|
17041111ba63dee15705d45401661a409dc334d0 | 1307bc92687eaad1a0ad8a47bb8ccc3ca36f03a9 | /man/R2.lm.Rd | 3130e766c4085fbbcca115d5b0c033de114204c5 | [] | no_license | markwh/rcmodel | cf388494df2ceb3e26ab72e28302ffa827ef5927 | 14ce61b8384d5457475604530a5d9707a9247678 | refs/heads/master | 2020-04-12T05:31:29.465019 | 2017-03-05T21:01:55 | 2017-03-05T21:01:55 | 40,208,508 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 379 | rd | R2.lm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/markstats.R
\name{R2.lm}
\alias{R2.lm}
\title{Coefficient of determination for lm objects
Optionally gives adjusted R2.}
\usage{
\method{R2}{lm}(x, adjust = FALSE)
}
\arguments{
\item{x}{an object of class "lm"}
}
\description{
Coefficient of determination for lm objects
Optionally gives adjusted R2.
}
|
ff38316ae374415b78c56260b5745fa3ad11a971 | 99ee6d86c82ebb0ac9ba1a1059b2292758cfe903 | /Script/FitModels.R | c0554cd236f09aa99becad97c7831a6096178d89 | [] | no_license | Stewarjo/NSW-sea-garfish-stock-assessment | 58f6b4434cafda3926c3b8e32c9e9be9eac4d24b | b8b93d54949cdc2b8ffa4515563286b3ea354b53 | refs/heads/master | 2020-06-24T23:04:48.388604 | 2017-07-09T08:04:03 | 2017-07-09T08:04:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,630 | r | FitModels.R | # CREATED 1 Sep 2016
# MODIFIED 2 May 2017
# PURPOSE fit several mortality model to NSW sea garfish age data
# using hazard functions
# Load useful libraries
library(SAFR)
source("UsefulFunctions.R")
# Load data available for analysis
source("LoadTheData.R")
#########################################################
### Fit models
### Each block below fits one mortality model to the catch-at-age data by
### maximum likelihood (box-constrained L-BFGS-B via optim), prints the fit,
### and derives approximate standard errors from the inverse Hessian.
### NOTE(review): parameter meanings (catchability q, natural mortality M,
### selectivity s) are inferred from the headings below; confirm against the
### ll.model* definitions sourced from UsefulFunctions.R.
#########################################################
## First model
# boundaries for parameters
lower.bound <- c(5e-2,1e-2,1e-3); upper.bound <- c(15,1,1)
csf <- 1e-3 # catchability scaling factor
result <- optim(par = c(0.2, 0.2, 0.05), fn = ll.model1, catch = nb.at.age.wgt, effort = effort, catchability.scaling.factor = csf, method = c("L-BFGS-B"),
    lower = lower.bound, upper = upper.bound, hessian = TRUE)
print(result)
# Asymptotic standard errors: sqrt of the diagonal of the inverse Hessian
# (solve() will fail if the Hessian is singular at the optimum).
errors <- sqrt(diag(solve(result$hessian)))
print(errors)
## Second model
lower.bound <- c(5e-2,1e-2, 1e-2, 1e-4);upper.bound <- c(15,2,1,1)
csf <- 1e-3 # catchability scaling factor
result2 <- optim(par = c(1.0, 0.2, 0.1, 0.1), fn = ll.model2, catch = nb.at.age.wgt, effort = effort, catchability.scaling.factor = csf, method = c("L-BFGS-B"),
    lower = lower.bound, upper = upper.bound, hessian = TRUE)
print(result2)
errors2 <- sqrt(diag(solve(result2$hessian)))
print(errors2)
save(result2, file = "Results/Models/model2.R")
## Slightly modified second model: the selectivity block divide was shifted 1 year earlier
lower.bound <- c(5e-2,1e-2, 1e-2, 1e-4);upper.bound <- c(15,2,1,1)
csf <- 1e-3 # catchability scaling factor
result2.1 <- optim(par = c(1.0, 0.2, 0.1, 0.1), fn = ll.model2.1, catch = nb.at.age.wgt, effort = effort, catchability.scaling.factor = csf, method = c("L-BFGS-B"),
    lower = lower.bound, upper = upper.bound, hessian = TRUE)
print(result2.1)
errors2.1 <- sqrt(diag(solve(result2.1$hessian)))
print(errors2.1)
save(result2.1, file = "Results/Models/model2_1.R")
## Third model in which we fixed M to 0.7 1/year and estimated only q and s
lower.bound <- c(1e-2,0.01);upper.bound <- c(15,1)
csf <- 1e-3 # catchability scaling factor
result3 <- optim(par = c(2, 0.1), fn = ll.model3, catch = nb.at.age.wgt, effort = effort, catchability.scaling.factor = csf, method = c("L-BFGS-B"),
    lower = lower.bound, upper = upper.bound, hessian = TRUE)
print("result model 3")
print(result3)
errors3 <- sqrt(diag(solve(result3$hessian)))
print(errors3)
# ## A fourth model: assume 2% fishing power increase per year
# fp.rate <- 1.02
# fp.mat <- outer(fp.rate^seq(0, nrow(effort)-1), rep(1, ncol(effort)))
# lower.bound <- c(5e-2,1e-2, 1e-2, 1e-4);upper.bound <- c(15,2,1,1)
# csf <- 1e-3 # catchability scaling factor
# result4 <- optim(par = c(1.0, 0.2, 0.1, 0.1), fn = ll.model2, catch = nb.at.age.wgt, effort = fp.mat * effort, catchability.scaling.factor = csf, method = c("L-BFGS-B"),
# lower = lower.bound, upper = upper.bound, hessian = TRUE)
# print(result4)
# errors4 <- sqrt(diag(solve(result4$hessian)))
# print(errors4)
# save(result4, file = "Results/Models/model4.R")
# ### A fifth model assuming logistic gear selectivity
# lower.bound <- c(5e-2, 0.2, 0, 0, 0); upper.bound <- c(15, 2, 10, 20, 10)
# csf <- 1e-3 # catchability scaling factor
# result5 <- optim(par = c(1.0, 0.5, 1, 8, 0.5), fn = ll.model5, catch = nb.at.age.wgt, effort = effort, catchability.scaling.factor = csf, method = c("L-BFGS-B"),
# lower = lower.bound, upper = upper.bound, hessian = TRUE)
# print(result5)
# errors5 <- sqrt(diag(solve(result5$hessian)))
# print(errors5)
# save(result5, file = "Results/Models/model5.R")
# ### A variation of the fifth model assuming dome-shaped gear selectivity using logistic functions
# lower.bound <- c("q" = 5e-2, "M" = 1e-3, "alpha.logis1" = 0, "beta.logis1" = 0, "gamma.logis" = 1, "alpha.logis2" = 0);
# upper.bound <- c(15, 2, 10, 20, 6, 10)
# csf <- 1e-3 # catchability scaling factor
# result5.1 <- optim(par = c(1.0, 0.5, 1, 8, 1, 0.5), fn = ll.model5.1, catch = nb.at.age.wgt, effort = effort, catchability.scaling.factor = csf, method = c("L-BFGS-B"),
# lower = lower.bound, upper = upper.bound, hessian = TRUE)
# print(result5.1)
# errors5.1 <- sqrt(diag(solve(result5.1$hessian)))
# print(errors5.1)
# save(result5.1, file = "Results/Models/model5_1.R")
# ### A second variation of the fifth model assuming dome-shaped gear selectivity with age-groups 1-2 and 2-3 fixed to 1 and 5-6 fixed to 0
# lower.bound <- c("q" = 5e-2, "M" = 1e-2, "s1" = 0, "s4" = 0, "s5" = 0, "s1.2" = 0);
# upper.bound <- c(15, 2, 1, 1, 1, 1)
# csf <- 1e-3 # catchability scaling factor
# result5.2 <- optim(par = c(1.0, 0.5, 0.5, 0.8, 0.3, 0.1), fn = ll.model5.2, catch = nb.at.age.wgt, effort = effort, catchability.scaling.factor = csf, method = c("L-BFGS-B"),
# lower = lower.bound, upper = upper.bound, hessian = TRUE)
# print(result5.2)
# errors5.2 <- sqrt(diag(solve(result5.2$hessian)))
# print(errors5.2)
# save(result5.2, file = "Results/Models/model5_2.R")
# ### A sixth model
# lower.bound <- c(5e-2, 0.2, 0, 0, 0); upper.bound <- c(50, 2, 10, 20, 10)
# csf <- 1e-3 # catchability scaling factor
# result6 <- optim(par = c(10, 0.7, 3, 4, 3, 4), fn = ll.model6, catch = nb.at.age.wgt, effort = effort, catchability.scaling.factor = csf, method = c("L-BFGS-B"),
# lower = lower.bound, upper = upper.bound, hessian = TRUE)
# print("Model 6")
# print(result6)
# errors6 <- sqrt(diag(solve(result6$hessian)))
# print(errors6)
# save(result6, file = "Results/Models/model6.R")
# ### A seventh model
# lower.bound <- c(1e-2, 0.3, 0, 0, 0.5, 0, 0.1, 0.5); upper.bound <- c(10, 1.5, 0.1, 0.8, 1, 0.1, 0.5, 1)
# csf <- 1e-3 # catchability scaling factor
# result7 <- optim(par = c(1.0, 0.5, 0.0, 0.4, 0.5, 0.03, 0.24, 0.8), fn = ll.model7, catch = nb.at.age.wgt, effort = effort, catchability.scaling.factor = csf, method = c("L-BFGS-B"),
# lower = lower.bound, upper = upper.bound, hessian = TRUE)
# print(result7)
# errors7 <- sqrt(diag(solve(result7$hessian)))
# print(errors7)
# save(result7, file = "Results/Models/model7.R")
# ### A eighth model
# lower.bound <- c(1, 1e-2, 0.4, 0, 0, 0, 1); upper.bound <- c(20, 20, 2.0, 20, 10, 100, 10)
# csf <- 1e-3 # catchability scaling factor
# result8 <- optim(par = c(10, 10, 0.5, 1, 4, 3, 3), fn = ll.model8, catch = nb.at.age.wgt, effort = effort, catchability.scaling.factor = csf, method = c("L-BFGS-B"),
# lower = lower.bound, upper = upper.bound, hessian = TRUE, control = list(maxit = 1e3))
# print(result8)
# errors8 <- sqrt(diag(solve(result8$hessian)))
# print(errors8)
# save(result8, file = "Results/Models/model8.R")
|
4a41d5affd35231769d0ae01259edda4f4813662 | 77416ab9f5781b3a258d4576e53d400bd2f1d028 | /scripts/STEP-2-debias.R | a892ce48204e7678f945a08d765c83c2e85dd628 | [] | no_license | marissakivi/met-crc-workflow | 5db5100c9ead5cdb6147894ac6945de841f23b8c | 753a7bb1d94ac89c93363ec27c7dabf5591c9355 | refs/heads/master | 2022-06-04T18:32:56.447167 | 2022-05-04T18:23:43 | 2022-05-04T18:23:43 | 197,021,818 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 27,928 | r | STEP-2-debias.R |
####################
# Modeling Met Ensemble Workflow
# Step 2 :: Bias correction, bad ensemble rejection, and visual check
####################
# This script has been modified so it can be run as a job submission to the CRC's machines.
#
# Description: This step combines the third and fourth steps of Christy's met workflow,
# which correct for bias through the generation of daily ensembles using the three different
# met data sources, reject ensemble members with unlikely values, and generate visual checks of the data.
#
# Required functions:
# - align_met.R (PEcAn)
# - debias_met_regression.R (PEcAn)
#
# Required libraries:
# - ncdf4
# - mgcv
# - ggplot2
# - stringr
# - lubridate
####################
# ALTER ONLY THESE VARIABLES BEFORE SUBMITTING FOR NEW SITE
####################
# Load site and directory details
wd.base = '~/met-crc-workflow'  # root of the workflow checkout
site.name = "SYLVANIA"          # site identifier used in output paths
site.lat = 46.241944            # site latitude (decimal degrees)
site.lon = -89.347778           # site longitude (decimal degrees; negative = west)
vers=".v2"                      # version suffix appended to the output ensemble directory
ens=20:40                       # ensemble member numbers to generate in this run
# this should be adjusted depending on the site type (short or long)
first.year = 850
# will this be the last set of daily ensembles produced? do we want to perform rejection?
# if so, this should be TRUE
reject = TRUE
####################
# Step 1: Set up working directory
####################
# install missing libraries in Rlibs folder on account if not already installed
# this section is no longer needed because there is a general script to download packages
#if (!require('ncdf4',lib.loc ='~/Rlibs')) install.packages('ncdf4',lib='~/Rlibs',repos='http://cran.us.r-project.org',dependencies=TRUE)
#if (!require('ggplot2',lib.loc ='~/Rlibs')) install.packages('ggplot2',lib='~/Rlibs',repos='http://cran.us.r-project.org',dependencies=TRUE)
#if (!require('mgcv',lib.loc ='~/Rlibs')) install.packages('mgcv',lib='~/Rlibs',repos='http://cran.us.r-project.org',dependencies=TRUE)
#if (!require('stringr',lib.loc ='~/Rlibs')) install.packages('stringr',lib='~/Rlibs',repos='http://cran.us.r-project.org',dependencies=TRUE)
#if (!require('lubridate',lib.loc ='~/Rlibs')) install.packages('lubridate',lib='~/Rlibs',repos='http://cran.us.r-project.org',dependencies=TRUE)
require(ncdf4,lib.loc ='~/Rlibs')
require(withr, lib.loc='~/Rlibs')
require(ggplot2, lib.loc='~/Rlibs')
require(mgcv)
require(stringr)
require(lubridate)
# Setting some important file paths
path.func <- file.path(wd.base,'functions')
path.in <- file.path(wd.base,'data/paleon_sites')
path.out <- file.path(wd.base,'ensembles')
# Set seed variable for repoducibility
sd = 1159
# Source necessary functions
source(file.path(path.func,"align_met.R"))
source(file.path(path.func,"debias_met_regression.R"))
####################
# Step 2: Bias-correction to generate smooth daily met ensembles
# The end state of this step is continuous, smooth daily output from 850-2010+. The workflow of this step has three main bias-correction steps:
# 1. Debias CRUNCEP data (1 series) using NLDAS training set (1 series) => saves 1901-1979
# 2. Debias GCM historical runs (1 series) using CRUNCEP (n.ens series) => saves 1850-1901
# 3. Debias GCM past millenium (1 series) using GCM historical (n.ens series) => saves 850-1849
# The daily ensembles are saved in: <wd.base>/ensembles/<site.name,vers>/day.
####################
GCM.list=c("MIROC-ESM","MPI-ESM-P", "bcc-csm1-1", "CCSM4")
n.ens=length(ens)
ens.mems=str_pad(ens, 3, "left", pad=0)
# Set up the appropriate seeds to use when adding ensembles
set.seed(sd)
seed.vec <- sample.int(1e6, size=500, replace=FALSE)
seed <- seed.vec[min(ens)]
# This makes sure that if we add ensemble members, it gets a new, but reproducible seed
# Setting up file structure
out.base <- file.path(wd.base, "ensembles", paste0(site.name, vers), "day")
raw.base <- file.path(path.in,site.name)
# -----------------------------------
# Run a loop to do all of the downscaling steps for each GCM and put in one place
# -----------------------------------
# Each GCM is debiased through a chain of three passes, each trained on the
# previous pass's output:
#   (1) CRUNCEP vs. NLDAS, (2) GCM historical vs. corrected CRUNCEP,
#   (3) GCM past-millennium (p1000) vs. corrected historical.
# align.met() is assumed to return a list with $dat.train and $dat.source
# (per-variable matrices plus a $time data frame) -- see functions/align_met.R.
for(GCM in GCM.list){
ens.ID=GCM
print(paste("Debiasing GCM",ens.ID))
# Set up a file path for our ensemble to work with now
train.path <- file.path(out.base, "ensembles", GCM)
dir.create(train.path, recursive=TRUE, showWarnings=FALSE)
# --------------------------
# Set up ensemble structure; copy LDAS into ensemble directories
# --------------------------
files.ldas <- dir(file.path(raw.base, "NLDAS_day"))
for(i in 1:n.ens){
# Create a directory for each ensemble member
path.ens <- file.path(train.path, paste(ens.ID, ens.mems[i], sep="_"))
dir.create(path.ens, recursive=TRUE, showWarnings=FALSE)
# Copy LDAS in there with the new name <GCM>.<member>.<year>.nc; the year is
# assumed to be the second dot-separated token of the NLDAS file name
for(j in 1:length(files.ldas)){
yr <- strsplit(files.ldas[j], "[.]")[[1]][2]
name.new <- paste(ens.ID, ens.mems[i], yr, "nc", sep=".")
cmd.call <- paste("cp", file.path(raw.base, "NLDAS_day", files.ldas[j]),
                  file.path(path.ens, name.new), sep=" ")
system(cmd.call)
}
}
# --------------------------
# Step 1 :: Debias CRUNCEP using LDAS
# --------------------------
# 1. Align CRU 6-hourly with LDAS daily
source.path <- file.path(raw.base, "CRUNCEP")
print('CRUNCEP with LDAS')
# We're now pulling an ensemble because we've set up the file paths and copied LDAS over
# (even though all ensemble members will be identical here)
met.out <- align.met(train.path, source.path, yrs.train=NULL, yrs.source=NULL, n.ens=n.ens, seed=201708,
                     pair.mems = FALSE, mems.train=paste(ens.ID, ens.mems, sep="_"),print.progress = FALSE)
# Calculate wind speed if it's not already there
# (CRUNCEP provides U/V wind components rather than a scalar wind speed)
if(!"wind_speed" %in% names(met.out$dat.source)){
met.out$dat.source$wind_speed <- sqrt(met.out$dat.source$eastward_wind^2 + met.out$dat.source$northward_wind^2)
}
# 2. Pass the training & source met data into the bias-correction functions; this will get written to the ensemble
debias.met.regression(train.data=met.out$dat.train, source.data=met.out$dat.source, n.ens=n.ens,
                      vars.debias=NULL, CRUNCEP=TRUE, pair.anoms = TRUE, pair.ens = FALSE,
                      uncert.prop="random", resids = FALSE, seed=seed, outfolder=train.path,
                      yrs.save=NULL, ens.name=ens.ID, ens.mems=ens.mems, lat.in=site.lat, lon.in=site.lon,
                      save.diagnostics=TRUE, path.diagnostics=file.path(out.base, "bias_correct_qaqc_CRU"),
                      parallel = FALSE, n.cores = NULL, overwrite = TRUE, verbose = FALSE)
# --------------------------
# Step 2 :: Debias GCM historical runs using CRUNCEP
# --------------------------
# 1. Align GCM daily with our current ensemble (trained on the 1901-1920
# overlap window)
source.path <- file.path(raw.base, GCM, "historical")
print('Historical with CRUNCEP')
# We're now pulling an ensemble because we've set up the file paths and copied LDAS over
# (even though all ensemble members will be identical here)
met.out <- align.met(train.path, source.path, yrs.train=1901:1920, n.ens=n.ens, seed=201708,
                     pair.mems = FALSE, mems.train=paste(ens.ID, ens.mems, sep="_"))
# Calculate wind speed if it's not already there
if(!"wind_speed" %in% names(met.out$dat.source)){
met.out$dat.source$wind_speed <- sqrt(met.out$dat.source$eastward_wind^2 + met.out$dat.source$northward_wind^2)
}
# With MIROC-ESM, running into problem with NAs in 2005, so lets cut it all at 2000
# (each variable matrix is truncated to Year <= 2000; the matrix() wrapper
# keeps the object 2-dimensional even if only one row survives)
for(v in names(met.out$dat.source)){
if(v=="time") next
met.out$dat.source[[v]] <- matrix(met.out$dat.source[[v]][which(met.out$dat.source$time$Year<=2000),], ncol=ncol(met.out$dat.source[[v]]))
}
met.out$dat.source$time <- met.out$dat.source$time[met.out$dat.source$time$Year<=2000,]
# 2. Pass the training & source met data into the bias-correction functions; this will get written to the ensemble
# Only 1850-1900 is saved from this pass (yrs.save); 1901+ already comes from
# the corrected CRUNCEP series above.
debias.met.regression(train.data=met.out$dat.train, source.data=met.out$dat.source, n.ens=n.ens,
                      vars.debias=NULL, CRUNCEP=FALSE, pair.anoms = FALSE, pair.ens = FALSE,
                      uncert.prop="random", resids = FALSE, seed=seed,outfolder=train.path,
                      yrs.save=1850:1900, ens.name=ens.ID, ens.mems=ens.mems, lat.in=site.lat,
                      lon.in=site.lon, save.diagnostics=TRUE,
                      path.diagnostics=file.path(out.base, paste0("bias_correct_qaqc_",GCM,"_hist")),
                      parallel = FALSE, n.cores = NULL, overwrite = TRUE, verbose = FALSE)
# --------------------------
# Step 3 :: Debias GCM past millennium using GCM Historical
# --------------------------
# 1. Align GCM daily with our current ensemble (train on the corrected
# 1850-1900 historical window; source is first.year-1849 of the p1000 run)
source.path <- file.path(raw.base, GCM, "p1000")
print('p1000 with historical')
# We're now pulling an ensemble because we've set up the file paths and copied LDAS over
# (even though all ensemble members will be identical here)
met.out <- align.met(train.path, source.path, yrs.train=1850:1900, yrs.source=first.year:1849, n.ens=n.ens,
                     seed=201708, pair.mems = FALSE, mems.train=paste(ens.ID, ens.mems, sep="_"))
# Calculate wind speed if it's not already there
if(!"wind_speed" %in% names(met.out$dat.source)){
met.out$dat.source$wind_speed <- sqrt(met.out$dat.source$eastward_wind^2 + met.out$dat.source$northward_wind^2)
}
# 2. Pass the training & source met data into the bias-correction functions; this will get written to the ensemble
debias.met.regression(train.data=met.out$dat.train, source.data=met.out$dat.source, n.ens=n.ens,
                      vars.debias=NULL, CRUNCEP=FALSE, pair.anoms = FALSE, pair.ens = FALSE,
                      uncert.prop="random", resids = FALSE, seed=seed,
                      outfolder=train.path, yrs.save=NULL, ens.name=ens.ID, ens.mems=ens.mems,
                      lat.in=site.lat, lon.in=site.lon, save.diagnostics=TRUE,
                      path.diagnostics=file.path(out.base, paste0("bias_correct_qaqc_",GCM,"_p1000")),
                      parallel = FALSE, n.cores = NULL, overwrite = TRUE, verbose = FALSE)
}
if (reject){
####################
# Step 3: Reject bad, outlying ensemble members
# This step removes ensemble members which contain impossible or unlikely values (i.e. values that fall far outside the distribution of values). It moves the bad ensembles to <wd.base>/ensembles/<site.name,vers>/day/rejected where they will not be considered for future steps.
####################
# set up path to good and bad ensembles
path.dat <- file.path(wd.base, "ensembles", paste0(site.name, vers), "day/ensembles/")
path.bad <- file.path(wd.base, "ensembles", paste0(site.name, vers), "day/rejected/")
if(!dir.exists(path.bad)) dir.create(path.bad, recursive = T)
# -----------------------------------
# Get list of GCM and ensemble members and set up data array
# -----------------------------------
# NOTE: GCM.list and ens.mems are re-derived here from what is actually on
# disk, overwriting the configuration values set at the top of the script.
GCM.list <- dir(path.dat)
ens.mems <- vector()
n.files <- 0
var.names <- vector()
for(GCM in GCM.list){
ens.now <- dir(file.path(path.dat, GCM))
ens.mems <- c(ens.mems, ens.now)
files.now <- dir(file.path(path.dat, GCM, ens.now[1]))
n.files <- max(n.files, length(files.now))
# Variable names are read from the first file of the first member; all files
# are assumed to contain the same set of variables
ncT <- ncdf4::nc_open(file.path(path.dat, GCM, ens.now[1], files.now[1]))
var.names <- names(ncT$var)
ncdf4::nc_close(ncT)
}
# Set up a blank array to store everything in:
# dimensions are [yearly file, variable, min/max, ensemble member]
dat.summary <- array(dim=c(n.files, length(var.names), 2, length(ens.mems)))
# dim[3] == 2 so we can store min/max
# NOTE(review): the year labels assume one file per year starting at 1800 --
# confirm this matches the naming/ordering of the saved ensemble files.
dimnames(dat.summary)[[1]] <- seq(1800, length.out=n.files, by=1)
dimnames(dat.summary)[[2]] <- var.names
dimnames(dat.summary)[[3]] <- c("yr.min", "yr.max")
dimnames(dat.summary)[[4]] <- ens.mems
names(dimnames(dat.summary)) <- c("Year", "Var", "max.min", "ensemble.member")
summary(dimnames(dat.summary))
# Loop through and get the summary stats: per-year min & max of every
# variable for every ensemble member
pb <- txtProgressBar(min=0, max=dim(dat.summary)[1]*dim(dat.summary)[2]*dim(dat.summary)[4], style=3)
pb.ind=1
for(GCM in 1:length(GCM.list)){
ens.gcm <- dir(file.path(path.dat, GCM.list[GCM]))
# NOTE(review): this loop index reuses the name 'ens' (the member-range
# vector defined in the configuration); nothing below relies on the
# original value, but keep that in mind when editing.
for(ens in 1:length(ens.gcm)){
ens.ind <- which(ens.mems == ens.gcm[ens])
f.all <- dir(file.path(path.dat, GCM.list[GCM], ens.gcm[ens]))
for(fnow in 1:length(f.all)){
ncT <- ncdf4::nc_open(file.path(path.dat, GCM.list[GCM], ens.gcm[ens], f.all[fnow]))
for(v in 1:length(var.names)){
dat.summary[fnow,v,1,ens.ind] <- min(ncdf4::ncvar_get(ncT, var.names[v]))
dat.summary[fnow,v,2,ens.ind] <- max(ncdf4::ncvar_get(ncT, var.names[v]))
setTxtProgressBar(pb, pb.ind)
pb.ind <- pb.ind+1
}
ncdf4::nc_close(ncT)
}
}
}
# -----------------------------------
# Filter and identify outliers
# -----------------------------------
# A member-year is flagged when any variable's yearly min (max) falls more
# than 6 standard deviations below (above) the across-member mean of the
# yearly mins (maxes).
ens.bad <- array(dim=c(n.files, length(ens.mems)))
dimnames(ens.bad)[[1]] <- dimnames(dat.summary)[[1]]
dimnames(ens.bad)[[2]] <- dimnames(dat.summary)[[4]]
# Mean & SD across ensemble members for each [year, variable, min/max] cell.
# (FUN=sd is matched to the stats::sd function by apply()'s match.fun().)
sum.means <- apply(dat.summary[,,,], c(1, 2, 3), FUN=mean)
sum.sd <- apply(dat.summary[,,,], c(1, 2, 3), FUN=sd)
for(i in 1:nrow(ens.bad)){
for(j in 1:ncol(ens.bad)){
vars.bad <- dat.summary[i,,1,j] < sum.means[i,,1] - 6*sum.sd[i,,1] | dat.summary[i,,2,j] > sum.means[i,,2] + 6*sum.sd[i,,2]
if(any(vars.bad)){
# record how many variables were out of bounds for this member-year;
# unflagged cells stay NA
ens.bad[i,j] <- length(which(vars.bad))
}
}
}
# Summarizing bad ensembles (printed to the job log for inspection)
yrs.bad <- apply(ens.bad, 1, sum, na.rm=TRUE)
summary(yrs.bad)
mems.bad <- apply(ens.bad, 2, sum, na.rm=TRUE)
length(which(mems.bad==0))/length(mems.bad)
summary(mems.bad)
quantile(mems.bad, 0.90)
# -----------------------------------
# Move the bad ensemble members
# -----------------------------------
# Any member flagged in at least one year is moved wholesale to the
# 'rejected' directory so later steps ignore it.
mems.bad[mems.bad>0]
for(mem in names(mems.bad[mems.bad>0])){
GCM <- stringr::str_split(mem, "_")[[1]][1]
system(paste("mv", file.path(path.dat, GCM, mem), file.path(path.bad, mem), sep=" "))
}
#Step 4: Generate figures to visually check debiased data
#This step generates QAQC for generated ensembles. These figures should be checked to ensure that the means and variances look more or less OK.
# variables to maintain: wd.base, path.func, site.name, vers, site.lat, site.lon, align.met function,
GCM.list <- c("bcc-csm1-1", "CCSM4", "MIROC-ESM", "MPI-ESM-P")
#GCM.list <- c("MIROC-ESM")
# Setting up some file paths, etc
path.raw.base <- file.path(wd.base, "data/paleon_sites", site.name)
path.day.base <- file.path(wd.base, "ensembles", paste0(site.name, vers), "day")
# defining some variable names: vars.CF are the CF-standard names stored in
# the netCDF files; vars.short are the matching short column names used in
# the summary data frames below (kept in the same order)
vars.CF <- c("air_temperature_minimum", "air_temperature_maximum", "precipitation_flux",
             "surface_downwelling_shortwave_flux_in_air", "surface_downwelling_longwave_flux_in_air",
             "air_pressure", "specific_humidity", "wind_speed")
vars.short <- c("tair.min", "tair.max", "precip", "swdown", "lwdown", "press", "qair", "wind")
# -----------------------------------
# 1. Read in met data
# -----------------------------------
# Use the align.met function to get everything harmonized
#source("~/Desktop/pecan/modules/data.atmosphere/R/align_met.R")
# ---------
# 1.1. Raw Data
# ---------
# Do this once with NLDAS and CRUNCEP: NLDAS comes back in $dat.train and
# CRUNCEP in $dat.source; each variable is a matrix with one column per
# ensemble member (n.ens=1 here, so column 1 is used throughout)
met.base <- align.met(train.path=file.path(path.raw.base, "NLDAS_day"),
                      source.path = file.path(path.raw.base, "CRUNCEP"), n.ens=1, seed=20170905)
met.raw <- data.frame(met.base$dat.train$time)
met.raw$dataset <- "NLDAS"
met.raw$tair.min <- met.base$dat.train$air_temperature_minimum[,1]
met.raw$tair.max <- met.base$dat.train$air_temperature_maximum[,1]
met.raw$precip <- met.base$dat.train$precipitation_flux[,1]
met.raw$swdown <- met.base$dat.train$surface_downwelling_shortwave_flux_in_air[,1]
met.raw$lwdown <- met.base$dat.train$surface_downwelling_longwave_flux_in_air[,1]
met.raw$press <- met.base$dat.train$air_pressure[,1]
met.raw$qair <- met.base$dat.train$specific_humidity[,1]
met.raw$wind <- met.base$dat.train$wind_speed[,1]
# CRUNCEP has U/V wind components, so speed is computed from them below
met.tmp <- data.frame(met.base$dat.source$time)
met.tmp$dataset <- "CRUNCEP"
met.tmp$tair.min <- met.base$dat.source$air_temperature_minimum[,1]
met.tmp$tair.max <- met.base$dat.source$air_temperature_maximum[,1]
met.tmp$precip <- met.base$dat.source$precipitation_flux[,1]
met.tmp$swdown <- met.base$dat.source$surface_downwelling_shortwave_flux_in_air[,1]
met.tmp$lwdown <- met.base$dat.source$surface_downwelling_longwave_flux_in_air[,1]
met.tmp$press <- met.base$dat.source$air_pressure[,1]
met.tmp$qair <- met.base$dat.source$specific_humidity[,1]
met.tmp$wind <- sqrt(met.base$dat.source$eastward_wind[,1]^2 + met.base$dat.source$northward_wind[,1]^2)
met.raw <- rbind(met.raw, met.tmp)
# Loop through the GCMs to extract the raw (un-debiased) series for both
# experiments; p1000 is restricted to 1800-1849 so the plots stay readable
for(GCM in GCM.list){
for(experiment in c("historical", "p1000")){
if(experiment == "p1000"){
met.base <- align.met(train.path=file.path(path.raw.base, "NLDAS_day"),
                      source.path = file.path(path.raw.base, GCM, experiment), yrs.source=1800:1849,
                      n.ens=1, seed=20170905, pair.mems = FALSE)
} else {
met.base <- align.met(train.path=file.path(path.raw.base, "NLDAS_day"),
                      source.path = file.path(path.raw.base, GCM, experiment), yrs.source=NULL, n.ens=1,
                      seed=20170905, pair.mems = FALSE)
}
# dataset label is "<GCM>.<experiment>" so the experiment can be recovered
# later by splitting on "."
met.tmp <- data.frame(met.base$dat.source$time)
met.tmp$dataset <- paste(GCM, experiment, sep=".")
met.tmp$tair.min <- met.base$dat.source$air_temperature_minimum[,1]
met.tmp$tair.max <- met.base$dat.source$air_temperature_maximum[,1]
met.tmp$precip <- met.base$dat.source$precipitation_flux[,1]
met.tmp$swdown <- met.base$dat.source$surface_downwelling_shortwave_flux_in_air[,1]
met.tmp$lwdown <- met.base$dat.source$surface_downwelling_longwave_flux_in_air[,1]
met.tmp$press <- met.base$dat.source$air_pressure[,1]
met.tmp$qair <- met.base$dat.source$specific_humidity[,1]
# Use stored wind speed when available; otherwise compute it from components
if("wind_speed" %in% names(met.base$dat.source)){
met.tmp$wind <- met.base$dat.source$wind_speed[,1]
} else {
met.tmp$wind <- sqrt(met.base$dat.source$eastward_wind[,1]^2 + met.base$dat.source$northward_wind[,1]^2)
}
met.raw <- rbind(met.raw, met.tmp)
} # End experiment loop
} # end GCM loop
# ---------
# 1.2. Bias-Corrected data
# ---------
# For each GCM, read the full debiased ensemble and reduce it to three
# daily summary series across members: mean, 2.5% (lwr) and 97.5% (upr).
met.bias <- list()
for(GCM in GCM.list){
print(GCM)
met.base <- align.met(train.path=file.path(path.raw.base, "NLDAS_day"),
                      source.path = file.path(path.day.base, "ensembles", GCM), n.ens=n.ens, pair.mems=FALSE,
                      seed=201709)
met.tmp <- list()
met.tmp$mean <- data.frame(met.base$dat.source$time)
met.tmp$mean$dataset <- GCM
met.tmp$mean$tair.min <- apply(met.base$dat.source$air_temperature_minimum, 1, mean, na.rm=TRUE)
met.tmp$mean$tair.max <- apply(met.base$dat.source$air_temperature_maximum, 1, mean, na.rm=TRUE)
met.tmp$mean$precip <- apply(met.base$dat.source$precipitation_flux , 1, mean, na.rm=TRUE)
met.tmp$mean$swdown <- apply(met.base$dat.source$surface_downwelling_shortwave_flux_in_air, 1, mean,
                             na.rm=TRUE)
met.tmp$mean$lwdown <- apply(met.base$dat.source$surface_downwelling_longwave_flux_in_air , 1, mean,
                             na.rm=TRUE)
met.tmp$mean$press <- apply(met.base$dat.source$air_pressure , 1, mean, na.rm=TRUE)
met.tmp$mean$qair <- apply(met.base$dat.source$specific_humidity , 1, mean, na.rm=TRUE)
met.tmp$mean$wind <- apply(met.base$dat.source$wind_speed , 1, mean, na.rm=TRUE)
met.tmp$lwr <- data.frame(met.base$dat.source$time)
met.tmp$lwr$dataset <- GCM
met.tmp$lwr$tair.min <- apply(met.base$dat.source$air_temperature_minimum, 1, quantile, 0.025, na.rm=TRUE)
met.tmp$lwr$tair.max <- apply(met.base$dat.source$air_temperature_maximum, 1, quantile, 0.025, na.rm=TRUE)
met.tmp$lwr$precip <- apply(met.base$dat.source$precipitation_flux , 1, quantile, 0.025, na.rm=TRUE)
met.tmp$lwr$swdown <- apply(met.base$dat.source$surface_downwelling_shortwave_flux_in_air, 1, quantile,
                            0.025, na.rm=TRUE)
met.tmp$lwr$lwdown <- apply(met.base$dat.source$surface_downwelling_longwave_flux_in_air , 1, quantile,
                            0.025, na.rm=TRUE)
met.tmp$lwr$press <- apply(met.base$dat.source$air_pressure , 1, quantile, 0.025, na.rm=TRUE)
met.tmp$lwr$qair <- apply(met.base$dat.source$specific_humidity , 1, quantile, 0.025, na.rm=TRUE)
met.tmp$lwr$wind <- apply(met.base$dat.source$wind_speed , 1, quantile, 0.025, na.rm=TRUE)
met.tmp$upr <- data.frame(met.base$dat.source$time)
met.tmp$upr$dataset <- GCM
met.tmp$upr$tair.min <- apply(met.base$dat.source$air_temperature_minimum, 1, quantile, 0.975, na.rm=TRUE)
met.tmp$upr$tair.max <- apply(met.base$dat.source$air_temperature_maximum, 1, quantile, 0.975, na.rm=TRUE)
met.tmp$upr$precip <- apply(met.base$dat.source$precipitation_flux , 1, quantile, 0.975, na.rm=TRUE)
met.tmp$upr$swdown <- apply(met.base$dat.source$surface_downwelling_shortwave_flux_in_air, 1, quantile,
                            0.975, na.rm=TRUE)
met.tmp$upr$lwdown <- apply(met.base$dat.source$surface_downwelling_longwave_flux_in_air, 1, quantile,
                            0.975, na.rm=TRUE)
met.tmp$upr$press <- apply(met.base$dat.source$air_pressure , 1, quantile, 0.975, na.rm=TRUE)
met.tmp$upr$qair <- apply(met.base$dat.source$specific_humidity , 1, quantile, 0.975, na.rm=TRUE)
met.tmp$upr$wind <- apply(met.base$dat.source$wind_speed , 1, quantile, 0.975, na.rm=TRUE)
# First GCM initializes the list; later GCMs are appended row-wise
if(length(met.bias)==0){
met.bias <- met.tmp
} else {
met.bias$mean <- rbind(met.bias$mean, met.tmp$mean)
met.bias$lwr  <- rbind(met.bias$lwr , met.tmp$lwr )
met.bias$upr  <- rbind(met.bias$upr , met.tmp$upr )
}
}
# -----------------------------------
# 2. QAQC graphing
# -----------------------------------
# Annual means of the daily summaries, per dataset
met.bias.yr.mean <- aggregate(met.bias$mean[,vars.short], by=met.bias$mean[,c("Year", "dataset")], FUN=mean)
met.bias.yr.lwr <- aggregate(met.bias$lwr [,vars.short], by=met.bias$lwr [,c("Year", "dataset")], FUN=mean)
met.bias.yr.upr <- aggregate(met.bias$upr [,vars.short], by=met.bias$upr [,c("Year", "dataset")], FUN=mean)
summary(met.bias.yr.mean)
# Stacking everything together into long format (one row per
# year/dataset/variable) for ggplot faceting
met.bias.yr <- stack(met.bias.yr.mean[,vars.short])
names(met.bias.yr) <- c("mean", "met.var")
met.bias.yr[,c("Year", "dataset")] <- met.bias.yr.mean[,c("Year", "dataset")]
met.bias.yr$lwr <- stack(met.bias.yr.lwr[,vars.short])[,1]
met.bias.yr$upr <- stack(met.bias.yr.upr[,vars.short])[,1]
summary(met.bias.yr)
# Raw met: dataset2 keeps the full "<GCM>.<experiment>" label; dataset is
# collapsed to just the GCM/source name for coloring
met.raw.yr1 <- aggregate(met.raw[,vars.short], by=met.raw[,c("Year", "dataset")], FUN=mean)
met.raw.yr1$dataset2 <- as.factor(met.raw.yr1$dataset)
for(i in 1:nrow(met.raw.yr1)){
met.raw.yr1[i,"dataset"] <- stringr::str_split(met.raw.yr1[i,"dataset2"], "[.]")[[1]][1]
}
met.raw.yr1$dataset <- as.factor(met.raw.yr1$dataset)
summary(met.raw.yr1)
met.raw.yr <- stack(met.raw.yr1[,vars.short])
names(met.raw.yr) <- c("raw", "met.var")
met.raw.yr[,c("Year", "dataset", "dataset2")] <- met.raw.yr1[,c("Year", "dataset", "dataset2")]
summary(met.raw.yr)
# Annual time series of the raw sources; dashed lines mark the splice
# points between data products (1850, 1901, 2010)
png(file.path(path.day.base, "Raw_Annual.png"), height=8, width=10, units="in", res=220)
print(
  ggplot(data=met.raw.yr[,]) + facet_wrap(~met.var, scales="free_y") +
    geom_path(aes(x=Year, y=raw, color=dataset, group=dataset2), size=0.5) +
    geom_vline(xintercept=c(1850, 1901, 2010), linetype="dashed") +
    scale_x_continuous(expand=c(0,0)) +
    theme_bw()
)
dev.off()
# Annual time series of the debiased ensembles (ribbon = 2.5-97.5% across
# ensemble members)
png(file.path(path.day.base, "Debias_Annual.png"), height=8, width=10, units="in", res=220)
print(
  ggplot(data=met.bias.yr[, ]) + facet_wrap(~met.var, scales="free_y") +
    geom_ribbon(aes(x=Year, ymin=lwr, ymax=upr, fill=dataset), alpha=0.5) +
    geom_path(aes(x=Year, y=mean, color=dataset), size=0.5) +
    geom_vline(xintercept=c(1850, 1901, 2010), linetype="dashed") +
    scale_x_continuous(expand=c(0,0)) +
    theme_bw()
)
dev.off()
# Save the summaries of the raw and bias-corrected data to quickly make some customized graphs elsewhere
write.csv(met.raw.yr, file.path(path.day.base, "Met_Raw_Annual.csv" ), row.names=FALSE)
write.csv(met.bias.yr, file.path(path.day.base, "Met_Corrected_Annual.csv"), row.names=FALSE)
# Looking at the seasonal cycle: same summaries as above but averaged by
# day-of-year instead of calendar year
met.bias.doy.mean <- aggregate(met.bias$mean[,vars.short], by=met.bias$mean[,c("DOY", "dataset")], FUN=mean,
                               na.rm=TRUE)
met.bias.doy.lwr <- aggregate(met.bias$lwr [,vars.short], by=met.bias$lwr [,c("DOY", "dataset")], FUN=mean,
                              na.rm=TRUE)
met.bias.doy.upr <- aggregate(met.bias$upr [,vars.short], by=met.bias$upr [,c("DOY", "dataset")], FUN=mean,
                              na.rm=TRUE)
summary(met.bias.doy.mean)
# Stacking everything together
met.bias.doy <- stack(met.bias.doy.mean[,vars.short])
names(met.bias.doy) <- c("mean", "met.var")
met.bias.doy[,c("DOY", "dataset")] <- met.bias.doy.mean[,c("DOY", "dataset")]
met.bias.doy$lwr <- stack(met.bias.doy.lwr[,vars.short])[,1]
met.bias.doy$upr <- stack(met.bias.doy.upr[,vars.short])[,1]
summary(met.bias.doy)
# met.raw$dataset <- as.character(met.raw$dataset2)
met.raw.doy1 <- aggregate(met.raw[,vars.short], by=met.raw[,c("DOY", "dataset")], FUN=mean, na.rm=TRUE)
met.raw.doy1$dataset2 <- as.factor(met.raw.doy1$dataset)
for(i in 1:nrow(met.raw.doy1)){
met.raw.doy1[i,"dataset"] <- stringr::str_split(met.raw.doy1[i,"dataset2"], "[.]")[[1]][1]
}
met.raw.doy1$dataset <- as.factor(met.raw.doy1$dataset)
met.raw.doy <- stack(met.raw.doy1[,vars.short])
names(met.raw.doy) <- c("raw", "met.var")
met.raw.doy[,c("DOY", "dataset", "dataset2")] <- met.raw.doy1[,c("DOY", "dataset", "dataset2")]
summary(met.raw.doy)
summary(met.raw.doy1)
summary(met.bias.doy.mean)
# Seasonal cycle of the raw sources; NLDAS (the training target) is drawn as
# a thick black reference line
png(file.path(path.day.base, "Raw_DOY.png"), height=8, width=10, units="in", res=220)
print(
  ggplot(data=met.raw.doy[,]) + facet_wrap(~met.var, scales="free_y") +
    geom_path(data=met.raw.doy[met.raw.doy$dataset=="NLDAS",], aes(x=DOY, y=raw), color="black", size=1) +
    geom_path(data=met.raw.doy[met.raw.doy$dataset!="NLDAS",], aes(x=DOY, y=raw, color=dataset, group=dataset2), size=0.5) +
    scale_x_continuous(expand=c(0,0)) +
    theme_bw()
)
dev.off()
# Seasonal cycle of the debiased ensembles vs. the NLDAS reference
png(file.path(path.day.base, "Debias_DOY.png"), height=8, width=10, units="in", res=220)
print(
  ggplot(data=met.bias.doy[, ]) + facet_wrap(~met.var, scales="free_y") +
    geom_path(data=met.raw.doy[met.raw.doy$dataset=="NLDAS",], aes(x=DOY, y=raw), color="black", size=1) +
    geom_ribbon(aes(x=DOY, ymin=lwr, ymax=upr, fill=dataset), alpha=0.5) +
    geom_path(aes(x=DOY, y=mean, color=dataset), size=0.5) +
    # geom_vline(xintercept=c(1850, 1901, 2010), linetype="dashed") +
    scale_x_continuous(expand=c(0,0)) +
    theme_bw()
)
dev.off()
# Save the summaries of the raw and bias-corrected data to quickly make some customized graphs elsewhere
write.csv(met.raw.doy , file.path(path.day.base, "Met_Raw_DOY.csv" ), row.names=FALSE)
write.csv(met.bias.doy, file.path(path.day.base, "Met_Corrected_DOY.csv"), row.names=FALSE)
}
|
da6e1ce3a809ca47c56810e89581f5b096844d10 | 6218f5b9f6a841a87dbb8785e588f876b804cf23 | /R/nrawrap.R | d56d7ea79fb2925df618bda37fb3c0108e76a23e | [] | no_license | Rapporteket/nra | 60c414c56d2ddfb62382ca071fad44bdb73b7e03 | 8b7da276b7dd05af60ad24472769d3e0a515f5d2 | refs/heads/rel | 2023-06-07T15:31:26.951578 | 2023-05-31T14:01:46 | 2023-05-31T14:01:46 | 57,379,519 | 0 | 0 | null | 2020-01-24T11:06:18 | 2016-04-29T11:29:38 | R | UTF-8 | R | false | false | 503 | r | nrawrap.R | #' Automatisk linjebryting av lange tekstetiketter
#'
#' Automatically wrap long text labels
#'
#' Wraps each element of a character vector onto multiple lines so that no
#' line (word breaks permitting) exceeds the requested width, inserting
#' newline characters at the break points.
#'
#' @param x A character string, or a vector of character strings.
#' @param len Target line width (in characters) at which to break the text.
#'
#' @return An unnamed character vector the same length as \code{x}, with
#'   line breaks (\code{"\n"}) inserted at the requested width.
#'
#' @export
#'
# Core wrapping function.  vapply() is used instead of sapply() so the
# return type is always a character vector (sapply() would return an empty
# list for zero-length input).
wrap.it <- function(x, len)
{
  vapply(x,
         function(y) paste(strwrap(y, len), collapse = "\n"),
         character(1),
         USE.NAMES = FALSE)
}
|
8b45ccb3788dfea56251285f86a670d1a8253139 | e5f84c07e4b229502c67fff9ced2fa29420d5777 | /Clean_Data.R | bef9fa755cdeca8bbfcd0e2ba17b25689eefba2d | [] | no_license | paurabassa/storm-generation | cd648f4ef868f312f48fb909b4aa4971a4760804 | a7ace284d5be9dbf7830f1d984e5492c25042f0c | refs/heads/master | 2021-01-22T17:22:51.416127 | 2015-04-20T18:28:37 | 2015-04-20T18:28:37 | 31,430,642 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,531 | r | Clean_Data.R | #
#
# R file to collect, clean and save relevant wave and tide data for its analysis.
# Several files are generated and stored at Data.Processed/
#
#
# Read wave hindcast data
hcast.all <- read.csv("~/Post-Doc-LDN/Storm-Generation/Data.Raw/data.csv")
#str(hcast)
# Parse the timestamps (GMT) and keep only rows with a valid significant
# wave height (hs), retaining the wave and wind columns of interest
hcast.all$Date <- as.POSIXct(strptime(hcast.all$Date, "%Y-%m-%d %H:%M:%S", "GMT"))
waves <- hcast.all[!is.na(hcast.all$hs),c("Date","hs", "fp", "tm", "dir", "U10", "V10")]
#str(waves)
# This data is every 3 hours at the beginning and then every hour from some
# point on.  The following lines split the data into two files with a
# uniform timescale: t1 is the first index whose hour is NOT a multiple of
# 3, i.e. where the hourly sampling starts.
t1 <- min(which(as.logical(as.POSIXlt(waves[,1])$hour %%3)))
t.hourly <-(t1-1):length(waves$Date)
laux <- length(t.hourly)/3
# 3-hourly series: the original 3-hourly block plus every 3rd record of the
# hourly block
t.3hours <- c(1:(t1-1),(t1-1)+3*(1:(laux-1)))
#which(as.logical(as.POSIXlt(hcast[t.3hours,1])$hour %%3))
write.csv(waves, file="./Data.Processed/waves-all.csv", row.names=F, quote=F)
write.csv(waves[t.hourly,], file="./Data.Processed/waves-hourly.csv", row.names=F, quote=F)
write.csv(waves[t.3hours,], file="./Data.Processed/waves-3hours.csv", row.names=F, quote=F)
#
# Read the tide time series.  In both resid.dat and total.dat each row holds
# consecutive hourly values (12 per row in the original files), so the
# hourly series is obtained by flattening the matrix row-by-row:
# as.vector(t(M)), since R stores matrices column-wise.  This replaces the
# original element-by-element double loop and works for any number of
# columns, not just 12.
#
# Residual (non-astronomical, surge) tide component
res <- read.table("~/Post-Doc-LDN/Storm-Generation/Data.Raw/resid.dat")
v.res <- as.vector(t(data.matrix(res)))
#str(v.res)
#plot(v.res, typ='l')
# Total tide level
tot <- read.table("~/Post-Doc-LDN/Storm-Generation/Data.Raw/total.dat")
v.tot <- as.vector(t(data.matrix(tot)))
#str(v.tot)
#plot(v.tot, typ='l')
# Create a data.frame of tides with the corresponding time: hourly stamps
# starting 1992-01-01 00:00 GMT.  The astronomical tide (a.tide) is
# recovered as total minus residual.
tm <- as.POSIXct(strptime("0101199200", "%d%m%Y%H", "GMT")) + 3600*(0:(length(v.res)-1))
tides <- data.frame(tm, v.tot, v.res, v.tot-v.res)
names(tides) <- c("Date", "t.tide", "res", "a.tide")
write.csv(tides, file="./Data.Processed/tides-all.csv", row.names=F, quote=F)
#
# combine both files
#
# Inner-join waves and tides on their shared hourly Date stamps
climate <- merge(waves, tides, by="Date")
# The resulting merged set is also sampled every 3 hours at the beginning
# and then every hour at some point.
# The following lines split the data into two files with a uniform
# timescale (same indexing scheme as used for the waves above).
t1 <- min(which(as.logical(as.POSIXlt(climate[,1])$hour %%3)))
t.hourly <-(t1-1):length(climate$Date)
laux <- length(t.hourly)/3
t.3hours <- c(1:(t1-1),(t1-1)+3*(1:(laux-1)))
write.csv(climate, file = "./Data.Processed/clim-all.csv", row.names=F, quote=F)
write.csv(climate[t.hourly,], file = "./Data.Processed/clim-hourly.csv", row.names=F, quote=F)
write.csv(climate[t.3hours,], file = "./Data.Processed/clim-3hours.csv", row.names=F, quote=F)
# read future tides (fixed-width text export covering 2015-2064)
con <- file("./Data.Raw/6000591_2015-2064.txt")
lines <- readLines(con)
close(con)
#which(nchar(lines)!=69)
# Drop the header rows (lines 1-5 and 350646-350650).
# NOTE(review): these indices are hard-coded for this particular input file
# and must be regenerated if the file changes.
lines2 <- lines[c(6:350645, 350651:438322)] #this is just because the original
#file has a header that hasn't been removed when concatenating files
# Re-write the cleaned lines to a temp file and parse the fixed-width columns
ff <- tempfile()
cat(file = ff, lines2, sep = "\n")
ftides <- read.fwf(ff, widths = c(18,10,10,10,12,12)) #
#str(ftides)
# First column is a "dd/mm/yyyy HH:MM" timestamp (GMT)
ftides$V1 <- as.POSIXct(strptime(as.character(ftides$V1), "%d/%m/%Y %H:%M", "GMT"))
# Assign descriptive column names
names(ftides)[1] <- "Date"
names(ftides)[2] <- "Level"
names(ftides)[3] <- "Speed"
names(ftides)[4] <- "Direc"
names(ftides)[5] <- "U-Comp"
names(ftides)[6] <- "V-Comp"
write.csv(ftides, file = "./Data.Processed/future-tides.csv",row.names=F, quote=F)
#library(copula)
|
42a17d7c66ab49ef6a2ce462b9dda4a63790d6ac | c3ed0eea77de3338cc4820ca27dc04384676b29b | /R/functions_path.R | 9a3e17e347e98455c65930d76690d427da2e57de | [] | no_license | jrboyd/seqtsne | f3bd94ee23140dc71373af4f1e7ce9ffe60d702f | 5a67cbe5af281ec42906689d1a9961d8fe9de68d | refs/heads/master | 2022-10-30T18:33:13.005047 | 2022-10-28T20:22:58 | 2022-10-28T20:22:58 | 177,857,441 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,978 | r | functions_path.R | #' plot_path
#'
#' Traces a path through the t-sne space in tall_var line order defined by qtall_vars
#' for ids in id_to_plot
#'
#' Good for looking at a small number of ids in a modest number of tall_vars.
#'
#' @param tsne_dt data.table with tsne info (tx, ty, id, and tall_var)
#' @param qtall_vars character vector of items in tsne_dt$tall_var
#' @param id_to_plot character vector of ids in tsne_dt$id
#' @param p existing ggplot to add a layer onto. Default of NULL
#' creates a new ggplot.
#' @param xrng numeric of length 2. passed to coord_cartesian xlim. Not used if p is specified. Default is c(-.5, .5).
#' @param yrng numeric of length 2. passed to coord_cartesian ylim. Not used if p is specified. Default is c(-.5, .5).
#' @param arrowhead_position character, must be one of "each" or "end".
#' Determines if arrowheads are drawn for each segment or only on the final
#' segment.
#' @param line_type character vector describing type of line to connect qtall_vars.
#' One of : curve, spline, or straight
#' @param label_type character vector describing labelling method for points
#' along lines. One of : text, label, or none.
#' @param bg_points number of background id points to plot.
#' @param arrow_FUN result of grid::arrow(). Default of NULL does not draw arrowheads.
#'
#' @return ggplot showing how individual ids behave across qtall_vars.
#' @importFrom stats spline
#' @importFrom ggrepel geom_text_repel geom_label_repel
#'
#' @examples
#' data(tsne_dt)
#' plot_path(tsne_dt, unique(tsne_dt$tall_var), unique(tsne_dt$id)[1:3])
#' plot_path(tsne_dt, unique(tsne_dt$tall_var), unique(tsne_dt$id)[1:3],
#' arrowhead_position = "each", label_type = "none")
#' plot_path(tsne_dt, unique(tsne_dt$tall_var), unique(tsne_dt$id)[1:3],
#' arrowhead_position = "end", label_type = "none", line_type = "spline",
#' arrow_FUN = arrow())
plot_path = function(tsne_dt,
                     qtall_vars,
                     id_to_plot,
                     p = NULL,
                     xrng = c(-.5, .5),
                     yrng = c(-.5, .5),
                     arrowhead_position = c("end", "each")[1],
                     line_type = c("curve", "spline", "straight")[2],
                     label_type = c("text", "label", "none")[2],
                     bg_points = 5000,
                     arrow_FUN = NULL) {
    # Validate arguments up front so failures are immediate and informative
    stopifnot(qtall_vars %in% unique(tsne_dt$tall_var))
    stopifnot(arrowhead_position %in% c("end", "each"))
    stopifnot(line_type %in% c("curve", "spline", "straight"))
    stopifnot(label_type %in% c("text", "label", "none"))
    stopifnot(id_to_plot %in% tsne_dt$id)
    # Restrict to the requested tall_vars/ids; tall_var_o is the 1-based
    # position of each tall_var within an id (after ordering by the qtall_vars
    # factor levels) and defines the segment order along the path
    lines_dt = tsne_dt[tall_var %in% qtall_vars & id %in% id_to_plot]
    lines_dt$tall_var = factor(lines_dt$tall_var, levels = qtall_vars)
    lines_dt = lines_dt[order(tall_var)][order(id)][]
    lines_dt[, tall_var_o := seq(.N), by = list(id)]
    # Initialize a background scatter (at most bg_points observations) when no
    # existing plot was supplied
    if(is.null(p)){
        p = ggplot() +
            geom_point(data = tsne_dt[sampleCap(seq(nrow(tsne_dt)), bg_points), ],
                       aes(x = tx, y = ty), color = "gray") +
            labs(title = paste(qtall_vars, collapse = ", ")) +
            theme_classic() +
            scale_color_brewer(palette = "Dark2") +
            coord_cartesian(xlim = xrng, ylim = yrng)
    }
    switch(line_type,
           curve = {
               # Pair each point with its successor in tall_var order: one
               # curved segment per consecutive tall_var pair within an id
               plot_dt = merge(lines_dt[tall_var_o != length(qtall_vars), list(tx, ty, id, tall_var_o)],
                               lines_dt[tall_var_o != 1, list(tx_end = tx,
                                                              ty_end = ty,
                                                              id,
                                                              tall_var_o = tall_var_o - 1)])
               switch(arrowhead_position,
                      each = {
                          # Arrowhead on every segment
                          p = p +
                              geom_curve(
                                  data = plot_dt,
                                  aes(
                                      x = tx,
                                      y = ty,
                                      xend = tx_end,
                                      yend = ty_end,
                                      color = id
                                  ),
                                  size = 1,
                                  arrow = arrow_FUN
                              )
                      },
                      end = {
                          # Arrowhead only on the final segment of each path
                          p = p +
                              geom_curve(
                                  data = plot_dt[tall_var_o < max(tall_var_o)],
                                  aes(
                                      x = tx,
                                      y = ty,
                                      xend = tx_end,
                                      yend = ty_end,
                                      color = id
                                  ),
                                  size = 1
                              ) +
                              geom_curve(
                                  data = plot_dt[tall_var_o == max(tall_var_o)],
                                  aes(
                                      x = tx,
                                      y = ty,
                                      xend = tx_end,
                                      yend = ty_end,
                                      color = id
                                  ),
                                  size = 1,
                                  arrow = arrow_FUN
                              )
                      })
           },
           spline = {
               # Interpolate n points per inter-tall_var interval, separately
               # for x and y, then merge on (id, interpolated point index)
               n = 20
               sp_y = lines_dt[, stats::spline(x = tall_var_o,
                                               y = ty,
                                               n = n * (length(qtall_vars) - 1)), by = id][, list(pid = seq(.N), ty = y), by = list(id)]
               sp_x = lines_dt[, stats::spline(x = tall_var_o,
                                               y = tx,
                                               n = n * (length(qtall_vars) - 1)), by = id][, list(pid = seq(.N), tx = y), by = list(id)]
               sp_dt = merge(sp_x, sp_y, by = c("id", "pid"))
               # grp identifies the inter-tall_var interval, grp_o the point's
               # position within it
               sp_dt[, grp := ceiling(pid / n)]
               sp_dt[, grp_o := seq(.N), by = list(grp, id)]
               # Anchor each interval with the exact observed start/end points
               # (grp_o 0 and n + 1 sort them to the ends of each interval)
               # NOTE(review): the final tall_var's observed point is excluded
               # here (tall_var_o < length(qtall_vars) in both merges); the
               # spline still covers the full range -- confirm intent
               start_dt = merge(lines_dt[tall_var_o < length(qtall_vars), list(tx, ty, grp = tall_var_o, id)],
                                unique(sp_dt[, list(id, grp)]))[, grp_o := 0]
               end_dt = merge(lines_dt[tall_var_o > 1 &
                                           tall_var_o < length(qtall_vars), list(tx, ty, grp = tall_var_o - 1, id)],
                              unique(sp_dt[, list(id, grp = grp)]))[, grp_o := n +
                                                                        1]
               plot_dt = rbind(sp_dt[, list(grp, id, tx, ty, grp_o)],
                               start_dt,
                               end_dt)[order(grp_o)][order(id)][order(grp)]
               switch(arrowhead_position,
                      each = {
                          # Grouping by (grp, id) draws each interval as its own
                          # path, so each gets an arrowhead
                          p = p +
                              geom_path(
                                  data = plot_dt,
                                  aes(
                                      x = tx,
                                      y = ty,
                                      color = id,
                                      group = paste(grp, id)
                                  ),
                                  arrow = arrow_FUN,
                                  size = 1.2,
                                  alpha = 1,
                                  show.legend = FALSE
                              )
                      },
                      end = {
                          # One continuous path per id; arrowhead on the end only
                          p = p +
                              geom_path(
                                  data = plot_dt,
                                  aes(
                                      x = tx,
                                      y = ty,
                                      color = id,
                                      group = id
                                  ),
                                  arrow = arrow_FUN,
                                  size = 1.2,
                                  alpha = 1,
                                  show.legend = FALSE
                              )
                      })
           },
           straight = {
               switch(arrowhead_position,
                      each = {
                          # Segment per consecutive tall_var pair, each arrowed
                          plot_dt = merge(lines_dt[tall_var_o != length(qtall_vars), list(tx, ty, id, tall_var_o)],
                                          lines_dt[tall_var_o != 1, list(tx_end = tx,
                                                                         ty_end = ty,
                                                                         id,
                                                                         tall_var_o = tall_var_o - 1)])
                          p = p +
                              geom_segment(
                                  data = plot_dt,
                                  aes(
                                      x = tx,
                                      y = ty,
                                      xend = tx_end,
                                      yend = ty_end,
                                      color = id,
                                      group = id
                                  ),
                                  size = 1,
                                  arrow = arrow_FUN
                              )
                      },
                      end = {
                          # geom_path connects points in row order per id
                          plot_dt = lines_dt
                          p = p + geom_path(data = plot_dt,
                                            aes(x = tx, y = ty, color = id, group = id),
                                            arrow = arrow_FUN)
                      })
           })
    # Mark the observed positions themselves
    p = p + geom_point(
        data = lines_dt,
        aes(x = tx, y = ty, color = id),
        size = 3,
        shape = 21,
        fill = "white"
    )
    # Optionally label each point with its tall_var
    switch(label_type,
           text = {
               p = p + ggrepel::geom_text_repel(
                   data = lines_dt,
                   aes(
                       x = tx,
                       y = ty,
                       color = id,
                       label = tall_var
                   ),
                   show.legend = FALSE
               )
           },
           label = {
               p = p + ggrepel::geom_label_repel(
                   data = lines_dt,
                   aes(
                       x = tx,
                       y = ty,
                       color = id,
                       label = tall_var
                   ),
                   fill = "white",
                   show.legend = FALSE
               )
           },
           none = {
               p = p
           })
    p
}
#' plot_outline
#'
#' a ggplot where the position of id in every tall_var specified by qtall_vars is
#' connected in a polygon. Allows the identification of both regions where ids
#' are stable/dynamic and individual ids that are particularly dynamic.
#'
#' Good for looking at large numbers of ids with a modest number of tall_vars.
#'
#' @param tsne_dt data.table with tsne info (tx, ty, id, and tall_var)
#' @param qtall_vars character vector of items in tsne_dt$tall_var
#' @param id_to_plot character vector of ids in tsne_dt$id
#' @param p existing ggplot to add a layer onto. Default of NULL creates a new
#' ggplot.
#' @param xrng numeric of length 2. passed to coord_cartesian xlim. Not used if
#' p is specified. Default is c(-.5, .5).
#' @param yrng numeric of length 2. passed to coord_cartesian ylim. Not used if
#' p is specified. Default is c(-.5, .5).
#' @param bg_color character. color to use for background points. Default is
#' "gray"
#' @param line_color_mapping character that is valid color. If less than length
#' of id_to_plot, recycled across specified id_to_plot. Can be named vector
#' to completely specify id_to_plot.
#' @param fill_color_mapping character that is valid color. If less than length
#' of id_to_plot, recycled across specified id_to_plot. Can be named vector
#' to completely specify id_to_plot.
#' @param label_type character. one of c("text", "label", "none"). controls
#' how, if at all, plot objects are labelled.
#' @param bg_points number of points to plot in background. if 0, only points
#' corresponding to id_to_plot are drawn. if -1, no points at all are drawn.
#' @param arrow_FUN result of grid::arrow(). Default of NULL does not draw arrowheads.
#'
#' @return a ggplot
#' @importFrom grDevices chull
#'
#' @examples
#' data(tsne_dt)
#' plot_outline(tsne_dt, unique(tsne_dt$tall_var), unique(tsne_dt$id)[1:3])
#' plot_outline(tsne_dt, unique(tsne_dt$tall_var), unique(tsne_dt$id)[1:3],
#' label_type = "none")
#' plot_outline(tsne_dt, unique(tsne_dt$tall_var), unique(tsne_dt$id)[1:3],
#' label_type = "label")
plot_outline = function(tsne_dt,
                        qtall_vars,
                        id_to_plot = NULL,
                        p = NULL,
                        xrng = c(-.5, .5),
                        yrng = c(-.5, .5),
                        bg_color = "gray",
                        line_color_mapping = "black",
                        fill_color_mapping = "gray",
                        label_type = c("text", "label", "none")[3],
                        bg_points = 5000,
                        arrow_FUN = NULL) {
    stopifnot(qtall_vars %in% unique(tsne_dt$tall_var))
    # Allow label_type to be given as an index into the choice vector
    if(is.numeric(label_type)){
        label_type = c("text", "label", "none")[label_type]
    }
    if(is.null(id_to_plot)){
        id_to_plot = unique(tsne_dt$id)
    }
    stopifnot(id_to_plot %in% tsne_dt$id)
    lines_dt = tsne_dt[tall_var %in% qtall_vars & id %in% id_to_plot]
    lines_dt$tall_var = factor(lines_dt$tall_var, levels = qtall_vars)
    lines_dt = lines_dt[order(tall_var)][order(id)]
    # Recycle the supplied colors across ids.
    # seq_along() replaces seq() so a single character id does not error, and
    # ((i - 1) %% k) + 1 replaces (i %% k) + 1 so recycling starts at the
    # first supplied color rather than the second
    lo = ((seq_along(id_to_plot) - 1) %% length(line_color_mapping)) + 1
    line_color_mapping = line_color_mapping[lo]
    names(line_color_mapping) = id_to_plot
    fo = ((seq_along(id_to_plot) - 1) %% length(fill_color_mapping)) + 1
    fill_color_mapping = fill_color_mapping[fo]
    names(fill_color_mapping) = id_to_plot
    # Choose ids for background points: none (bg_points < 0), the outlined ids
    # only (bg_points == 0), or a sample of bg_points ids plus the outlined ids
    if(bg_points < 0){
        id_tp = character()
    }else if(bg_points == 0){
        id_tp = id_to_plot
    }else{
        id_tp = sampleCap(unique(tsne_dt$id), bg_points)
        id_tp = union(id_tp, id_to_plot)
    }
    if(is.null(p)){
        p = ggplot() +
            labs(title = paste(qtall_vars, collapse = ", ")) +
            theme_classic() +
            coord_cartesian(xlim = xrng, ylim = yrng)
    }
    # Guard against zero-length aesthetics (bg_points = -1), which annotate()
    # rejects
    if(length(id_tp) > 0){
        p = p +
            annotate("point",
                     x = tsne_dt[id %in% id_tp,]$tx,
                     y = tsne_dt[id %in% id_tp,]$ty,
                     color = bg_color)
    }
    # Convex hull per id: ch_i indexes each id's rows, chull() returns the
    # hull vertex indices in traversal order; merging and ordering by o
    # arranges the polygon vertices for drawing
    lines_dt[, ch_i := seq(.N), by = .(id)]
    ch_res = lines_dt[, .(ch_i = grDevices::chull(tx, ty)), by = .(id)]
    ch_res$o = seq(nrow(ch_res))
    poly_dt = merge(lines_dt, ch_res)
    poly_dt = poly_dt[order(o)]
    # One polygon layer per id, with that id's assigned line/fill colors
    for(tid in unique(poly_dt$id)){
        p = p +
            annotate("polygon",
                     x = poly_dt[id == tid]$tx,
                     y = poly_dt[id == tid]$ty,
                     color = line_color_mapping[tid],
                     fill = fill_color_mapping[tid])
    }
    # Label at each id's centroid, if requested
    lab_dt = lines_dt[, .(tx = mean(tx), ty = mean(ty)), by = .(id)]
    switch(label_type,
           text = {
               p = p + ggrepel::geom_text_repel(
                   data = lab_dt,
                   aes(
                       x = tx,
                       y = ty,
                       label = id
                   ),
                   color = "black",
                   show.legend = FALSE
               )
           },
           label = {
               p = p + ggrepel::geom_label_repel(
                   data = lab_dt,
                   aes(
                       x = tx,
                       y = ty,
                       label = id
                   ),
                   color = "black",
                   fill = "white",
                   show.legend = FALSE
               )
           },
           none = {
               p = p
           })
    p
}
|
13e3e33a21baa501c593e8f9ff0a15e7a75a5202 | 7f86f568dab6279e6f2d987c77a023bed055a11c | /man/data_FrenchPeregrines.Rd | 100ca39c296c12f0980cf99e79f493b67233755c | [] | no_license | cran/AHMbook | b6acd2ed71319be2f0e3374d9d8960a8b04e21bf | d8f8ad8bef93120f187bef494b9ac1ad8200c530 | refs/heads/master | 2023-08-31T21:13:00.618018 | 2023-08-23T21:10:03 | 2023-08-23T22:30:32 | 88,879,777 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,576 | rd | data_FrenchPeregrines.Rd | \name{FrenchPeregrines}
\alias{FrenchPeregrines}
\encoding{UTF-8}
\docType{data}
\title{
Data for observations of peregrines from the French Jura mountains
}
\description{
The data are detection/nondetection data of the Peregrine Falcon (\emph{Falco peregrinus}) from the wild and wonderful French Jura between 1964 and 2016 for 284 cliff sites (= territories, or sites in the context of a site-occupancy model) where a pair had been detected at least once in this period. A large proportion of sites are visited multiple times per year, but unfortunately only the aggregate results are available in each year, i.e., whether a pair was detected at least once, or never.
}
\usage{data("FrenchPeregrines")}
\format{
\code{FrenchPeregrines} is a data frame with 284 rows and 56 columns:
\describe{
\item{site }{cliff (or site) identifier.}
\item{department }{factor, the administrative area (Ain, Jura or Doubs).}
\item{height }{factor, height of the cliff, low, medium, or tall.}
\item{yr1964 to yr2016 }{detection histories for each year: 1 if a pair of peregrines was detected during at least one survey, 0 if no pair was detected, NA if no survey was carried out in that year.}
}
}
\source{
Groupe Pèlerin Jura (René-Jean Monneret, René Ruffinoni, and colleagues)
}
\references{
Kéry, M. & Royle, J.A. (2021) \emph{Applied Hierarchical Modeling in Ecology} AHM2 - 4.11.
}
\examples{
data(FrenchPeregrines)
str(FrenchPeregrines)
# Extract the capture history data:
ch <- as.matrix(FrenchPeregrines[, 4:56])
dim(ch)
range(ch, na.rm=TRUE)
}
\keyword{datasets}
|
41d8f58c9b8b320c61e0150d50518f7c7a49c70c | 86a30d3ca8e3bbf0bf77ec7e856596e619b9d70b | /code/scripts_joel_HPC_V5_females/run_FAMD_ordinal_factors_HPC_V5_females.R | ef6798f2421a04223b523fe89ce1397467c6f62d | [] | no_license | EpiCompBio/Barracudas | c8692874a7565d3703d9f55dfdec729339f195d7 | 2bc606f3cfd8eabab900fdf22c35295ddf27efd2 | refs/heads/master | 2020-04-23T23:01:53.827985 | 2019-05-07T00:39:37 | 2019-05-07T00:39:37 | 171,521,953 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,008 | r | run_FAMD_ordinal_factors_HPC_V5_females.R | ################################################################################
# LOADING LIBRARIES
################################################################################
# using<-function(...) {
# libs<-unlist(list(...))
# req<-unlist(lapply(libs,require,character.only=TRUE))
# need<-libs[req==FALSE]
# if(length(need)>0){
# install.packages(need)
# lapply(need,require,character.only=TRUE)
# }
# }
#
# using("FactoMineR","ggplot2","ggrepel","viridis","RColorBrewer")
#Package from sourcing functions
library(FactoMineR,lib.loc ="/home/jheller/anaconda3/lib/R/library")
library(ggplot2,lib.loc ="/home/jheller/anaconda3/lib/R/library")
library(ggrepel,lib.loc ="/home/jheller/anaconda3/lib/R/library")
library(viridis,lib.loc ="/home/jheller/anaconda3/lib/R/library")
library(RColorBrewer, lib.loc ="/home/jheller/anaconda3/lib/R/library")
################################################################################
# WORKING DIRECTORY AND SOURCING FUNCTIONS
################################################################################
# file_path<-dirname(rstudioapi::getActiveDocumentContext()$path)
# setwd(file_path)
# setwd("C:/Users/JOE/Documents/Imperial College 2018-2019/Translational Data Science/Barracudas")
# source("C:/Users/JOE/Documents/R_utility_and_self_implementations/FAMD_plots_utility.R")
# source("C:/Users/JOE/Documents/R_utility_and_self_implementations/colors_themes_utility.R")
source("code/utility_functions/FAMD_plots_utility.R")
source("code/utility_functions/colors_themes_utility.R")
################################################################################
################################################################################
# multi-morbid individuals only
################################################################################
################################################################################
multi_morbid=readRDS("../data/processed_V5_females/multi_morbid_ordinal_factors_HW_mod_controls_female.rds")
# multi_morbid=multi_morbid[1:200,]
################################################################################
# FAMD on the multi-morbid individuals
################################################################################
#Adding a +50 to get higher explained variance
FAMD_multi_morbid_res=FAMD(multi_morbid[,15:ncol(multi_morbid)],ncp = ncol(multi_morbid)+50, graph = FALSE)
#IND PLOTS
FAMD_multi_morbid_ind_plot_d12=make_FAMD_ind_plot(FAMD_multi_morbid_res,
dims=c(1,2),
custom_theme=theme_jh,color_scale=distinct_scale[2],show_labels = FALSE)
svg(filename="../results/results_joel_HPC_V5_female/FAMD_ordinal_factors_multi_morbid_ind_plot_d12.svg",width=10,height=10)
print(FAMD_multi_morbid_ind_plot_d12)
dev.off()
FAMD_multi_morbid_ind_plot_d34=make_FAMD_ind_plot(FAMD_multi_morbid_res,
dims=c(3,4),
custom_theme=theme_jh,color_scale=distinct_scale[2],show_labels = FALSE)
svg(filename="../results/results_joel_HPC_V5_female/FAMD_ordinal_factors_multi_morbid_ind_plot_d34.svg",width=10,height=10)
print(FAMD_multi_morbid_ind_plot_d34)
dev.off()
#VAR PLOTS
FAMD_multi_morbid_var_plot_d12 <- make_FAMD_variable_graph(FAMD_multi_morbid_res,dims=c(1,2),custom_theme=theme_jh,color_scale=distinct_scale[2])
svg(filename="../results/results_joel_HPC_V5_female/FAMD_ordinal_factors_multi_morbid_var_plot_d12.svg",width=10,height=10)
print(FAMD_multi_morbid_var_plot_d12)
dev.off()
FAMD_multi_morbid_var_plot_d34 <- make_FAMD_variable_graph(FAMD_multi_morbid_res,dims=c(3,4),custom_theme=theme_jh,color_scale=distinct_scale[2])
svg(filename="../results/results_joel_HPC_V5_female/FAMD_ordinal_factors_multi_morbid_var_plot_d34.svg",width=10,height=10)
print(FAMD_multi_morbid_var_plot_d34)
dev.off()
saveRDS(FAMD_multi_morbid_res,"../data/processed_V5_females/FAMD_ordinal_factors_multi_morbid_res.rds")
|
40a31f619ce9152bf73316c00d12b5e23f14a681 | e535b3cfa23dfd5b298b14c6460ac74d139ace3e | /cachematrix.R | f580121b1f7175605be63703c8ce7de3ddb9df89 | [] | no_license | mbatik/ProgrammingAssignment2 | 9537e553f687fd5214b77480fb4b759db4ed5921 | cb226d2d90b53a2ce45a9aeac4ea4e610ce260b6 | refs/heads/master | 2020-12-25T05:04:36.452112 | 2014-06-19T19:46:38 | 2014-06-19T19:46:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 866 | r | cachematrix.R | ## These functions allow for the cache of a matrix and then allows
## the return of the inverse of that matrix
## makeCacheMatrix caches a matrix object and its (lazily computed) inverse
# Create a cache object for a matrix: a list of four closures sharing the
# matrix x and its cached inverse m (NULL until set).
#   set(y)        - replace the stored matrix and invalidate the cached inverse
#   get()         - return the stored matrix
#   setmatrix(x)  - store the computed inverse (argument shadows the outer x
#                   intentionally; it is only assigned to m)
#   getmatrix()   - return the cached inverse, or NULL if not yet computed
# Fix: the original returned via lost(...), an undefined function; list(...)
# is required to build and return the closure list.
makeCacheMatrix <- function(x = matrix()) {
    m <- NULL
    set <- function(y){
        x <<- y
        m <<- NULL  # new matrix invalidates any cached inverse
    }
    get <- function() x
    setmatrix <- function(x) m <<- x
    getmatrix <- function() m
    list(set = set, get = get,
         setmatrix = setmatrix,
         getmatrix = getmatrix)
}
## Function cacheSolve takes a cache object created by makeCacheMatrix and
## returns the inverse of the stored matrix, computing it with solve() only
## when no cached inverse is available
# Return the inverse of the matrix held in cache object x (as built by
# makeCacheMatrix). If an inverse is already cached it is returned directly;
# otherwise it is computed with solve(), cached, and returned.
#   x   - list with get/getmatrix/setmatrix closures
#   ... - additional arguments forwarded to solve()
# Fix: corrected the user-facing message typo "cashed" -> "cached".
cacheSolve <- function(x, ...) {
        m <- x$getmatrix()
        if(!is.null(m)) {
                message("getting cached matrix")
                return(m)  # early exit: reuse the cached inverse
        }
        data <- x$get()
        m <- solve(data, ...)
        x$setmatrix(m)  # store for subsequent calls
        m
}
44f3b222085c33d3e56a005726ca1a15a066f820 | 549b9ea2de06a6704912f5de95d2e9bca87440ae | /man/id.extract.Rd | 243a244a7ea35464e08bdcd6623d7ae65d7d2777 | [] | no_license | akeyel/spatialdemography | 38e863ba231c77b5e9d4d1c7357d98929f171de9 | cf0b006d43d0b55c76b55da3027210a4ceee29ef | refs/heads/master | 2016-12-13T05:19:09.151285 | 2016-04-02T03:56:27 | 2016-04-02T03:56:27 | 39,295,792 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 426 | rd | id.extract.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sdhelper.r
\name{id.extract}
\alias{id.extract}
\title{ID Extract}
\usage{
id.extract(Stage, TimeStep, sub.dat)
}
\arguments{
\item{Stage}{The lifestage to be extracted}
\item{TimeStep}{The timestep to be extracted}
\item{sub.dat}{The dataset containing the data to extract from.}
}
\description{
Simple extraction function to streamline code
}
|
faa4069f5a431ce1493678b3b26203c898c14900 | 27e584096efbf150caf723339f6b1ef40f9bf06e | /Session-5/App/V3/server.r | 22c37fd3fd33489df64b06be88b136885ac25878 | [] | no_license | tbalmat/Duke-Co-lab | 63148b946cbd8ab1b33a2f8851eaea6e00b59d63 | fe78e3f670456a67e515a6545e2d195ad94442ff | refs/heads/master | 2021-06-23T23:26:15.101875 | 2021-04-21T16:16:32 | 2021-04-21T16:16:32 | 213,077,357 | 3 | 7 | null | null | null | null | UTF-8 | R | false | false | 31,157 | r | server.r | #####################################################################################################
# Duke University Co-lab Shiny Workshop, Session 5, November 2019
# Genome Wide Association Study Pleiotropy App
# Inter-GWAS Association Network Using the visNetwork Package
# Shiny server file
# Version 3
#####################################################################################################
# Information on shiny and visnetwork available at:
# https://shiny.rstudio.com/
# https://github.com/rstudio/shiny
# https://cran.r-project.org/web/packages/shiny/shiny.pdf
# https://cran.r-project.org/web/packages/visnetwork/visnetwork.pdf
#####################################################################################################
# GWAS data description
#
# Source: synthesized
#
# Columns:
# GWAS ................... GWAS set
# phenotype .............. textual description of phenotype
# SNP .................... rsID of SNP
# p ...................... significance of allele transition regression coefficient
#####################################################################################################
# Global session options for the Shiny server process
options(max.print=1000)          # number of elements, not rows
options(stringsAsFactors=F)      # keep character columns as character on read
options(scipen=999999)           # suppress scientific notation in output
#options(device="windows")
library(shiny)
library(visNetwork)
library(DT)
# Dir location
# NOTE(review): hard-coded absolute paths; index [1] selects the local Windows
# path, [2] the RStudio Cloud path -- switch before deploying
dr <- c("local"="C:\\Projects\\Duke\\Co-lab\\Shiny\\Session-5-visNetwork",
        "cloud"="/cloud/project/Duke-Co-lab/Shiny/Session-5-visNetwork")[1]
setwd(dr)
##########################################################################################################
# Set static filter and appearance parameters
##########################################################################################################
# Vertex colors for GWAS 1 and 2
vc1 <- "#66AAFF"
vc2 <- "#FFEE66"
# Edge colors for GWAS 1 and 2 when SNPs form edges (phenotypes forms vertices)
ec1 <- "#080808"
ec2 <- "#C02020"
# Edge colors for GWAS 1 and 2 when phenotypes form edges (SNPs form vertices)
ec3 <- "#66AAFF"
ec4 <- "#FFEE66"
# Vertex and edge font sizes
vfsz <- 8
efsz <- 8
# Font stroke (outline) color
fsc1 <- "#909090"
##########################################################################################################
# GWAS data retrieval function
# Retrieve individual data frames for each GWAS
# Save in global memory so that all function can access observations
# Exclude observations with non-positive p values
##########################################################################################################
# Load the synthesized GWAS observations and split them into per-GWAS data
# frames gwas1 and gwas2 (assigned into the global environment so that all
# server functions can access them). Rows with non-positive p are dropped and
# p is converted to -log10(p) in column log_10_p.
readData <- function() {
    obs <- read.table("Data/GWASResults.csv", header=TRUE, sep=",", strip.white=TRUE)
    # Extract one GWAS set: keep phenotype/SNP and attach -log10(p)
    extractSet <- function(setID) {
        k <- which(obs[,"GWAS"]==setID & obs[,"p"]>0)
        data.frame(obs[k,c("phenotype", "SNP")], "log_10_p"=-log(obs[k,"p"])/log(10))
    }
    gwas1 <<- extractSet(1)
    gwas2 <<- extractSet(2)
}
##########################################################################################################
# Function to assemble graph components (vertices and edges)
##########################################################################################################
# Build the global vertex and edge data frames for the visNetwork graph.
# Reads globals: gwas1, gwas2 (from readData), vertexType ("Phenotype" or
# "SNP"), log_10_p (significance threshold), nedgemin (min edges per vertex),
# eopacity (edge opacity), and the appearance constants vc*/ec*/vfsz/efsz/fsc1.
# Writes globals: vertex, edge (possibly empty data frames).
assembleNetComponents <- function() {
  # Compose data set, with joined GWAS set phenotype as vertices and SNPs as edges
  # or SNP as vertices and edges formed by phenotypes from either GWAS set
  if(vertexType=="Phenotype") {
    # Phenotype as vertex, SNP as edge
    # Inner join on SNP: an edge exists only when the SNP is significant
    # (at the log_10_p threshold) in both GWAS sets
    gwas <- merge(gwas1[which(gwas1[,"log_10_p"]>=log_10_p),],
                  gwas2[which(gwas2[,"log_10_p"]>=log_10_p),], by="SNP", suffixes=1:2)
    colnames(gwas) <- c("lab", "v1", "log_10_p1", "v2", "log_10_p2")
    if(nrow(gwas)>0) {
      # Tabulate edges by GWAS and vertex
      v1 <- aggregate(1:nrow(gwas), by=list(gwas[,"v1"]), length)
      colnames(v1) <- c("lab", "n")
      v2 <- aggregate(1:nrow(gwas), by=list(gwas[,"v2"]), length)
      colnames(v2) <- c("lab", "n")
      # Filter by edge count
      # Retain vertices with min edge count, retain all enjoined vertices
      v1 <- subset(v1, n>=nedgemin)
      v2 <- subset(v2, n>=nedgemin)
      gwas <- gwas[which(gwas[,"v1"] %in% v1[,"lab"] | gwas[,"v2"] %in% v2[,"lab"]),]
      # Compose edge hover labels
      if(nrow(gwas)>0) {
        # HTML hover text: phenotype and significance from each GWAS set
        gwas[,"hovtext"] <- paste(gwas[,"v1"], ", log<sub>10</sub>(p)=", round(gwas[,"log_10_p1"], 2),
                                  "<br>", gwas[,"v2"], ", ", "log<sub>10</sub>(p)=", round(gwas[,"log_10_p2"], 2), sep="")
        # Compose vertex sets from GWAS edges (at this stage, gwas contains edges where both vertices have p
        # at threshold and at least one vertex has number of edges at threshold)
        v1 <- aggregate(1:nrow(gwas), by=list(gwas[,"v1"]), length)
        colnames(v1) <- c("lab", "n")
        v2 <- aggregate(1:nrow(gwas), by=list(gwas[,"v2"]), length)
        colnames(v2) <- c("lab", "n")
        # Assign vertex color by GWAS set, edge color static
        vcolor <- c(rep(vc1, nrow(v1)), rep(vc2, nrow(v2)))
        vtcolor <- vcolor
        ecolor <- ec1
        ehcolor <- ec2
        vertex0 <- data.frame("set"=c(rep(1, nrow(v1)), rep(2, nrow(v2))), "v"=c(v1[,"lab"], v2[,"lab"]),
                              "lab"=c(v1[,"lab"], v2[,"lab"]), "n"=c(v1[,"n"], v2[,"n"]), "hovtext"=c(v1[,"lab"], v2[,"lab"]))
      } else {
        vertex0 <- data.frame()
      }
    } else {
      vertex0 <- data.frame()
    }
    # No vertex-size rescaling needed in phenotype mode
    vsizefactor <- 1
  } else {
    # SNP as vertex, phenotype as edge
    # Limit vertices to the intersection of SNPs in both GWAS sets
    k1 <- which(gwas1[,"SNP"] %in% gwas2[which(gwas2[,"log_10_p"]>=log_10_p),"SNP"] & gwas1[,"log_10_p"]>=log_10_p)
    k2 <- which(gwas2[,"SNP"] %in% gwas1[which(gwas1[,"log_10_p"]>=log_10_p),"SNP"] & gwas2[,"log_10_p"]>=log_10_p)
    if(length(k1)>0 & length(k2)>0) {
      # Compose one vertex from each SNP set
      # Note that vertices are represented even when not enjoined to other vertices
      # This enables analysis of inter-set SNP phenotype relationships even when no phenotypes
      # relate the SNP to another SNP (no edges lead to or from a SNP)
      # Construct sets of phenotype within SNP
      SNP <- split(rbind(data.frame("set"=1, gwas1[k1,c("phenotype", "log_10_p")]),
                         data.frame("set"=2, gwas2[k2,c("phenotype", "log_10_p")])),
                   c(gwas1[k1,"SNP"], gwas2[k2,"SNP"]))
      # Compose hover text from phenotypes within SNP
      # Include proportion edges by GWAS set within each vertex, for vertex color assignment
      vertex0 <- do.call(rbind,
                   apply(as.matrix(1:length(SNP)), 1,
                     function(i) {
                       hovtext <- paste("GWAS set, Phenotype, log<sub>10</sub>(p)<br>",
                                        paste(paste(SNP[[i]][,"set"], ", ", SNP[[i]][,"phenotype"], ", ",
                                                    round(SNP[[i]][,"log_10_p"], 2), sep=""), collapse="<br>", sep=""), sep="")
                       data.frame("v"=names(SNP)[i], "hovtext"=hovtext, "n"=length(SNP[[i]][,"set"]),
                                  "p1"=length(which(SNP[[i]][,"set"]==1))/length(SNP[[i]][,"set"]))
                     }))
      # Construct edges
      # Generate SNP sets by phenotype (each SNP pair with phenotype becomes an edge)
      ph <- split(rbind(data.frame("set"=1, gwas1[k1,c("SNP", "log_10_p")]),
                        data.frame("set"=2, gwas2[k2,c("SNP", "log_10_p")])),
                  c(gwas1[k1,"phenotype"], gwas2[k2,"phenotype"]))
      # Generate one edge per SNP pair, phenotype combination
      gwas <- do.call(rbind,
                apply(as.matrix(1:length(ph)), 1,
                  function(i)
                    if(length(ph[[i]][,"SNP"])>1) {
                      # Generate all combinations of SNP pairs
                      ij <- t(combn(1:nrow(ph[[i]]), 2))
                      # Order SNP pairs so that first is alphabetically less than second
                      # This is needed for unambiguous pairs in collapsing, later
                      k <- which(ph[[i]][ij[,1],"SNP"]>ph[[i]][ij[,2],"SNP"])
                      x <- ij[k,2]
                      ij[k,2] <- ij[k,1]
                      ij[k,1] <- x
                      data.frame("set"=ph[[i]][1,"set"], "v1"=ph[[i]][ij[,1],"SNP"], "v2"=ph[[i]][ij[,2],"SNP"],
                                 "lab"=names(ph)[i],
                                 "hovtext"=paste(ph[[i]][ij[,1],"SNP"], ", log<sub>10</sub>(p)=", round(ph[[i]][ij[,1],"log_10_p"], 2),
                                                 "<br>", ph[[i]][ij[,2],"SNP"], ", log<sub>10</sub>(p)=", round(ph[[i]][ij[,2],"log_10_p"], 2), sep=""),
                                 "log_10_p1"=ph[[i]][ij[,1],"log_10_p"], "log_10_p2"=ph[[i]][ij[,2],"log_10_p"])
                    } else {
                      # A phenotype with a single SNP produces no edges
                      data.frame()
                    }))
      # Optional: collapse SNP pairs (edges) and compose composite string of phenotypes for hover text
      #gwas <- do.call(rbind,
      #          apply(as.matrix(which(!duplicated(gwas[,"v1"], gwas[,"v2"]))), 1,
      #            function(i) {
      #              k <- which(gwas[,"v1"]==gwas[i,"v1"] & gwas[,"v2"]==gwas[i,"v2"])
      #              k <- k[order(gwas[k,"set"], gwas[k,"lab"])]
      #              hovtext <- paste("GWAS set, Phenotype, SNP1, log<sub>10</sub>(p1), SNP2, log<sub>10</sub>(p2)<br>",
      #                               paste(paste(gwas[k,"set"], ", ", gwas[k,"lab"], ", ", gwas[k,"v1"], ", ",
      #                                           round(gwas[k,"log_10_p1"], 2), ", ", gwas[k,"v2"], ", ",
      #                                           round(gwas[k,"log_10_p2"], 2), sep=""), collapse="<br>", sep=""), sep="")
      #              data.frame("v1"=gwas[i,"v1"], "v2"=gwas[i,"v2"], "lab"="o", "hovtext"=hovtext)
      #            }))
      # Omit vertices and edges when edge count below threshold
      # Threshold of 0 retains vertices without edges to enable analysis inter-set, single SNP relations
      if(nedgemin>0) {
        x <- table(c(gwas[,"v1"], gwas[,"v2"]))
        v <- names(x)[which(x>=nedgemin)]
        # Retain all vertices having either edge count at threshold or enjoined to a vertex at threshold
        # This renders the network associated with at-threshold vertices
        k <- which(gwas[,"v1"] %in% v | gwas[,"v2"] %in% v)
        vertex0 <- vertex0[which(vertex0[,"v"] %in% unique(c(gwas[k,"v1"], gwas[k,"v2"]))),]
        # Retain associated edges
        gwas <- gwas[k,]
      }
      # Compute vertex color based on set1, set2 proportion of edges within SNP
      # Edges colored by GWAS set
      # Note the association (at time of development) of blue with set 1 and yellow (orange/red) with set 2
      # Vertices with high proportion of set 1 phenotypes toward blue, high set 2 toward red
      # Green indicates uniform (balanced) distribution
      vcolor <- rgb(1-vertex0[,"p1"], 1-abs(vertex0[,"p1"]-0.5), vertex0[,"p1"])
      vtcolor="#66aaff"
      ecolor <- c(ec3, ec4)[gwas[,"set"]]
      ehcolor <- ec2
      # SNP-mode vertices are scaled smaller than phenotype-mode vertices
      vsizefactor <- 0.25
    } else {
      vertex0 <- data.frame()
    }
  }
  # Compose global vertex and edge sets
  if(nrow(vertex0)>0) {
    # Vertices: id is positional, value scales vertex size by relative edge
    # count, title supplies the HTML hover text
    vertex <<- data.frame("id"=1:(nrow(vertex0)),
                          "fixed"=F,
                          "label"=vertex0[,"v"],
                          "color"=vcolor,
                          "font"=list("color"=vtcolor, "size"=vfsz, strokeWidth=1, "strokeColor"=fsc1),
                          "value"=vsizefactor*vertex0[,"n"]/max(vertex0[,"n"], na.rm=T),
                          "title"=vertex0[,"hovtext"])
    # Include groups for legend configuration (only with phenotypes as vertices)
    if(vertexType=="Phenotype")
      vertex[,"group"] <<- c("GWAS 1","GWAS 2")[vertex0[,"set"]]
    rownames(vertex) <<- NULL
    # Compose vertex IDs (they are required for unambiguous identification in edge construction)
    vid <-setNames(vertex[,"id"], vertex[,"label"])
    # Compose edges
    if(nrow(gwas)>0) {
      edge <<- data.frame("from"=vid[gwas[,"v1"]],
                          "to"=vid[gwas[,"v2"]],
                          "label"=gwas[,"lab"],
                          # Hover text
                          "title"=gwas[,"hovtext"],
                          "hoverWidth"=0,
                          "selectionWidth"=0,
                          "color"=list("color"=ecolor, "opacity"=eopacity, "highlight"=ehcolor),
                          "font"=list("color"="white", "size"=efsz, strokeWidth=1, "strokeColor"=fsc1),
                          #"length"=20,
                          "physics"=T,
                          "smooth"=T)
    } else {
      edge <<- data.frame()
    }
  } else {
    vertex <<- data.frame()
    edge <<- data.frame()
  }
  print("net assembled")
}
##########################################################################################################
# Function to compose graph using visNetwork() functions
##########################################################################################################
# Render the network from the global vertex and edge data frames (built by
# assembleNetComponents). Also reads globals vc1, vc2 and nCluster.
# The embedded JavaScript callbacks relay vis.js events to Shiny inputs
# ('stabilized', 'click', 'shiftClick') for the server to observe.
composeNet <- function() {
  g <- visNetwork(vertex, edge) %>% 
    visGroups(groupname="GWAS 1", color=vc1, font=list("color"="white", "size"=12)) %>%
    visGroups(groupname="GWAS 2", color=vc2, font=list("color"="#202020", "size"=12)) %>%
    visLegend(useGroups=T, position="right") %>%
    visOptions(highlightNearest=list("enabled"=T, "hover"=T)) %>%
    visInteraction(hover=T, hoverConnectedEdges=T, navigationButtons=T) %>%
    visPhysics(timestep=0.25, minVelocity=10, maxVelocity=50, 
               barnesHut=list("avoidOverlap"=0.5, "springLength"=200, "springConstant"=0.5, "damping"=0.5),
               repulsion=list("nodeDistance"=100),
               stabilization=list("enabled"=T, "iterations"=1000)) %>%
    # Enclose java functions in {} brackets, otherwise they hang with no message
    #visEvents(type="once", startStabilizing="function() {
    #                                           alert('begin stabilization')
    #                                         }") %>%
    visEvents(type="once", stabilized="function() {
                                         //alert('stab')
                                         Shiny.onInputChange('stabilized', '0')
                                       }") %>%
    # Double click events fire two click events, so use shift-click for doubles
    visEvents(type="on", click="function(obj) {
                                  if(obj.event.srcEvent.shiftKey) {
                                    //alert('shift-click')
                                    Shiny.onInputChange('shiftClick', obj)
                                  } else {
                                    //alert('click')
                                    Shiny.onInputChange('click', obj)
                                  }
                                }")
    #visEvents(type="on", doubleClick="function(obj) Shiny.onInputChange('doubleClick', obj)")
  # Cluster, if requested
  if(nCluster>0)
    g <- g %>% visClusteringByHubsize(size=nCluster)
  print("net composed")
  return(g)
}
##########################################################################################################
# Function to compose table of graph statistics
# Note that accumulated values (centrality counts) are computed per vertex
##########################################################################################################
# Build the DT::datatable of vertex centrality (edge counts per vertex).
# Reads globals: vertexType, gwas1, gwas2, log_10_p (significance threshold),
# nedgemin (minimum edge count to report).
# Returns a datatable widget, or NULL when no vertex passes the filters.
# NOTE(review): both branches index rows with 1:nrow(...); if the filtered
# merge ever yields zero rows before the aggregate() calls, 1:0 gives c(1, 0)
# and aggregate() will misbehave -- confirm callers guarantee non-empty input.
composeGraphTable <- function() {
  if(vertexType=="Phenotype") {
    # Compose SNP edges between GWAS phenotypes
    # (inner merge on SNP keeps only SNPs significant in both GWAS sets)
    gwas <- merge(gwas1[which(gwas1[,"log_10_p"]>=log_10_p),],
                  gwas2[which(gwas2[,"log_10_p"]>=log_10_p),],
                  by="SNP")
    colnames(gwas) <- c("edge", "v1", "log_10_p1", "v2", "log_10_p2")
    # Enumerate edges by GWAS set and vertex
    v1 <- aggregate(1:nrow(gwas), by=list(gwas[,"v1"]), length)
    colnames(v1) <- c("v", "n")
    v2 <- aggregate(1:nrow(gwas), by=list(gwas[,"v2"]), length)
    colnames(v2) <- c("v", "n")
    # Identify vertices with edge count at threshold
    k1 <- which(v1[,"n"]>=nedgemin)
    k2 <- which(v2[,"n"]>=nedgemin)
    # Compose centrality table; cells are wrapped in HTML font/center tags
    # because datatable() below is called with escape=F
    tabdat <- rbind(data.frame("GWAS"=paste("<center><font size=-2>", rep(1, length(k1)), "</font></center>", sep=""),
                               "Phenotype"=paste("<font size=-2>", v1[k1,"v"], "</font>", sep=""),
                               "Centrality"=paste("<center><font size=-2>", v1[k1,"n"], "</font></center>", sep="")),
                    data.frame("GWAS"=paste("<center><font size=-2>", rep(2, length(k2)), "</font></center>", sep=""),
                               "Phenotype"=paste("<font size=-2>", v2[k2,"v"], "</font>", sep=""),
                               "Centrality"=paste("<center><font size=-2>", v2[k2,"n"], "</font></center>", sep="")))
  } else {
    # Compose within-GWAS edges between SNP vertices
    # Pair all possible edges using all phenotypes
    # Limit vertices to the intersection of SNPs in both GWAS sets
    k1 <- which(gwas1[,"SNP"] %in% gwas2[which(gwas2[,"log_10_p"]>=log_10_p),"SNP"] & gwas1[,"log_10_p"]>=log_10_p)
    k2 <- which(gwas2[,"SNP"] %in% gwas1[which(gwas1[,"log_10_p"]>=log_10_p),"SNP"] & gwas2[,"log_10_p"]>=log_10_p)
    gwas <- rbind(merge(gwas1[k1,], gwas1[k1,], by="phenotype"),
                  merge(gwas2[k2,], gwas2[k2,], by="phenotype"))
    colnames(gwas) <- c("edge", "v1", "log_10_p1", "v2", "log_10_p2")
    # Omit edges from a vertex to itself
    gwas <- subset(gwas, v1!=v2)
    # Identify unique vertex (SNP) pairs
    edge <- unique(gwas[,c("v1", "v2")])
    # Enumerate edges by vertex (SNP) origin
    v <- aggregate(1:nrow(edge), by=list(edge[,"v1"]), length)
    colnames(v) <- c("v", "n")
    # Restrict to n-edge threshold
    v <- subset(v, n>=nedgemin)
    # Compose centrality table
    tabdat <- data.frame("SNP"=paste("<font size=-1>", v[,"v"], "</font>", sep=""),
                         "Centrality"=paste("<center><font size=-1>", v[,"n"], "</font></center>", sep=""))
  }
  # Compose result data table
  if(nrow(tabdat)>0) {
    dt <- datatable( # Append GWAS sets 1 and 2 results
            # Embed in HTML for appearance
            data=tabdat,
            # Include a caption
            #caption=HTML("<b><font size=+1 color=#0000b0><center>Centrality Table</center></font></b>"),
            # Suppress row names, do not escape HTML tags
            # Automatically hide nav buttons when rows less than pages per row
            rownames=F, escape=F, autoHideNavigation=T,
            # Table appearance can be redefined in CSS options
            class="cell-border stripe",
            # Configure other table options
            # Information on data tables options available at https://rstudio.github.io/DT/options.html
            options=list(bLengthChange=F, bFilter=F, pageLength=10, autoWidth=T, info=F))
  } else {
    dt <- NULL
  }
  print("table composed")
  return(dt)
}
##########################################################################################################
# Shiny server function
##########################################################################################################
# Server half of the Shiny app.  All graph state (vertex, edge, filter
# thresholds) lives in global variables mutated with `<<-` by the handlers
# below and consumed by assembleNetComponents()/composeNet()/composeGraphTable().
# Several visNetwork operations only take effect when issued from inside a
# reactive context, hence the reactiveInst/renderInst "instruction" text inputs
# that are used as manual triggers.
shinyServer(
  function(input, output, session) {
    # Set default physics state
    updateRadioButtons(session=session, inputId="physics", selected=F)
    # Set initial rendering state to true
    # All ui variables, except renderInst (because it is in a conditional panel?), are initialized to
    # default values on initial load and during session$reload()
    # After an input$renderInst event, renderInst contains "", which prevents rendering on reload
    # Therefore, use initrend, which initialized to T during load and reload, then set to F after render
    initrend <- T
    # Reactive control for updating global variables, constructing network components, and rendering graph
    # Note that this function is executed once during initialization and whenever vertexType changes
    observeEvent(input$renderInst, {
      print(paste("renderInst (", input$renderInst, "), initrend=", initrend, sep=""))
      if(input$renderInst=="render" | initrend) {
        # Update global vars from the current UI control values
        vertexType <<- input$vertexType
        log_10_p <<- input$log_10_p
        nedgemin <<- input$nedgemin
        eopacity <<- input$eopacity
        nCluster <<- input$nCluster
        # Assemble network components
        assembleNetComponents()
        # Render graph
        if(nrow(vertex)>0) {
          # Net regen is always done with physics enabled, but we want it to be disabled after regen
          # Direct disabling of physics (using visPhysics(enabled=F)) has no effect when called immediately after
          # renderVisNetwork(), but is effective when executed from within a shiny reactive function
          # So, although not ideal, force disable of physics by toggling the reaction control with physics par val
          output$g1 <- renderVisNetwork(composeNet())
          # Compose and render centrality table
          output$gTable <- DT::renderDataTable(composeGraphTable())
          updateTextInput(session=session, inputId="reactiveInst", value="physicsOff")
          updateRadioButtons(session=session, inputId="physics", selected=F)
        } else {
          # Nothing passed the filters: clear both outputs
          output$g1 <- NULL
          output$gTable <- NULL
        }
        # Reset initialization renderInst flags
        initrend <<- F
        # Note that updating input$renderInst triggers the current function once more
        updateTextInput(session=session, inputId="renderInst", value="")
      }
    }, ignoreInit=F)
    # Reactive instruction control - used for triggering actions that do not function when called, typically
    # visNetwork functions that are combined with others (called in sequence)
    # Although not ideal, reactive calls, here, to functions that would ideally be coupled with others, is effective
    observe({
      print(paste("reactiveInst (", input$reactiveInst, ")", sep=""))
      if(input$reactiveInst=="physicsOff") {
        visPhysics(visNetworkProxy("g1"), enabled=F)
      } else if(input$reactiveInst=="vertexFixedOff") {
        visUpdateNodes(visNetworkProxy("g1"), data.frame("id"=vertex[,"id"], "fixed"=F))
      }
      # Consume the instruction so the same one can be issued again later
      updateTextInput(session=session, inputId="reactiveInst", value="")
    })
    # Vertex type event, configure vertices and edges based on type specified
    observeEvent(input$vertexType, {
      print("vertexType")
      updateTextInput(session=session, inputId="renderInst", value="render")
    }, ignoreInit=T)
    # Log_10_p filter event
    observeEvent(input$log_10_p, {
      print("logp")
      updateTextInput(session=session, inputId="renderInst", value="render")
    }, ignoreInit=T)
    # Min edge count filter event
    observeEvent(input$nedgemin, {
      print("nedgemin")
      updateTextInput(session=session, inputId="renderInst", value="render")
    }, ignoreInit=T)
    # Edge opacity event
    # Update edge opacity and render graph (do not reconstruct network)
    observeEvent(input$eopacity, {
      print("eopactiy")
      eopacity <<- input$eopacity
      if(nrow(vertex)>0) {
        edge[,"color.opacity"] <<- eopacity
        g <- composeNet()
        output$g1 <- renderVisNetwork(g)
        updateTextInput(session=session, inputId="reactiveInst", value="physicsOff")
        updateRadioButtons(session=session, inputId="physics", selected=F)
      } else {
        output$g1 <- NULL
      }
    }, ignoreInit=T)
    # Cluster size event
    # Rebuild and render the graph with the new cluster threshold
    observeEvent(input$nCluster, {
      print("nCluster")
      updateTextInput(session=session, inputId="renderInst", value="render")
    }, ignoreInit=T)
    # Regenerate event, reconstruct initial graph
    observeEvent(input$regen, {
      session$reload()
    }, ignoreInit=T)
    # Stabilized event
    # Disable physics after stabilization during initial network construction
    # This prevents dynamic repositioning of vertices as connected vertices are moved
    # Note that edges are not redrawn during dynamic movement, but are with the stabilize() function
    observeEvent(input$stabilized, {
      print("stabilized")
      visPhysics(visNetworkProxy("g1"), enabled=F)
    })
    # Physics event
    # Enable or disable physics operations (enabling causes repositioning of nodes, if not fixed, and edges)
    # Do not disable on first evaluation, during program initialization
    observeEvent(input$physics, {
      print("physics")
      if(input$physics) {
        visPhysics(visNetworkProxy("g1"), enabled=T, timestep=0.25, minVelocity=10, maxVelocity=50,
                   solver=c("barnesHut", "repulsion")[1],
                   barnesHut=list("avoidOverlap"=0.5, "springLength"=100, "springConstant"=0.5, "damping"=0.5),
                   #repulsion=list("nodeDistance"=1000),
                   stabilization=list("enabled"=T, "iterations"=1000))
      } else {
        visPhysics(visNetworkProxy("g1"), enabled=F)
      }
    }, ignoreInit=T)
    # Redraw edge event
    # Redraw by fixing vertex positions, stabilizing, then freeing vertex positions
    observeEvent(input$redrawEdge, {
      print("redrawEdge")
      # Fix positions
      visUpdateNodes(visNetworkProxy("g1"), data.frame("id"=vertex[,"id"], "fixed"=T))
      # Stabilize
      visStabilize(visNetworkProxy("g1"))
      # Free positions (deferred via the reactive instruction control, see observe() above)
      updateTextInput(session=session, inputId="reactiveInst", value="vertexFixedOff")
    }, ignoreInit=T)
    # Vertex select event
    # Compose set of vertex IDs that includes the selected vertex and all vertices adjacent to it
    # The following function executed as a result of selectNode event configured in visEvents(), above
    # Although the event successfully updates input$nodeSelect (causing a reactive observe to execute),
    # current node information is not available at the time of execution of observeEvent() (values current
    # prior to the selectNode event are returned)
    # Therefore, compose vertex set using edge configuration
    observeEvent(input$nodeSelect, {
      # Construct set by including selected vertex and all others with edge originating at selected vertex
      # Note that edges are constructed by joining GWAS set one vertices to those of GWAS set two, so that all edges
      # are directed from set 1 to set 2
      # Therefore, include all vertices with an edge originating at the selected vertex and all vertices with edge that
      # terminates at the selected node
      print("nodeSelect")
      v0 <- input$nodeSelect[[1]][[1]][1]
      print(v0)
      k <- which(vertex[,"id"] %in% c(v0, edge[which(edge[,"from"]==v0),"to"], edge[which(edge[,"to"]==v0),"from"]))
      print(k)
    }, ignoreInit=T)
    # Vertex click event
    # Verify that a vertex has been clicked
    # At present, simply print selected vertex and all connected to it
    observeEvent(input$click, {
      print("click")
      # Identify selected vertex
      v <- input$click[["nodes"]]
      if(length(v)>0) {
        v0 <- v[[1]][1]
        print(v0)
        # Identify all vertices connected to selected vertex
        k <- which(vertex[,"id"] %in% c(v0, edge[which(edge[,"from"]==v0),"to"], edge[which(edge[,"to"]==v0),"from"]))
        print(k)
      }
    }, ignoreInit=T)
    # Vertex shift-click event
    # Verify that a vertex has been clicked
    # Hide all vertices not connected to selected vertex and all edges attached to hidden vertices
    observeEvent(input$shiftClick, {
      print("shiftClick")
      # Identify selected vertex
      v <- input$shiftClick[["nodes"]]
      if(length(v)>0) {
        v0 <- v[[1]][1]
        print(v0)
        # Identify all edges connected to selected vertex
        ke <- which(edge[,"from"]==v0 | edge[,"to"]==v0)
        # Identify all vertices connected to selected vertex
        kv <- which(vertex[,"id"] %in% unlist(edge[ke,c("from", "to")]))
        # Hide vertices that are not connected to selected vertex
        vertex[,"hidden"] <<- {x <- rep(T, nrow(vertex)); x[kv] <- F; x}
        vertex[,"physics"] <<- {x <- rep(F, nrow(vertex)); x[kv] <- T; x}
        # Hide edges connected to invisible vertices
        # Edges do not have a visible property (ugh!)
        # Setting transparency leaves labels and hover text, so save and delete edge rows
        # Do not replace original edges if edge0 exists (indicating higher order subsetting requested)
        # This enables complete reconstruction of graph
        # Note that edge0 is removed by the restore hidden subnet function
        if(!exists("edge0", envir=.GlobalEnv))
          edge0 <<- edge
        edge <<- edge[ke,]
        g <- composeNet()
        output$g1 <- renderVisNetwork(g)
        updateTextInput(session=session, inputId="reactiveInst", value="physicsOff")
        updateRadioButtons(session=session, inputId="physics", selected=F)
      }
    }, ignoreInit=T)
    # Restore hidden vertices event
    observeEvent(input$restoreVertex, {
      print("restoreVertex")
      # Identify hidden vertices
      if("hidden" %in% names(vertex) & exists("edge0", envir=.GlobalEnv)) {
        k <- which(vertex[,"hidden"])
        if(length(k)>0) {
          vertex[,"hidden"] <<- F
          vertex[,"physics"] <<- T
          # Restore the full edge set saved by the shift-click handler
          edge <<- edge0
          g <- composeNet()
          output$g1 <- renderVisNetwork(g)
          updateTextInput(session=session, inputId="reactiveInst", value="physicsOff")
          updateRadioButtons(session=session, inputId="physics", selected=F)
          rm(edge0, envir=.GlobalEnv)
        }
      }
    }, ignoreInit=T)
  }
)
##########################################################################################################
# Execution begins here
##########################################################################################################
# Retrieve GWAS observations
readData() |
804ec12279f3136c14cfa6047bcb4ccc98649395 | f65ac6eca1f5c524e4ba5d4b760d4f26189f1263 | /R/mainApp.R | 5c0170c1e62cda8aa626ccafe715009773deba9a | [] | no_license | joey10086/ChemRICH | 32bf4e1e70a0ede6ab88a5bb8345be279fd1c03c | 172d6d5015c1e2339bed190c1418ffe7ab20e38d | refs/heads/master | 2020-04-16T06:11:17.221887 | 2019-01-08T21:20:57 | 2019-01-08T21:20:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 42,774 | r | mainApp.R | #raw_data <- readChar("./inst/www/chemrich_example_template.txt", nchars = 10000000)
#stat_file = raw_data
# Static lookup tables shipped with the package (paths relative to the app's
# working directory).  Each load() introduces the global object(s) named in
# the trailing comment.
load("./data/cidmesh_smiles_fpbit.RData") ### df.mega.mesh.unique: unique PubChem CIDs with MeSH mapping and fingerprint bits
load("./data/mesh_bit_loc_list.RData") # bitloclist: fingerprint bit locations for the unique CIDs.
load("./data/treenames.df.RData") ### treenames.df: human-readable names of the MeSH tree branches (used for class labels)
load("./data/PubChemMeshCounts.RData") # pubchemdata: counts of CIDs and PMIDs for each MeSH category.
load("./data/cidsbiosys.RData") # biosystem membership per CID -- object name not visible here; confirm after load
# Map a MeSH tree identifier to its human-readable class name.
#
# Looks up `x` in the global `treenames.df` table (columns MESHTREE and
# ClassName, loaded from treenames.df.RData).  Returns the matching
# ClassName value(s) as character, or `x` itself when nothing matches.
getCNames <- function(x) {
  hit_rows <- which(treenames.df$MESHTREE == x)
  if (length(hit_rows) == 0) {
    return(x)
  }
  as.character(treenames.df$ClassName[hit_rows])
}
# Normalize a SMILES string for substructure/fingerprint comparison by
# stripping stereochemistry and charge annotations.
#
# Substitutions are applied in a fixed order (order matters: the bare '@'
# removal must run before the '[C@H]' -> 'C' collapse, for example):
#   * drop '@' stereo marks; neutralize 'O-', 'N+' and 'H1'-'H4' '+' charges
#   * drop '/' and '\' cis/trans bond marks
#   * collapse bracketed '[CH]' / '[C@H]' / '[CH2]' atoms to plain 'C'
makeSmiles.clean <- function (smi) {
  rules <- list(
    c("@", ""),
    c("O-", "O"),
    c("H[1-4][+]", ""),
    c("N[+]", "N"),
    c("/|[\\]", ""),
    c("[[]C(@{1,}|)H[]]", "C"),
    c("[[]CH2[]]", "C")
  )
  for (rule in rules) {
    smi <- gsub(rule[1], rule[2], smi)
  }
  smi
}
# Draw an arc plus a clade label outside a circular ("fan") phylogeny plot.
#
# Args:
#   tree        - phylo object; if NULL, a minimal tree is rebuilt from the
#                 last plot recorded in .PlotPhyloEnv
#   text        - label to draw next to the arc
#   node        - internal node whose descendant tips define the arc span
#   ln.offset   - radial offset of the arc (multiple of the plot radius)
#   lab.offset  - radial offset of the label
#   cex         - label character expansion
#   orientation - "curved" (arctext along the arc) or "horizontal"
# Side effect: draws on the current device; requires a prior plot of
# type "fan" (stops otherwise).  Uses getDescendants/Ntip (phytools/ape)
# and draw.arc/arctext (plotrix).
arc.cladelabels<-function(tree=NULL,text,node,ln.offset=1.02,
  lab.offset=1.06,cex=1,orientation="curved",...){
  ## Credit: this function is adopted from http://blog.phytools.org/2017/03/clade-labels-on-circular-fan-tree.html
  obj<-get("last_plot.phylo",envir=.PlotPhyloEnv)
  if(obj$type!="fan") stop("method works only for type=\"fan\"")
  # Plot radius = farthest tip from the origin
  h<-max(sqrt(obj$xx^2+obj$yy^2))
  if(is.null(tree)){
    tree<-list(edge=obj$edge,tip.label=1:obj$Ntip,
      Nnode=obj$Nnode)
    class(tree)<-"phylo"
  }
  # Tips descending from `node`, in tip-number order
  d<-getDescendants(tree,node)
  d<-sort(d[d<=Ntip(tree)])
  # Convert tip coordinates to angles (degrees), mapping atan() results
  # into the correct quadrant for each tip
  deg<-atan(obj$yy[d]/obj$xx[d])*180/pi
  ii<-intersect(which(obj$yy[d]>=0),which(obj$xx[d]<0))
  deg[ii]<-180+deg[ii]
  ii<-intersect(which(obj$yy[d]<0),which(obj$xx[d]<0))
  deg[ii]<-180+deg[ii]
  ii<-intersect(which(obj$yy[d]<0),which(obj$xx[d]>=0))
  deg[ii]<-360+deg[ii]
  # Arc spans the angular range covered by the clade's tips
  draw.arc(x=0,y=0,radius=ln.offset*h,deg1=min(deg),deg2=max(deg),lwd = 2)
  if(orientation=="curved")
    arctext(text,radius=lab.offset*h,
      middle=mean(range(deg*pi/180)),cex=cex)
  else if(orientation=="horizontal"){
    x0<-lab.offset*cos(median(deg)*pi/180)*h
    y0<-lab.offset*sin(median(deg)*pi/180)*h
    text(x=x0,y=y0,label=text,
      adj=c(if(x0>=0) 0 else 1,if(y0>=0) 0 else 1),
      offset=0)
  }
}
getChemRich_windows <- function (stat_file,cutoff=0.1) {
letters.x <- c(letters,LETTERS)
pacman::p_load(grid,rcdk, RJSONIO,RCurl, dynamicTreeCut,ape,rvg,magrittr,ggplot2,ggrepel,ReporteRs,officer,XLConnect,phytools,plotrix,plotly, htmlwidgets,DT,extrafont,XLConnect)
loadfonts(quiet = T)
stat_file <- gsub("\r","",stat_file)
cfile <- strsplit(stat_file,"\n")[[1]]
df1 <- do.call(rbind, lapply(cfile, function (x) { strsplit(x,"\t")[[1]] } ))
#df1 <- df1[c(1:96),]
colnames(df1) <- sapply(df1[1,],as.character)
df1 <- df1[-1,]
df1 <- data.frame(df1,stringsAsFactors = F)
df1$foldchange <- sapply(df1$foldchange, function(x) { as.numeric(as.character(x)) })
df1$pvalue <- sapply(df1$pvalue, function(x) { as.numeric(as.character(x)) })
df1$CID <- as.integer(df1$Pubchem.ID)
#df1$NewCID <- as.character(df1$CID)
### If SMILES code is missing for even one metabolite, we will break the script and ends here.
if( (length(which(is.na(df1$SMILES)==TRUE)))) { stop("Missing SMILES codes. Please check the input.") }
if( (length(which(is.na(df1$CID)==TRUE)) & length(which(is.na(df1$SMILES)==TRUE)))) { stop("Missing SMILES codes. Please check the input.") }
df.mega.mesh$CompoundName <- tolower(df.mega.mesh$CompoundName)
treenames.df$ClassName <- tolower(treenames.df$ClassName)
###########################################
#### Detection of Fatty Acid Clusters #####
###########################################
smi.all.fas <- as.character(sapply(df1$SMILES, makeSmiles.clean))
falabelvec <- sapply(smi.all.fas, function(x) {
elecount <- table(strsplit(gsub("[0-9]|[)]|[(]|=","",x),"")[[1]])
falabel <- ""
if (length(which(names(elecount)%in%c("C","O")==FALSE))==0) {
if (elecount['C']>7 & length(grep("CCCC",x))==1 & length(grep("C2",x))!=1 ) { # long carbon but not aromatic or cyclic.
if (elecount['O']==2) {
dlen <- length(strsplit(x,"=")[[1]])-2
falabel <- paste(c("FA",elecount['C'],dlen), collapse="_")
}
if(elecount['O']>=3) { ## Put Rules here. How many O and then how many carbon chain. That will make the class.
if( length(grep("C1",x))==1) {
if (length(strsplit(x,"C1")[[1]]) ==3 ) {
dlen <- length(strsplit(x,"=")[[1]])-2
falabel <- paste(c("Prostaglandin",elecount['C']), collapse="_")
} else {
dlen <- length(strsplit(x,"=")[[1]])-2
falabel <- paste(c("Epoxy FA",elecount['C']), collapse="_")
}
} else {
if (length(strsplit(x,"=O|CO|OC")[[1]])-2==0){
dlen <- length(strsplit(x,"=")[[1]])-2
falabel <- paste(c("OH-FA",elecount['C'],dlen,(elecount['O']-2)), collapse="_")
} else {
if (length(strsplit(x,"OC|CO")[[1]]) <3 ) {
dlen <- length(strsplit(x,"=")[[1]])-2
falabel <- paste(c("O=FA",elecount['C'],dlen), collapse="_")
}
}
}
}
}
}
falabel
})
falabelvec[which(falabelvec=="OH-FA_20_3_2")] <- "DiHETrE"
falabelvec[which(falabelvec=="OH-FA_20_4_2")] <- "DiHETE"
falabelvec[which(falabelvec=="O=FA_18_3")] <- "oxo-ODE"
falabelvec[which(falabelvec=="O=FA_20_5")] <- "oxo-ETE"
falabelvec[which(falabelvec=="OH-FA_18_1_2")] <- "DiHOME"
falabelvec[which(falabelvec=="OH-FA_18_1_3")] <- "TriHOME"
falabelvec[which(falabelvec=="OH-FA_18_2_1")] <- "HODE"
falabelvec[which(falabelvec=="OH-FA_18_2_2")] <- "DiHODE"
falabelvec[which(falabelvec=="OH-FA_18_3_1")] <- "HOTrE"
falabelvec[which(falabelvec=="OH-FA_20_3_1")] <- "HETrE"
falabelvec[which(falabelvec=="OH-FA_20_4_1")] <- "HETE"
falabelvec[which(falabelvec=="OH-FA_20_5_1")] <- "HEPE"
falabelvec[which(falabelvec=="OH-FA_22_5_2")] <- "DiHDPE"
falabelvec[which(falabelvec=="Epoxy FA_22")] <- "EpDPE"
falabelvec[which(falabelvec=="Epoxy FA_18")] <- "EpETrE"
falabelvec[which(falabelvec=="Epoxy FA_20")] <- "EpODE"
falabelvec[grep("^FA_[0-9]{1,2}_0$", falabelvec)] <- "Saturated FA"
falabelvec[grep("^FA_[0-9]{1,2}_[1-9]$", falabelvec)] <- "UnSaturated FA"
#exc <- XLConnect::loadWorkbook("ChemRICH_results.xlsx", create = T)
### Get the FP annotations from the CouchDB using CIDS
idlist <- list()
idlist$keys <- as.integer(df1$CID)[which(is.na(df1$CID)==FALSE)]
urlres <- getURL("http://chemrich.fiehnlab.ucdavis.edu/db/chemrichdb_cidfp/_design/data/_view/cid_fp",customrequest='POST',httpheader=c('Content-Type'='application/json'),postfields=RJSONIO::toJSON(idlist))
urlres.list <- RJSONIO::fromJSON(urlres)
cid.fp.df <- as.data.frame(do.call(rbind, urlres.list$rows), stringsAsFactors = F)
### Get the FP annotations from the CouchDB using SMILES
idlist <- list()
idlist$keys <- df1$SMILES
urlres <- getURL("http://chemrich.fiehnlab.ucdavis.edu/db/chemrichdb_smilesfp/_design/data/_view/smiles_fp",customrequest='POST',httpheader=c('Content-Type'='application/json'),postfields=RJSONIO::toJSON(idlist))
urlres.list <- RJSONIO::fromJSON(urlres)
smiles.fp.df <- as.data.frame(do.call(rbind, urlres.list$rows), stringsAsFactors = F)
fps <- t(sapply(1:nrow(df1), function(x) {
#print(x)
xy <- 0
if(is.na(df1$CID[x]) | length(which(smiles.fp.df$key==df1$SMILES[x]))!=0 ) {
whichsmiles <- which(smiles.fp.df$key==df1$SMILES[x])
if (length(whichsmiles)!=0) {
xy <- smiles.fp.df$value[whichsmiles][[1]]
} else {
xy <- as.character(rcdk::get.fingerprint(rcdk::parse.smiles(df1$SMILES[x])[[1]],type="pubchem"))
}
} else {
whichcid <- which(cid.fp.df$key==df1$CID[x])
if (length(whichcid)!=0) {
xy <- cid.fp.df$value[whichcid][[1]]
} else {
xy <- as.character(rcdk::get.fingerprint(rcdk::parse.smiles(df1$SMILES[x])[[1]],type="pubchem"))
}
}
xy
}))
### If any of the smiles codes are wrong. Break the code here.
if( (length(which(fps==0))>0)) { stop("Incorrect SMILES Code provided. Please check the input") }
misseddf <- data.frame(SMILES=(df1$SMILES[which(df1$SMILES%in%smiles.fp.df$key!=TRUE)]), FP=fps[which(df1$SMILES%in%smiles.fp.df$key!=TRUE)], stringsAsFactors = F)
if(nrow(misseddf)!=0) {
elist <- list()
for (i in 1:nrow(misseddf)) {
elist[[i]] <- list()
elist[[i]]$SMILES<- misseddf$SMILES[i]
elist[[i]]$FP <- misseddf$FP[i]
}
elist1<- list()
elist1$docs <- elist
urlres <- getURL("http://chemrich.fiehnlab.ucdavis.edu/db/chemrichdb_smilesfp/_bulk_docs",customrequest='POST',httpheader=c('Content-Type'='application/json'),postfields=RJSONIO::toJSON(elist1))
gc()
}
################################################################################
###### Query Against All the MESH compounds and get back the annotations #######
################################################################################
## CID_MESH
idlist <- list()
idlist$keys <- as.integer(df1$CID)[which(is.na(df1$CID)==FALSE)]
urlres <- getURL("http://chemrich.fiehnlab.ucdavis.edu/db/chemrichdb_cid/_design/data/_view/cid_tree",customrequest='POST',httpheader=c('Content-Type'='application/json'),postfields=RJSONIO::toJSON(idlist))
urlres.list <- RJSONIO::fromJSON(urlres)
cid.mesh.df <- as.data.frame(do.call(rbind, urlres.list$rows), stringsAsFactors = F)
cid.mesh.df$CID <- unlist(cid.mesh.df$key)
## SMILES_MESH
idlist <- list()
idlist$keys <- df1$SMILES
urlres <- getURL("http://chemrich.fiehnlab.ucdavis.edu/db/chemrichdb_smiles/_design/data/_view/smiles_tree",customrequest='POST',httpheader=c('Content-Type'='application/json'),postfields=RJSONIO::toJSON(idlist))
urlres.list <- RJSONIO::fromJSON(urlres)
smiles.mesh.df <- as.data.frame(do.call(rbind, urlres.list$rows), stringsAsFactors = F)
smiles.mesh.df$SMILES <- unlist(smiles.mesh.df$key)
df1$CID[which(is.na(df1$CID)==TRUE)] <- paste0("cid_",which(is.na(df1$CID)==TRUE))
smiles_cid_map <- as.data.frame(do.call(rbind,lapply(1:nrow(smiles.mesh.df), function(x) { cbind(smiles.mesh.df[x,],CID=df1$CID[which(df1$SMILES==smiles.mesh.df$SMILES[x])]) })), stringsAsFactors = F)
##########################################################################################################
######################## Get the MeSH annotation by similarity scoring against mesh compounds. ###########
##########################################################################################################
inmesh.vec <- rep("No",nrow(df1))
inmesh.vec[unique(c(which(df1$CID%in%cid.mesh.df$CID==TRUE), which(df1$SMILES%in%smiles.mesh.df$SMILES==TRUE)))] <- "Yes"
df1.bitmat <- do.call(rbind,lapply(fps,function(x) as.integer(strsplit(x,"")[[1]][1:881])))
df1.bitmat.location <- lapply(1:nrow(df1.bitmat), function(x) { which(df1.bitmat[x,]==1) })
only_b <- sapply(1:length(bitloclist), function(x) { length(bitloclist[[x]]) })
bitmeans <- sapply(1:length(bitloclist), function(x) { median(bitloclist[[x]]) }) # we decided to use median, as some outliers values shall not affect it.
fpsmeans <- sapply(df1.bitmat.location, function(x){median(x)})
################################################
## Direct name searching against MeSH database.#
################################################
directlabels <- sapply(tolower(df1$Compound.Name), function(x) { ## We will ask everyone to use MeSH compound names to enable automated text mining on the metabolomics datasets.
clabel="Not Found"
findind <- which(df.mega.mesh$CompoundName==x)
if(length( findind)>0) {
classvec <- as.character(df.mega.mesh$MESSTREE[findind])
classvec <- strsplit(classvec[1],";")[[1]]
if( length(grep("^D01[.]",classvec)) > 0 ) {
classvec <- classvec[-grep("^D01[.]|^D03[.]",classvec)]
}
clabel <- names(which.max(sapply(classvec,nchar)))
}
clabel
})
labelvec <- sapply(1:nrow(df1), function(i) {
clabel <- "Not Found"
if(falabelvec[i]=="" & inmesh.vec[i]=="No" & directlabels[i]=="Not Found") {
#if(falabelvec[i]=="") {
print(i)
meanindex <- which(bitmeans < (fpsmeans[i]+5) & bitmeans > (fpsmeans[i]-5))
bitloclist.sb <- bitloclist[meanindex]
only_b.sb <- only_b[meanindex]
overlapvec <- sapply(1:length(bitloclist.sb), function(x) { length(which(bitloclist.sb[[x]]%in%df1.bitmat.location[[i]]==TRUE)) })
tmvec <- overlapvec/((length(df1.bitmat.location[[i]])+only_b.sb)-overlapvec)
if(length(which(tmvec>0.90))>0) {
if(length(which(tmvec>0.98))>0){
cidindex <- meanindex[which(tmvec>0.98)]
if(length(cidindex)==1) {
clabel <- df.mega.mesh$MESSTREE[cidindex]
} else {
clabel.table <- sort(table(unlist(sapply(unique(df.mega.mesh[cidindex[order(tmvec[which(tmvec>0.98)])],"MeSHUID"])[1:10], function(x) {if(!is.na(x)) { strsplit(df.mega.mesh$MESSTREE[which(df.mega.mesh$MeSHUID==x)][1],";") }}))),decreasing = T)
clabel.table <- which(clabel.table==clabel.table[1])
clabel.table.sort <- sort(sapply(names(clabel.table),nchar),decreasing = T)
clabel.table.sort.max <- which.max(clabel.table.sort)
if(length(clabel.table.sort.max==1)) {
clabel <- names(clabel.table.sort.max)
} else {
clabel <- sort(names(clabel.table.sort.max))[1]
}
}
} else {
## CID_MESH
cidindex <- meanindex[which(tmvec>0.90)]
if(length(cidindex)==1) {
clabel <- df.mega.mesh$MESSTREE[cidindex]
} else {
clabel.table <- sort(table(unlist(sapply(unique(df.mega.mesh[cidindex[order(tmvec[which(tmvec>0.90)])],"MeSHUID"])[1:10], function(x) {if(!is.na(x)) { strsplit(df.mega.mesh$MESSTREE[which(df.mega.mesh$MeSHUID==x)][1],";") }}))),decreasing = T)
clabel.table <- which(clabel.table==clabel.table[1])
clabel.table.sort <- sort(sapply(names(clabel.table),nchar),decreasing = T)
clabel.table.sort.max <- which.max(clabel.table.sort)
if(length(clabel.table.sort.max==1)) {
clabel <- names(clabel.table.sort.max)
} else {
clabel <- sort(names(clabel.table.sort.max))[1]
}
}
}
}
}
clabel
})
xmllist <- which(labelvec!="Not Found")
if(length(xmllist)!=0) { ## we want to post these high score ones to the couchdb.
elist <- list()
for (i in 1:length(xmllist)) {
elist[[i]] <- list()
elist[[i]]$SMILES <- df1$SMILES[xmllist[i]]
elist[[i]]$MeSHTree <- labelvec[xmllist[i]]
}
elist1<- list()
elist1$docs <- elist
##urlres <- getURL("http://localhost:5984/chemrichdb_smiles/_bulk_docs",customrequest='POST',httpheader=c('Content-Type'='application/json'),postfields=toJSON(elist1))
gc()
}
smiles_cid_map <- smiles_cid_map[,-4]
##########################################
### Preparing the final CID_MESH mapping.####
#########################################
finalMesh.df <- as.data.frame(rbind(cid.mesh.df, smiles_cid_map))
finalMesh.df <- finalMesh.df[which(finalMesh.df$CID%in%df1$CID[which(falabelvec!="")]==FALSE),] ## remove the compounds that are covered by FA labels.
finalMesh.df <- finalMesh.df[which(finalMesh.df$CID%in%df1$CID[which(directlabels!="Not Found")]==FALSE),] ## remove the compounds that have been covered by the direct label mapping.
finalMesh.df <- finalMesh.df[,-1]
finalMesh.df$value <- unlist(finalMesh.df$value)
finalMesh.df <- rbind(finalMesh.df,data.frame(key=df1$CID, value=labelvec,CID=df1$CID))
meshvec <- finalMesh.df$value
finalMesh.df$NewMesh <- meshvec
finalMesh.df <- finalMesh.df[which(finalMesh.df$NewMesh!="Not Found"),]
finalMesh.df <- finalMesh.df[which(finalMesh.df$NewMesh!=""),]
finalMesh.df <- finalMesh.df[!duplicated(finalMesh.df),]
#############################################################
#### Calculation of the simialrity tree and its clustering ##
#############################################################
m <- df1.bitmat
mat <- m%*%t(m)
len <- length(m[,1])
s <- mat.or.vec(len,len)
for (i in 1:len) {
for (j in 1:len){
s[i,j] <- mat[i,j]/(mat[i,i]+mat[j,j]-mat[i,j])
}
}
diag(s) <- 0
##
hc <- hclust(as.dist(1-s), method="average") # ward method provide better clusters.
clust1 <- cutreeDynamic(hc,distM = as.matrix(1-s),deepSplit =4, minClusterSize = 2) # can be used to merge cluster, but it is better to keep them as it is. # Clusters are detected using the average linkage hclust.
glaydf2 <- data.frame(df1,cluster=clust1,stringsAsFactors = F)
df1.order <- df1[hc$order,]
#### Clustering order for the tree ordering and the volcano plot ordering.
s2 <- s[order(df1$Compound.Name),order(df1$Compound.Name)]
hc2 <- hclust(as.dist(1-s2), method="ward.D") # We will use average to detect cluster on the tree and ward.d to sort the clusters and individual compounds. Apparently ward.d is better in sorting the compounds by their degree of unsaturation. It is also interesting to know that the original order in which the data are had influence on the sorting order.
df2.order <- df1[order(df1$Compound.Name),][hc2$order,]
###########################################
#### finding the Non-overlapping terms. ###
###########################################
##
#### Creation of label dataframe.
##
finalterm.df <- data.frame(CID=df1$CID, Clabel=falabelvec,stringsAsFactors = F) # first add the fatty acid labels.
directlabindex <- as.integer(which(directlabels!="Not Found"))[which(as.integer(which(directlabels!="Not Found"))%in%which(finalterm.df$Clabel=="")==TRUE)] ## then add the direct labels found by names matching
finalterm.df$Clabel[directlabindex] <- as.character(directlabels[directlabindex])
for (i in 1:nrow(finalterm.df)) { # now add the mesh ids obtained from CouchDB. This will include the mesh annotation calcualted previously.
if(finalterm.df$Clabel[i]=="" & length(which(finalMesh.df$CID==df1$CID[i]))>0 ) {
print(i)
finalterm.df$Clabel[i] <- names(which.max(sapply(unlist(strsplit(finalMesh.df$NewMesh[which(finalMesh.df$CID==df1$CID[i])],";")),nchar)))
}
}
##########################################
#### Detect for new compound clusters ###
##########################################
finalterm.df.2 <- finalterm.df
#finalterm.df.2$Clabel[which(finalterm.df$Clabel=="D09.400.410.420.525.870")] <- "" #we first remove the sphingolyelins to test it.
#finalterm.df.2$Clabel[which(finalterm.df$Clabel=="D10.351.801")] <- "" #we first remove the sphingolyelins to test it.
newClustVec <- names(which(table(glaydf2$cluster[which(finalterm.df.2$Clabel=="")])>3))
clustMeanvec <- sapply(newClustVec, function(x) { mean(s[which(glaydf2$cluster==x),which(glaydf2$cluster==x)]) } )
newClustVec <- newClustVec[which(clustMeanvec>0.70)]
if(length(newClustVec)>0) {
for(i in which(finalterm.df.2$Clabel=="")) {
if(glaydf2$cluster[i]%in%newClustVec){
finalterm.df$Clabel[i] <- paste0("NewCluster_",glaydf2$cluster[i])
}
}
}
##### Map the compounds that have atleast 0.75 similarity to others. Only for compunds that do not have any labels.
for ( i in which(finalterm.df$Clabel=="")) { ## if there is a metabolite that has score higher than 0.80 then we get the class using that compound.
if(max(s[i,])>0.75) {
simorder <- order(s[i,],decreasing = T)[which(s[i,][order(s[i,],decreasing = T)]>0.75)]
simorder.class <- sapply(simorder, function(x) { finalterm.df$Clabel[x]})
simorder.class <- simorder.class[!is.na(simorder.class)]
if(simorder.class[1]!=""){
finalterm.df$Clabel[i] <- simorder.class[which(simorder.class!="")][1]
} else if(length(simorder.class)>1) {
finalterm.df$Clabel[i] <- simorder.class[which(simorder.class!="")][1]
}
}
}
finalterm.df$Count <- as.integer(sapply(finalterm.df$Clabel, function(x) { length(which(finalterm.df$Clabel==x)) }))
finalterm.df$gCount <- as.integer(sapply(finalterm.df$Clabel, function(x) { length(grep(x,finalterm.df$Clabel)) }))
exclusionVec <- c("D02","D03.383","D03.633.100","D03.633.300","D03.633.400","D03.633","D03.605","D02.241.081") ## we will have some static list of terms that need to be excluded.
exclusionVec <- c(exclusionVec, unique(falabelvec)[-1]) ## if we see Fatty acid label, we dont touch them.
for ( i in which(finalterm.df$gCount<3)) { ## Drop the compound to the neareast one.
qpat <- gsub("[.][0-9]{2,3}$","",finalterm.df$Clabel[i])
if(length(grep(qpat,finalterm.df$Clabel))>2 & !qpat%in%exclusionVec){
finalterm.df$Clabel[i] <- qpat
}
}
finalterm.df$Count <- as.integer(sapply(finalterm.df$Clabel, function(x) { length(which(finalterm.df$Clabel==x)) }))
finalterm.df$gCount <- as.integer(sapply(finalterm.df$Clabel, function(x) { length(grep(x,finalterm.df$Clabel)) }))
for ( i in which(finalterm.df$gCount<3)) { ## Map to the closest ones.
if(max(s[i,])>0.85) {
simorder <- order(s[i,],decreasing = T)[which(s[i,][order(s[i,],decreasing = T)]>0.85)]
simorder.class <- sapply(simorder, function(x) { finalterm.df$Clabel[x]})
simorder.class <- simorder.class[!is.na(simorder.class)]
if(simorder.class[1]!=""){
finalterm.df$Clabel[i] <- simorder.class[which(simorder.class!="")][1]
} else if(length(simorder.class)>1) {
finalterm.df$Clabel[i] <- simorder.class[which(simorder.class!="")][1]
}
}
}
finalterm.df$Count <- as.integer(sapply(finalterm.df$Clabel, function(x) { length(which(finalterm.df$Clabel==x)) }))
finalterm.df$gCount <- as.integer(sapply(finalterm.df$Clabel, function(x) { length(grep(x,finalterm.df$Clabel)) }))
# Repeat it one more time.
for ( i in which(finalterm.df$gCount<3)) { ## Drop the compound to the neareast one.
qpat <- gsub("[.][0-9]{2,3}$","",finalterm.df$Clabel[i])
if(length(grep(qpat,finalterm.df$Clabel))>2 & !qpat%in%exclusionVec){
finalterm.df$Clabel[i] <- qpat
}
}
finalterm.df$Count <- as.integer(sapply(finalterm.df$Clabel, function(x) { length(which(finalterm.df$Clabel==x)) }))
finalterm.df$gCount <- as.integer(sapply(finalterm.df$Clabel, function(x) { length(grep(x,finalterm.df$Clabel)) }))
for ( i in which(finalterm.df$gCount<3)) { ## Map to the closest ones.
if(max(s[i,])>0.85) {
simorder <- order(s[i,],decreasing = T)[which(s[i,][order(s[i,],decreasing = T)]>0.85)]
simorder.class <- sapply(simorder, function(x) { finalterm.df$Clabel[x]})
simorder.class <- simorder.class[!is.na(simorder.class)]
if(simorder.class[1]!=""){
finalterm.df$Clabel[i] <- simorder.class[which(simorder.class!="")][1]
} else if(length(simorder.class)>1) {
finalterm.df$Clabel[i] <- simorder.class[which(simorder.class!="")][1]
}
}
}
finalterm.df$Count <- as.integer(sapply(finalterm.df$Clabel, function(x) { length(which(finalterm.df$Clabel==x)) }))
finalterm.df$gCount <- as.integer(sapply(finalterm.df$Clabel, function(x) { length(grep(x,finalterm.df$Clabel)) }))
finalterm.df$Clabel[which(finalterm.df$Count<3)] <- finalterm.df.2$Clabel[which(finalterm.df$Count<3)] ### We reverse back the original labels as this did not create any higher labels.
finallabelvec <- finalterm.df$Clabel
### Find the saturated and unsaturated compounds in the
HasSaturatedFats <- names(which(table(finallabelvec[grep("D10|D09.400.410",finallabelvec)[which(sapply(grep("D10|D09.400.410",finallabelvec), function(x) { length(grep("C=C",df1$SMILES[x])) })==0)]])>2)) ### we are only selecting lipid classes that has atleast 3 saturated lipids.
for (i in 1:nrow(finalterm.df)) {
if(finallabelvec[i]%in%HasSaturatedFats){
if(length(grep("C=C",df1$SMILES[i]))==0) {
finallabelvec[i] <- paste0("Saturated_",getCNames(finallabelvec[i]))
} else {
finallabelvec[i] <- paste0("Unsaturated_",getCNames(finallabelvec[i]))
}
}
}
#############################################
### Calculation of Enrichment Statistics#####
#############################################
clusterids <- names(which(table(sapply(as.character(finallabelvec),getCNames))>2))
clusterids <- clusterids[which(clusterids!="")]
df1$ClusterLabel <- as.character(sapply(as.character(finallabelvec),getCNames))
cluster.pvalues <- sapply(clusterids, function(x) { # pvalues were calculated if the set has at least 2 metabolites with less than 0.10 pvalue.
cl.member <- which(df1$ClusterLabel==x)
if( length(which(df1$pvalue[cl.member]<.05)) >1 ){
pval.cl.member <- df1$pvalue[cl.member]
p.test.results <- ks.test(pval.cl.member,"punif",alternative="greater")
p.test.results$p.value
} else {
1
}
})
cluster.pvalues[which(cluster.pvalues==0)] <- 2.2e-20 ### All the zero are rounded to the double.eps pvalues.\
#clusterdf <- data.frame(name=clusterids[which(cluster.pvalues!=10)],pvalues=cluster.pvalues[which(cluster.pvalues!=10)], stringsAsFactors = F)
clusterdf <- data.frame(name=clusterids,pvalues=cluster.pvalues, stringsAsFactors = F)
# clusterdf$keycpd <- sapply(clusterdf$name, function(x) {
# dfx <- df1[which(df1$ClusterLabel==x),]
# dfx$SMILES[which.min(dfx$pvalue)]
# })
clusterdf$keycpdname <- sapply(clusterdf$name, function(x) {
dfx <- df1[which(df1$ClusterLabel==x),]
dfx$Compound.Name[which.min(dfx$pvalue)]
})
altrat <- sapply(clusterdf$name, function (k) {
length(which(df1$ClusterLabel==k & df1$pvalue<0.10))/length(which(df1$ClusterLabel==k))
})
uprat <-sapply(clusterdf$name, function (k) {
length(which(df1$ClusterLabel==k & df1$pvalue<0.10 & df1$foldchange > 1.00000000))/length(which(df1$ClusterLabel==k))
})
clust_s_vec <- sapply(clusterdf$name, function (k) {
length(which(df1$ClusterLabel==k))
})
clusterdf$alteredMetabolites <- sapply(clusterdf$name, function (k) {length(which(df1$ClusterLabel==k & df1$pvalue<0.10))})
clusterdf$upcount <- sapply(clusterdf$name, function (k) {length(which(df1$ClusterLabel==k & df1$pvalue<0.10 & df1$foldchange > 1.00000000))})
clusterdf$downcount <- sapply(clusterdf$name, function (k) {length(which(df1$ClusterLabel==k & df1$pvalue<0.10 & df1$foldchange < 1.00000000))})
clusterdf$upratio <- uprat
clusterdf$altratio <- altrat
clusterdf$csize <- clust_s_vec
clusterdf <- clusterdf[which(clusterdf$csize>2),]
clusterdf$adjustedpvalue <- p.adjust(clusterdf$pvalues, method = "fdr")
clustdf <- clusterdf
clustdf.e <- clustdf[order(clustdf$pvalues),]
clustdf.e$pvalues <- signif(clustdf.e$pvalues, digits = 2)
clustdf.e$adjustedpvalue <- signif(clustdf.e$adjustedpvalue, digits = 2)
clustdf.e$upratio <- signif(clustdf.e$upratio, digits = 1)
clustdf.e$altratio <- signif(clustdf.e$altratio, digits = 1)
clustdf.e <- clustdf.e[,c("name","csize","pvalues","adjustedpvalue","keycpdname","alteredMetabolites","upcount","downcount","upratio","altratio")]
names(clustdf.e) <- c("Cluster name","Cluster size","p-values","FDR","Key compound","Altered metabolites","Increased","Decreased","Increased ratio","Altered Ratio")
#XLConnect::createSheet(exc,'ChemRICH_Results')
#XLConnect::writeWorksheet(exc, clustdf.e, sheet = "ChemRICH_Results", startRow = 1, startCol = 2)
#write.table(clustdf.e, file="cluster_level_results_altered.txt", col.names = T, row.names = F, quote = F, sep="\t")
#writeLines(toJSON(clustdf), "clusterJson.json")
gdf <- datatable(clustdf.e,options = list(pageLength = 10),rownames = F)
gdf$width <- "auto"
gdf$height <- "auto"
saveWidget(gdf,file="clusterlevel.html",selfcontained = F)
clusterdf$Compounds <- sapply(clusterdf$name, function(x) {
dfx <- df1[which(df1$ClusterLabel==x),]
paste(dfx$Compound.Name,collapse="<br>")
}) ## this one is the label on the tooltip of the ggplotly plot.
clustdf <- clusterdf[which(cluster.pvalues!=1),]
#################################################
########## Impact Visualization Graph ###########
#################################################
clustdf.alt.impact <- clustdf[which(clustdf$pvalues<0.05 & clustdf$csize>1 & clustdf$alteredMetabolites>1) ,]
clustdf.alt.impact <- clustdf.alt.impact[order(sapply(clustdf.alt.impact$keycpdname, function(x) {which( df2.order$Compound.Name==x)})),]
clustdf.alt.impact$order <- 1:nrow(clustdf.alt.impact) ### Order is decided by the hclust algorithm.
clustdf.alt.impact$logPval <- -log(clustdf.alt.impact$pvalues)
p2 <- ggplot(clustdf.alt.impact,aes(x=order,y=-log(pvalues)))
p2 <- p2 + geom_point(aes(size=csize, color=upratio)) +
#labs(subtitle = "Figure Legend : Point size corresponds to the count of metabolites in the group. Point color shows that proportion of the increased metabolites where red means high and blue means low number of upregulated compounds.")+
scale_color_gradient(low = "blue", high = "red", limits=c(0,1))+
scale_size(range = c(5, 30)) +
scale_y_continuous("-log (pvalue)",limits = c(0, max(-log(clustdf.alt.impact$pvalues))+4 )) +
scale_x_continuous(" cluster order on the similarity tree ") +
theme_bw() +
#labs(title = "ChemRICH cluster impact plot") +
geom_label_repel(aes(label = name), color = "gray20",family="Arial",data=subset(clustdf.alt.impact, csize>2),force = 5)+
theme(text=element_text(family="Arial Black"))+
theme(
plot.title = element_text(face="bold", size=30,hjust = 0.5),
axis.title.x = element_text(face="bold", size=20),
axis.title.y = element_text(face="bold", size=20, angle=90),
panel.grid.major = element_blank(), # switch off major gridlines
panel.grid.minor = element_blank(), # switch off minor gridlines
legend.position = "none", # manually position the legend (numbers being from 0,0 at bottom left of whole plot to 1,1 at top right)
legend.title = element_blank(), # switch off the legend title
legend.text = element_text(size=12),
legend.key.size = unit(1.5, "lines"),
legend.key = element_blank(), # switch off the rectangle around symbols in the legend
legend.spacing = unit(.05, "cm"),
axis.text.x = element_text(size=10,angle = 0, hjust = 1),
axis.text.y = element_text(size=15,angle = 0, hjust = 1)
)
read_pptx() %>%
add_slide(layout = "Title and Content", master = "Office Theme") %>%
ph_with_vg(code = print(p2), type = "body", width=10, height=8, offx =0.0 , offy = 0.0) %>%
print(target = "chemrich_impact_plot.pptx") %>%
invisible()
#wbp <- pptx(template = system.file("data","chem_rich_temp.pptx", package = "ChemRICH"))
#wbp <- pptx(template = "./data/chem_rich_temp.pptx")
#wbp <- addSlide( wbp, "lipidClust" )
#wbp <- addPlot( wbp, function() print(p2), offx =0.0 , offy = 0.0, width = 8, height = 6 , vector.graphic = TRUE )
#writeDoc( wbp, file = "chemrich_impact_plot.pptx" )
ggsave("chemrich_impact_plot.png", p2,height = 8, width = 12, dpi=300)
#ggsave("tst.png",height=9,width=12,dpi=72)
##########################################################
##### Interactive Visualization plot using GGPlotLY ######
###########################################################
p2 <- ggplot(clustdf.alt.impact,aes(label=name,label2=pvalues, label3=csize,label4=Compounds))
p2 <- p2 + geom_point(aes(x=order,y=-log(pvalues),size=csize, color=upratio)) + scale_color_gradient(low = "blue", high = "red", limits=c(0,1))+ scale_size(range = c(5, 30)) +
#labs(caption = "Figure Legend : Point size corresponds to the count of metabolites in the group. Point color shows that proportion of the increased metabolites where red means high and blue means low number of upregulated compounds.")+
scale_y_continuous("-log (pvalue)",limits = c(0, max(-log(clustdf.alt.impact$pvalues))+5 )) +
scale_x_continuous(" cluster order on the similarity tree ") +
theme_bw() +
#labs(title = "ChemRICH cluster impact plot") +
geom_text(aes(x=order,y=-log(pvalues),label = name), color = "gray20",data=subset(clustdf.alt.impact, csize>2))+
theme(
plot.title = element_text(face="bold", size=30,hjust = 0.5),
axis.title.x = element_text(face="bold", size=20),
axis.title.y = element_text(face="bold", size=20, angle=90),
panel.grid.major = element_blank(), # switch off major gridlines
panel.grid.minor = element_blank(), # switch off minor gridlines
legend.position = "none", # manually position the legend (numbers being from 0,0 at bottom left of whole plot to 1,1 at top right)
legend.title = element_blank(), # switch off the legend title
legend.text = element_text(size=12),
legend.key.size = unit(1.5, "lines"),
legend.key = element_blank(), # switch off the rectangle around symbols in the legend
legend.spacing = unit(.05, "cm"),
axis.text.x = element_text(size=15,angle = 0, hjust = 1),
axis.text.y = element_text(size=15,angle = 0, hjust = 1)
)
gg <- ggplotly(p2,tooltip = c("label","label2","label4"), width = 1600, height = 1000)
saveWidget(gg,file = "ggplotly.html", selfcontained = F)
#################################
### Interactive volcano plot ###
#################################
# we need to add the interactive volcano plot for the p-values and fold-change sorted by the MeSH tree order.
df2 <- df2.order
df2$Changed <- "No Change"
df2$Changed[which(df2$pvalue<0.05 & df2$foldchange>1)] <- "UP"
df2$Changed[which(df2$pvalue<0.05 & df2$foldchange<1)] <- "DOWN"
df2$Changed <- as.factor(df2$Changed)
df2$pathway <- "No"
df2$pathway[which(df2$CID%in%cid_biosys==TRUE)] <- "yes"
df2$pathway <- as.factor(df2$pathway)
df2$Compound.Name <- factor(df2$Compound.Name, levels =df2$Compound.Name)
df2$foldchange <- round(sapply(df2$foldchange, function(x) { if(x>1) {x} else {1/x} }), digits = 1)
df2$foldchange[ df2$foldchange>5] <- 5
p2 <- ggplot(df2, aes(label=Compound.Name,x=Compound.Name, y=-log(pvalue,base = 10),colour = Changed,shape=pathway, size=foldchange)) + scale_size(range = c(1, 10)) +
#geom_line(position=pd, size=2)+
#geom_errorbar(aes(ymin = V2-V3 , ymax=V2+V3), width=.3,size=2,position=pd) +
geom_point(stat = "identity") + # 21 is filled circle
#geom_bar(stat="identity", size=.1,position=position_dodge()) +
scale_y_continuous("pvalue (-log)") +
scale_x_discrete("Metabolites: Red- increased,blue-decreased,yellow-not significant, solid-pathway(s) found ") +
scale_color_manual("Student ttest",values=c("blue", "yellow", "red","white")) +
scale_fill_manual("",values=c("white", "yellow", "red","white")) +
scale_shape_manual("Pathway found",values=c(1,16))+
#scale_shape(solid = FALSE) +
theme_bw() +
labs(title = "Metabolic Dys-regulations sorted by chemical similarity") +
theme(
plot.title = element_text(face="bold", size=30,hjust = 0.5),
axis.title.x = element_text(face="bold", size=20),
axis.title.y = element_text(face="bold", size=30, angle=90),
panel.grid.major = element_blank(), # switch off major gridlines
panel.grid.minor = element_blank(), # switch off minor gridlines
#legend.justification=c(1,0),
#legend.position=c(1,.6),
legend.position = "none",
#legend.title = element_blank(), # switch off the legend title
#legend.text = element_text(size=12),
#legend.key.size = unit(1.5, "lines"),
#legend.key = element_blank(), # switch off the rectangle around symbols in the legend
#legend.spacing = unit(.05, "cm"),
#axis.text.x = element_text(size=15,angle = 45, hjust = 1.0),
axis.text.x= element_blank(),
axis.text.y = element_text(size=15,angle = 0, hjust = 0.5)
)
p2
p3 <- ggplotly(p2, width = 1600, height = 1000)
htmlwidgets::saveWidget(p3, "dysregplot.html", selfcontained = F)
######################################################
### Visualization of Chemical Tree ###
######################################################
treeLabels <- rep("",nrow(df1))
# Draw the chemical-similarity dendrogram as a fan (circular) phylogram and
# highlight/annotate the selected compound clusters.
#
# Args:
#   hc:      hclust object built from the compound similarity matrix.
#   clus:    integer cluster id per compound; id 0 means "not highlighted".
#   dirvec:  per-compound tip colour ("red"/"blue" for direction of change,
#            "yellow" for non-significant compounds).
#   sizevec: per-compound tip size (non-significant tips are drawn smaller).
#
# Side effects: plots to the active graphics device; no useful return value.
# NOTE(review): reads `df1` (compound table) and `letters.x` (cluster label
# sequence) from the enclosing environment, and calls as.phylo()/tiplabels()/
# arc.cladelabels() from the ape/phytools ecosystem -- confirm these are
# attached by the caller.
plot.fan.chemrich <- function(hc, clus, dirvec,sizevec) {
nclus <- length(unique(clus))
# Large repeated colour palette; defined but not referenced below
# (kept as in the original for reference).
palette <- c('black','green','orange','blue','grey','yellow','pink','brown','purple','violet','skyblue','khaki','lavender','magenta',"gold","sienna","tan","seagreen","orchid","linen","skyblue3","wheat","navyblue","moccasin","navy","dodgerblue","deeppink","chocolate",'red','blue','green','orange','maroon2','grey','yellow','pink','brown','purple','violet','skyblue','khaki','lavender','magenta',"gold","sienna","tan","seagreen","orchid","linen","skyblue3","wheat","navyblue","moccasin","navy","dodgerblue","deeppink","chocolate",'black','green','orange','blue','grey','yellow','pink','brown','purple','violet','skyblue','khaki','lavender','magenta',"gold","sienna","tan","seagreen","orchid","linen","skyblue3","wheat","navyblue","moccasin","navy","dodgerblue","deeppink","chocolate",'red','blue','green','orange','maroon2','grey','yellow','pink','brown','purple','violet','skyblue','khaki','lavender','magenta',"gold","sienna","tan","seagreen","orchid","linen","skyblue3","wheat","navyblue","moccasin","navy","dodgerblue","deeppink","chocolate")
#[1:nclus]
# Convert the dendrogram to a phylo object; label tips with compound names,
# blanking the labels of non-significant ("yellow") compounds.
X <- as.phylo(hc)
X$tip.label <- df1$Compound.Name
#X$tip.label <- as.character(clus)
X$tip.label[which(dirvec=="yellow")] <- ""
# Default edge colour; edges belonging to highlighted clusters (id > 0) are
# re-coloured black in the loop below.
edge.colors <- rep("lightgray",nrow(X$edge))
for (i in unique(clus)) {
if(i>0) {
# difvec > 3 flags gaps in the cluster's edge-index sequence, i.e. its
# edges are not contiguous in X$edge; the two branches handle each case.
difvec <- diff(which(X$edge[,2] %in% which(clus==i)))
if(length(which(difvec>3))==0) {
# Contiguous case: colour the whole index range plus the parent edge.
edge.colors[min(which(X$edge[,2] %in% which(clus==i))):max(which(X$edge[,2] %in% which(clus==i)))] <- "black"
edge.colors[min(which(X$edge[,2] %in% which(clus==i)))-1] <- "black"
#edge.colors[max(which(X$edge[,2] %in% which(clus==i)))+1] <- "black"
#nodelabels(LETTERS[k], node= 74, adj=c(0.5,0.5), frame = "c", bg = "white", cex = 1.0)
} else {
# Non-contiguous case: drop the first (outlying) index before colouring.
ovec <- which(X$edge[,2] %in% which(clus==i))
ovec <- ovec[-1]
edge.colors[min(ovec):max(ovec)] <- "black"
edge.colors[min(ovec)-1] <- "black"
}
}
}
# Draw the fan tree, then overlay the coloured/sized tip markers.
XX <- plot(X,type='fan', tip.color="black",edge.color=edge.colors,show.tip.label = F,edge.width = 2, label.offset=.01, cex=0.5,no.margin=T)
tiplabels(pch = 21,col = dirvec, bg = dirvec, cex = sizevec)
#edgelabels()
# Second pass over the clusters: place an arc label (letters.x[k]) around
# each highlighted cluster, mirroring the contiguity logic used above.
k = 1
for (i in unique(clus)) {
if(i>0) {
difvec <- diff(which(X$edge[,2] %in% which(clus==i)))
if(length(which(difvec>3))==0) {
nodeiddf <- as.data.frame(X$edge[min(which(X$edge[,2] %in% which(clus==i))):max(which(X$edge[,2] %in% which(clus==i))),])
#nodelabels(letters[k], node=min(nodeiddf$V1), adj=c(0.5,0.5), frame = "none", bg = "transparent", cex = 2.0,col="green")
arc.cladelabels(text=letters.x[k],node=min(nodeiddf$V1),cex = 1.0,lab.offset=1.05,ln.offset=1.01)
k = k+1
} else {
ovec <- which(X$edge[,2] %in% which(clus==i))
ovec <- ovec[-1]
nodeiddf <- as.data.frame(X$edge[min(ovec):max(ovec),])
#nodelabels(letters[k], node=min(nodeiddf$V1), adj=c(0.5,0.5), frame = "none", bg = "transparent", cex = 2.0,col="green")
arc.cladelabels(text=letters.x[k],node=min(nodeiddf$V1),cex = 1.0,lab.offset=1.05,ln.offset=1.01)
k = k+1
}
}
}
}
clus <- as.integer(clust1)
if(length(which(clus==0))>0) {
clus[which(clus==0)] <- max(unique(clust1))+1
}
sizevec <- rep(1.0,length(clus))
dirvec <- sapply(glaydf2$foldchange,function(x){ if(x>1) { return("red") } else (return("blue")) })
dirvec <- sapply(1:length(glaydf2$pvalue), function(x) { if(glaydf2$pvalue[x]>0.05) {"yellow"} else{dirvec[x]}})
sizevec[which(dirvec=="yellow")] <- 0.2
clus_pval_list <- sapply(unique(df1$ClusterLabel), function(x) { sapply(df1$ClusterLabel[which(df1$ClusterLabel==x)], function(y) { clusterdf$pvalues[which(clusterdf$name==y)] }) })
alteredClusters <- unique(glaydf2$cluster)[which(unlist(lapply(clus_pval_list, function(xx) { length(which(xx<0.05))/length(xx) }))>0.5)]
clus[which(!clus%in%alteredClusters)] <- 0 ## All the clusters that are not altered are turned off with this command
X <- as.phylo(hc)
k = 1
for (i in unique(clus)) {
if(i>0) {
difvec <- diff(which(X$edge[,2] %in% which(clus==i)))
if(length(which(difvec>3))==0) {
nodeiddf <- as.data.frame(X$edge[min(which(X$edge[,2] %in% which(clus==i))):max(which(X$edge[,2] %in% which(clus==i))),])
treeLabels[which(glaydf2$cluster==i)] <- letters.x[k]
k = k+1
} else {
ovec <- which(X$edge[,2] %in% which(clus==i))
ovec <- ovec[-1]
nodeiddf <- as.data.frame(X$edge[min(ovec):max(ovec),])
treeLabels[which(glaydf2$cluster==i)] <- letters.x[k]
k = k+1
}
}
}
#wbp <- pptx(template = system.file("data","chem_rich_temp.pptx", package = "ChemRICH" )) # use this one when using the installed packages.
#wbp <- pptx(template = "./data/chem_rich_temp.pptx")
#wbp <- addSlide( wbp, "lipidClust" )
#wbp <- addPlot( wbp, function() plot.fan.chemrich(hc,clus, dirvec,sizevec), offx =0.1 , offy = -0.1, width = 8, height = 8 , vector.graphic = FALSE )
#wbp <- addParagraph( wbp, "Compund cluster annotation mapping is provided in the xlsx output file." , offx =0.1 , offy = -0.1, width = 8, height = 8 )
#writeDoc( wbp, file = paste0("chemrich_output_tree.pptx") )
png(file="chemrich_tree.png",width=12,height=15,units="in",res=300)
plot.fan.chemrich(hc,clus, dirvec,sizevec)
text(+0.0, +0.8, "ChemRICH : Chemical Similarity Enrichment Analysis", cex = 2.0)
dev.off()
## Export the final compounds result table.
df1$TreeLabels <- treeLabels
df1$pvalue <- signif(df1$pvalue, digits = 2)
df1$foldchange <- signif(df1$foldchange, digits = 2)
df1$FDR <- signif( p.adjust(df1$pvalue), digits = 2)
gdf <- datatable(df1,options = list(pageLength = 10), rownames = F)
gdf$width <- "auto"
gdf$height <- "auto"
saveWidget(gdf,file="compoundlevel.html",selfcontained = F)
l <- list("ChemRICH_Results" = clustdf.e, "Compound_ChemRICH" = df1 )
openxlsx::write.xlsx(l, file = "ChemRICH_results.xlsx", asTable = TRUE)
}
|
0983f96e94842dd1b61677bbb08d46cd697c9958 | 0479f3d46eb3aeb10a5ca92400c7f2ba6f846744 | /code/initial_mlb_scrape.R | 26d4ac08167887fb8856637979f413669513f0cf | [] | no_license | jalgos/mlb | a3ad8ea28430c7ab0bfaf6ea86147394666fdb94 | b950673c3e27a3b165380ffd70be8e302cdeb50d | refs/heads/master | 2021-01-10T10:29:11.148559 | 2015-07-29T00:54:47 | 2015-07-29T00:54:47 | 36,795,977 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,932 | r | initial_mlb_scrape.R | # This code creates the initial SQLite data base containing all MLB data to 17th June 2015 using pitchRx
# NOTE(review): hard-coded absolute working directory -- only valid on the
# original author's machine; every relative path below depends on it.
setwd("/Users/Michael/Coding/Jalgos/MLB")
# Close every active output diversion so console output is restored.
# Behaviour matches the original for-loop: sink(NULL) is issued once per
# diversion currently on the sink stack, leaving sink.number() at 0.
sink.reset <- function() {
  while (sink.number() > 0) {
    sink(NULL)
  }
}
# Clear any stale diversions, then log all console output to mlb_scrape.txt
# (split = TRUE echoes output to the console as well as to the file).
sink.reset()
sink(file="mlb_scrape.txt", split=TRUE)
library(dplyr)
library(pitchRx)
library(DBI)
# Open the existing scrape database.  The create-new and full-scrape calls
# are kept below, commented out, for the initial run.
#mlb_db <- src_sqlite("pitchRx.sqlite3", create = TRUE)
mlb_db <- src_sqlite("pitchRx.sqlite3")
#scrape(start = "2008-01-01", end = Sys.Date()-2, connect = mlb_db$con)
#update_db(mlb_db$con) # WARNING: seems to corrupt file data when update fails
# Repair a pitchRx SQLite database after a partially-failed scrape.
#
# When pitchRx::scrape() aborts (e.g. because the column set changed), the
# rows processed so far are exported either to a "<table>_export" SQLite
# table, to "<table>_export-*.csv" files in the working directory, or both.
# This function merges those exports back into the main table:
#   1. collect the original table plus every export (SQLite and/or CSV);
#   2. give every table the union of all columns (missing ones added via
#      mutate_ so the schemas match);
#   3. concatenate everything into a "<table>_new" table;
#   4. drop the old table(s) and rename "<table>_new" to the original name.
#
# Args:
#   database:   dplyr src_sqlite handle (defaults to the global `mlb_db`).
#   mlb_table:  name of the table to repair, e.g. "atbat".
#   sql_export: TRUE if a "<table>_export" SQLite table exists.
#   csv_export: TRUE if "<table>_export-*.csv" files exist in getwd().
#
# Returns: the repaired database handle.
scrape_fix <- function(database = mlb_db, mlb_table = "atbat",
                       sql_export = TRUE, csv_export = TRUE) {
  if (!(mlb_table %in% src_tbls(database))) stop("Specified table not in database")
  if (sql_export == FALSE && csv_export == FALSE) stop("No exported files: no fix needed")
  message("Please wait, files compiling")
  table_string <- mlb_table
  table_export_string <- paste(table_string, "export", sep = "_")
  # First identify and merge all the .csv exports into one data frame
  # (if they exist).
  if (csv_export == TRUE) {
    mlb_table_export.csv <- paste(list.files(path = ".", pattern = paste(table_export_string, "*", sep = "")))
    mlb_table_export.csv_data <- rbind_all(lapply(mlb_table_export.csv, read.csv))
  }
  # Second, reference the SQLite tables.  BUG FIX: the original queried the
  # global `mlb_db` here, silently ignoring the `database` argument; use the
  # argument so the function works on whatever connection it is given.
  mlb_table_original <- tbl(database, table_string)
  if (sql_export == TRUE) mlb_table_export <- tbl(database, table_export_string)
  # Build the list of tables to merge, depending on which exports exist.
  if (sql_export == TRUE && csv_export == TRUE) {
    mlb_table_data <- list(mlb_table_original, mlb_table_export, mlb_table_export.csv_data)
  } else if (sql_export == TRUE) {
    mlb_table_data <- list(mlb_table_original, mlb_table_export)
  } else {
    mlb_table_data <- list(mlb_table_original, mlb_table_export.csv_data)
  }
  # Equalise the schemas: for each table find the variables it is missing
  # relative to the others, then add them via mutate_ so all tables share
  # the union of columns.
  mlb_table_data_vars <- lapply(mlb_table_data, colnames)
  mlb_table_data_vars_missing <- lapply(seq_along(mlb_table_data_vars), function(n)
    setdiff(unlist(mlb_table_data_vars[-n]), mlb_table_data_vars[[n]]))
  mlb_table_new <- lapply(seq_along(mlb_table_data), function(n)
    mutate_(mlb_table_data[[n]], .dots = as.list(mlb_table_data_vars_missing[[n]])))
  # dplyr is lazy, so force evaluation: materialise the mutated original
  # table as "<table>_new", then append the remaining tables to it.
  # (mlb_table_new always has >= 2 elements here because of the guard above.)
  compute(mlb_table_new[[1]], name = paste(table_string, "new", sep = "_"), temporary = FALSE)
  lapply(2:length(mlb_table_new), function(n)
    db_insert_into(con = database$con, table = paste(table_string, "new", sep = "_"),
                   values = as.data.frame(mlb_table_new[[n]])))
  # Finally, delete the original table(s) and rename "<table>_new" into place.
  dbRemoveTable(database$con, name = table_string)
  if (sql_export == TRUE) dbRemoveTable(database$con, name = table_export_string)
  dbSendQuery(database$con, paste("ALTER TABLE ", table_string, "_new RENAME to ", table_string, sep = ""))
  return(database)
}
# Build vectors of table names needing repair, one per export scenario:
#   - tables with only a "<name>_export" table in SQLite,
#   - tables with only "<name>_export-*.csv" files on disk,
#   - tables with both.
target_tables_sql <- sub("(.*?)_.*", "\\1", grep("_export", src_tbls(mlb_db), value = TRUE))
target_tables_csv <- unique(sub("(.*?)_export-.*", "\\1", list.files(path = ".", pattern = "*.csv")))
target_tables_both <- intersect(target_tables_sql, target_tables_csv)
target_tables_sql <- setdiff(target_tables_sql, target_tables_both)
target_tables_csv <- setdiff(target_tables_csv, target_tables_both)
# Apply the fix to each case (both SQLite & csv exports, only csv, only sql).
# seq_along() makes each loop a no-op when its vector is empty, so these
# calls no longer fail when a scenario has no affected tables.
# BUG FIXES vs. the original: the SQL-only line called a misspelled
# `scarpe_fix` and passed `sql_csv=FALSE` (a non-existent argument) outside
# the function call; it is now `scrape_fix(..., csv_export = FALSE)`.
sapply(seq_along(target_tables_both), function(n) scrape_fix(mlb_table = target_tables_both[n]))
sapply(seq_along(target_tables_csv), function(n) scrape_fix(mlb_table = target_tables_csv[n], sql_export = FALSE))
sapply(seq_along(target_tables_sql), function(n) scrape_fix(mlb_table = target_tables_sql[n], csv_export = FALSE))
# Housekeeping: move any leftover export files into a dated directory.
# Change the date as necessary.
scrape_export <- list.files(".", pattern = "*export*")
if (length(scrape_export) > 0) {
  export_dir <- paste("scrape_export", "2015-06-17", sep = "_")
  dir.create(export_dir)
  for (i in seq_along(scrape_export)) {
    file.copy(scrape_export[i], export_dir)
    file.remove(scrape_export[i])
  }
}
savehistory()
|
39290fbc4dbd0b825831f33f5b7704c35bf694b1 | bd03530f8b32c309cc4ad02ca83fa0bb40ef7d70 | /man/adjust_xy.Rd | 09bdf1acd976f05189afb24266c01b2e8b31d60b | [
"MIT"
] | permissive | evanmascitti/soilmesh | e1e2baa178f9332c194f9d4e39b13be45378a186 | f2b7628caa0bda01642867480493eee1c0966175 | refs/heads/master | 2023-08-11T20:50:11.678091 | 2021-09-27T23:41:14 | 2021-09-27T23:41:14 | 310,968,643 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,007 | rd | adjust_xy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adjust_xy.R
\name{adjust_xy}
\alias{adjust_xy}
\title{Correct x and y offset for a soil specimen}
\usage{
adjust_xy(mesh)
}
\arguments{
\item{mesh}{a "mesh3d" object which is already oriented with its surface lying parallel to the x-y plane}
}
\value{
a "mesh3d" object with its center lying in the normal to the x-y plane
}
\description{
After re-orienting and moving the specimen with \link{orient_sample}, the minute error in the horizontal plane needs to be corrected prior to removing the aluminum sample holder and the outermost portion of the soil surface. This function computes the difference between the maximum and minimum values observed in the x and y directions and corrects the vertex coordinates so the center is truly located at x=0 and y=0.
}
\examples{
adjusted <- adjust_xy(mesh = untrimmed_mesh1)
rgl::shade3d(untrimmed_mesh1, color="firebrick")
rgl::shade3d(adjusted, color= "darkblue")
add_origin_axes()
}
|
df8c31dde2f6ca513d6137b94fa0bdf6782440f8 | 9c2f40ae8269413feb32cffa6d581dfe9f931dd0 | /man/constraint_nomatch.Rd | 1ddc9b5a377c90f54f904f3f786f65b67e758d1d | [
"MIT"
] | permissive | tpetricek/datadiff | ed5ca6cdfe8129ed947c24a42c62ea265aad86ef | 8941269b483da9abcacde804b7f6b6e0a122a57a | refs/heads/master | 2020-07-31T19:09:25.118489 | 2019-09-25T23:55:01 | 2019-09-25T23:55:01 | 210,723,004 | 0 | 0 | MIT | 2019-09-25T00:39:49 | 2019-09-25T00:39:49 | null | UTF-8 | R | false | true | 248 | rd | constraint_nomatch.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constraints.R
\name{constraint_nomatch}
\alias{constraint_nomatch}
\title{Constraint nomatch}
\usage{
constraint_nomatch(col1, col2)
}
\description{
Constraint nomatch
}
|
ed27784985200b0d8ad56c51a210ffdad7cfec22 | 0ab471ed346069af3abc4d90bba4957ee404b512 | /plot3.R | 48a6e31352670ea03d6ddcf299e6dfed4f5e94cb | [] | no_license | donhicks/ExploratoryDataAnalysis-CourseProject1 | 8e1ae9b97f6ba73fbfae37a56c2bff76a24eb660 | 731e71b6911170ebdebac1d26b7eb1607ec48f82 | refs/heads/master | 2020-12-24T14:10:31.720227 | 2014-09-06T12:04:24 | 2014-09-06T12:04:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,173 | r | plot3.R | plot3 <-function(){
##check working directory
file<-"household_power_consumption.txt"
wf<-paste0(getwd(),"/",file)
##read the outcome data
d = read.table(wf,header=TRUE,
sep=";", na.strings=c("?"),
col.names=c("Date", "Time","Global_active_power","Global_reactive_power",
"Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"),
fill=FALSE,blank.lines.skip = TRUE,
colClasses=c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"),
strip.white=TRUE)
d$Date<- as.Date(d$Date,"%d/%m/%Y")
d<-d[d$Date >= "2007-02-01" & d$Date<= "2007-02-02",]
d$Time<- as.POSIXct(paste(d$Date,d$Time),format="%Y-%m-%d %H:%M:%S")
png(file='plot3.png')
plot(d$Time,d$Sub_metering_1,type="l",xlab='',ylab="Energy Sub metering",col="black")
lines(d$Time,d$Sub_metering_2,col="red")
lines(d$Time,d$Sub_metering_3,col="blue")
legend("topright",lty=1,col=c('black','red','blue'),legend=c('Sub_metering_1','Sub_metering_2','Sub_metering_3'))
dev.off()
return()
}
|
f4706482690c656091e9c7b8ae807c8c566aa7c0 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/FRCC/examples/plot_units.Rd.R | 50be8292fa86404a8afcc8340d90dd5d63860ba7 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 523 | r | plot_units.Rd.R | library(FRCC)
### Name: plot_units
### Title: Plots the experimental units in the Canonical Variates Space
### Aliases: plot_units
### Keywords: ~kwd1 ~kwd2
### ** Examples
#Example: NCI-60 micrRNA Data
data("Topoisomerase_II_Inhibitors")
data("microRNA")
my_res <- frcc(t(microRNA),-1*t(Topoisomerase_II_Inhibitors))
for( i in 1:dim(microRNA)[2])
{
colnames(microRNA)[i]<-substr(colnames(microRNA)[i], 1, 2)
}#end for i
dev.new()
plot_units(t(microRNA),-1*t(Topoisomerase_II_Inhibitors),my_res,1,1,text_size=0.01)
|
1c108cadc9f2e920e5a65b0ff7fb4cd3f3847ee0 | 765cc1a769e1667310eff2ea65646543e39588e0 | /R/BUSCAR_AREAS_DE_ATUACAO.R | 3fbfffb1c0e662fc152d13766f38f21705b49aff | [] | no_license | rcbraga/elattes | b66d65d7bb635a80cb343be5deae4cfc2aad3887 | 74578e0b3b70a7a1c2882c48d6831903b2737ad1 | refs/heads/master | 2022-01-24T13:42:25.134874 | 2019-05-31T01:47:44 | 2019-05-31T01:47:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,293 | r | BUSCAR_AREAS_DE_ATUACAO.R | #' @title BUSCAR_AREAS_DE_ATUACAO
#'
#' Extract the "areas of expertise" (areas de atuacao) entries from a parsed
#' Lattes CV XML document.
#'
#' @param doc a parsed XML document (e.g. as returned by XML::xmlParse)
#'
#' @return a list of environments, one per area, each holding the fields
#'   NOME_GRANDE_AREA_DO_CONHECIMENTO, NOME_DA_AREA_DO_CONHECIMENTO,
#'   NOME_DA_SUB_AREA_DO_CONHECIMENTO and NOME_DA_ESPECIALIDADE. When the CV
#'   declares no areas, a single entry with empty-string fields is returned.
#'
#' @export BUSCAR_AREAS_DE_ATUACAO
#'
BUSCAR_AREAS_DE_ATUACAO<-function(doc){
  # Local constructor for one area record. Environments (not lists) are kept
  # deliberately, for backward compatibility with existing callers.
  nova_area <- function(grande_area = "", area = "", sub_area = "", especialidade = "") {
    a <- new.env(parent = emptyenv())
    a$NOME_GRANDE_AREA_DO_CONHECIMENTO <- grande_area
    a$NOME_DA_AREA_DO_CONHECIMENTO <- area
    a$NOME_DA_SUB_AREA_DO_CONHECIMENTO <- sub_area
    a$NOME_DA_ESPECIALIDADE <- especialidade
    a
  }
  areas <- list()
  # The node path may be absent in some CVs; a failure simply leaves `areas`
  # empty so the default record below is returned.
  tryCatch(areas <- xmlRoot(doc)[["DADOS-GERAIS"]][["AREAS-DE-ATUACAO"]][c("AREA-DE-ATUACAO")]
           ,
           error = function(e) {
           } )
  if (length(areas) > 0) {
    # unname(): the original loop built an unnamed list, so the node names
    # produced by lapply() are stripped to preserve the output shape.
    Resultado <- unname(lapply(areas, function(area) {
      nova_area(
        grande_area = xmlGetAttr(area, "NOME-GRANDE-AREA-DO-CONHECIMENTO"),
        area = xmlGetAttr(area, "NOME-DA-AREA-DO-CONHECIMENTO"),
        sub_area = xmlGetAttr(area, "NOME-DA-SUB-AREA-DO-CONHECIMENTO"),
        especialidade = xmlGetAttr(area, "NOME-DA-ESPECIALIDADE")
      )
    }))
  } else {
    # No declared areas: return one placeholder record with empty fields.
    Resultado <- list(nova_area())
  }
  Resultado
}
|
f8e9932ca43240799da64f1824b6a6e93a91ff77 | 79328b5dd4560365d89fdffc5f0d8a402ff2b6c6 | /pkg/ternaryplot/man/xy2ternary-methods.Rd | 7192c1f10c5118c27fe8cdc7cb2cc4f0787efd20 | [] | no_license | julienmoeys/ternaryplot | f1f97e2e409e164f8390acdaa677851a75ba1f2f | 50c9901ce03b8e857eb1990564c43e0e5e58e36e | refs/heads/master | 2021-01-21T04:47:19.042587 | 2016-06-17T16:52:13 | 2016-06-17T16:52:13 | 49,194,207 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,892 | rd | xy2ternary-methods.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xy2ternary.R
\name{xy2ternary}
\alias{xy2ternary}
\alias{xy2ternary.character}
\alias{xy2ternary.ternarySystem}
\title{Converts x-y coordinates into ternary point-data.}
\usage{
xy2ternary(s, data, xyNames = c("x", "y"), ...)
\method{xy2ternary}{character}(s, data, xyNames = c("x", "y"), ...)
\method{xy2ternary}{ternarySystem}(s, data, xyNames = c("x", "y"), ...)
}
\arguments{
\item{s}{A \code{\link[ternaryplot]{ternarySystem}} object or a
character string naming an pre-defined
\code{\link[ternaryplot]{ternarySystem-class}}.}
\item{data}{A \code{\link[base]{data.frame}} or a
\code{\link[base]{matrix}} with x-y coordinates.}
\item{xyNames}{A vector of 2 character strings. Names of the \code{x}-
and \code{y}-variables in table \code{data}.}
\item{\dots}{Additional parameters passed to specific methods.}
}
\value{
Returns a \code{\link[base]{data.frame}} with ternary
point-data. As many rows as \code{nrow(data)} and 3 columns,
one per ternary-variable (same names as \code{blrNames(s)}).
}
\description{
Converts x-y coordinates into ternary point-data (bottom,
left, right axis), according to the specification of a
\code{\link[ternaryplot]{ternarySystem-class}}
}
\examples{
library( "ternaryplot" )
hypres <- getTernarySystem( "hypres" )
xyData <- data.frame(
"x" = c( 0, 100, 0, 100, 50 ),
"y" = c( 0, 100, 100, 0, 50 )
)
tData <- xy2ternary( s = hypres, data = xyData, 
    xyNames = c( "x", "y" ) ) 
tData
# # Visualise the result:
# ternaryStyle( "publication" )
# ternaryPlot( hypres )
# points( x = xyData[, "x" ], y = xyData[, "y" ],
# pch = 1, cex = 2 )
# notNeg <- apply( X = tData, MARGIN = 1,
# FUN = function(x){ !any(x < 0) } )
# ternaryPoints( s = hypres, x = tData[ notNeg, ],
# pch = 3 )
}
|
6e1329205708e3cc8315aae7c5e641ffd9508187 | e64d540d5ba99edfa89a82a2e0e413d4eb7aebdd | /R/roster-package.R | badbd5b6b7b572b654483574ea19320b95c1ccdd | [] | no_license | jwmortensen/roster | 89eeb9f5ca8941ce4962d384d3aa6798318f6da8 | d7afcac6b8dc6e04283b5ac4327a1fb3fee18349 | refs/heads/master | 2021-09-19T22:18:57.282988 | 2018-08-01T05:29:21 | 2018-08-01T05:29:21 | 103,295,413 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,545 | r | roster-package.R | #' roster
#'
#' @name roster
#' @docType package
NULL
#' Play-by-play data
#'
#' Play by play data from a game between the Memphis Grizzlies and Sacramento
#' Kings on 1 January 2017.
#'
#'
#' @format A data frame with 525 rows and 75 variables
#' @name pbp
#' @docType data
NULL
#' Player data
#'
#' Player information for game participants
#'
#' \itemize{
#' \item player_id
#' \item full_name
#' \item team_id
#' \item team_market
#' \item team_name
#' \item season
#' }
#' @format a data frame with 23 rows and 6 variables
#' @name players
#' @docType data
NULL
#' 
#' Make sure play-by-play data is sorted first by period, then by clock
#'
#' Rows are ordered by game, ascending period, and descending game clock --
#' i.e. chronological order within each game (the clock counts down).
#' Character clocks are parsed with lubridate ("H:M:S" or "M:S"); clocks that
#' are already lubridate Periods are used as-is.
#'
#' @param pbp_df a data.frame with game_id, period_sequence and clock columns
#' @return the same data.frame, sorted chronologically
sort_pbp <- function(pbp_df) {
  # TRUE for an "H:M:S" clock string, FALSE for "M:S"; anything else errors.
  check_time_format <- function(x) {
    if (grepl(":(.*):", x)) {
      TRUE
    } else if (grepl("^[^:]+:[^:]+$", x)) { # regex for only one colon
      FALSE
    } else {
      stop("invalid time format")
    }
  }
  if (!inherits(pbp_df$clock, "Period")) {
    # All rows must share a single format: a mix of H:M:S and M:S is ambiguous.
    num_hms <- sum(vapply(pbp_df$clock, check_time_format, logical(1)))
    if (num_hms == nrow(pbp_df)) {
      sortable_clock <- lubridate::hms(pbp_df$clock)
    } else if (num_hms == 0) {
      sortable_clock <- lubridate::ms(pbp_df$clock)
    } else {
      stop("inconsistent game clock format")
    }
  } else {
    sortable_clock <- pbp_df$clock
  }
  # Deliberately base-R order() rather than dplyr::arrange(): arrange mangled
  # lubridate Periods here (e.g. 12M 0S came back as 12M 39S).
  pbp_df <- pbp_df[order(pbp_df$game_id, -pbp_df$period_sequence, sortable_clock, decreasing = TRUE), ]
  pbp_df
}
#' Sort play-by-play data for downstream processing
#'
#' Arranges rows by game, then period, then descending game clock (the clock
#' counts down within a period, so descending clock is chronological order).
#'
#' @param pbp_df a data.frame with game_id, period_sequence and clock columns
#' @return the same data.frame, arranged chronologically
check_sort <- function(pbp_df) {
  # BUG FIX: the pipe was used without being brought into scope; define it
  # locally (as sort_pbp did) so the function works without library(dplyr).
  `%>%` <- dplyr::`%>%`
  pbp_df <- pbp_df %>%
    dplyr::arrange(game_id, period_sequence, dplyr::desc(clock))
  pbp_df
}
#' Assign an id to each possession, which can be used to calculate possessions
#' for each lineup.
#'
#' Possessions end on made shots (unless followed by free throws), defensive
#' rebounds, final made free throws, turnovers, and period ends. Free-throw
#' "x of y" counters in the event descriptions are repaired first, because the
#' end-of-possession rules parse them.
#'
#' @param pbp_df the play by play data.frame
#' @return data.frame that mirrors pbp_df but with additional possession_id,
#'   possession_team ("HOME"/"AWAY" on possession-closing events) and
#'   points_in_possession columns
#' @export
assign_possession_ids <- function(pbp_df) {
  require(dplyr)
  require(stringr)
  # --- Repair free-throw counters -----------------------------------------
  # Within each game/period/clock group, renumber " free throw x of y" events
  # so x matches the actual attempt order and y the actual attempt count.
  pbp_df <- pbp_df %>%
    arrange(game_id, period_sequence, desc(clock)) %>%
    group_by(game_id, period_sequence, clock) %>%
    mutate(is_freethrow = grepl(pattern = " free throw ", x = event_description)) %>%
    mutate(listed_fta = ifelse(is_freethrow == TRUE,
                               str_extract(string = event_description, "[0-9]+"),
                               NA),
           listed_fta_tot = ifelse(is_freethrow == TRUE,
                                   last(unlist(str_extract_all(string = event_description, "[0-9]+"))),
                                   NA)) %>%
    group_by(game_id, period_sequence, clock, is_freethrow) %>%
    mutate(fta_event_id = seq_len(n())) %>%
    mutate(is_freethrow_error_attempt = !listed_fta == row_number()) %>%
    mutate(is_freethrow_error_total = !last(fta_event_id) == listed_fta_tot) %>%
    ungroup() %>%
    mutate(event_description = sapply(seq_len(n()), function(x)
      ifelse(is.na(is_freethrow_error_attempt[x]) | !is_freethrow_error_attempt[x],
             event_description[x],
             gsub(pattern = listed_fta[x], fta_event_id[x],
                  x = event_description[x], fixed = TRUE)))) %>%
    mutate(event_description = sapply(seq_len(n()), function(x)
      ifelse(is.na(is_freethrow_error_total[x]) | !is_freethrow_error_total[x],
             event_description[x],
             gsub(pattern = listed_fta_tot[x], fta_event_id[x],
                  x = event_description[x], fixed = TRUE)))) %>%
    # BUG FIX: the original listed -is_freethrow_error_attempt twice and never
    # dropped -is_freethrow_error_total, leaking a temp column into the output.
    select(-is_freethrow, -is_freethrow_error_attempt, -is_freethrow_error_total,
           -fta_event_id, -listed_fta_tot, -listed_fta)
  # --- Identify possession-ending events -----------------------------------
  end_possession_events <- pbp_df %>%
    group_by(game_id, period_sequence, clock) %>%
    filter( (shot_made == TRUE & !any(event_type %in% "freethrowmade")) |
              (event_type %in% "rebound" & rebound_type %in% "defensive") |
              (event_type %in% "freethrowmade" & grepl("1 of 1", event_description) &
                 all(event_type != "technicalfoul")) |
              (event_type %in% "freethrowmade" & grepl("(2 of 2|3 of 3)", event_description)) |
              (event_type %in% "turnover")) %>%
    # Keep only the last row of each event_type within the group.
    # BUG FIX: the original matched row numbers with grepl(), so e.g. row 1
    # falsely matched a pattern containing "10"; %in% is the intended exact
    # comparison.
    filter(row_number() %in% sapply(seq_along(unique(event_type)),
                                    function(x) last(which(event_type %in% unique(event_type)[x])))) %>%
    ungroup() %>%
    select(event_id) %>%
    distinct() %>%
    pull()
  # Period-end events also close a possession unless already counted above.
  end_period <- pbp_df %>%
    group_by(game_id, period_sequence, clock) %>%
    filter(any(event_type == "endperiod")) %>%
    filter(row_number() == n(), !(event_id %in% end_possession_events)) %>%
    ungroup() %>%
    select(event_id) %>%
    distinct() %>%
    pull()
  # Events immediately following an ending event start a new possession.
  start_possession_events <- pbp_df %>%
    select(event_id) %>%
    distinct() %>%
    mutate(lag_event = lag(event_id)) %>%
    filter(lag_event %in% c(end_possession_events, end_period)) %>%
    select(event_id) %>%
    pull()
  # --- Number possessions and attribute each one to a team ------------------
  pbp_df <- pbp_df %>%
    group_by(game_id) %>%
    mutate(lag_event = lag(event_id, default = "")) %>%
    mutate(possession_id = 1 + cumsum(event_id %in% start_possession_events &
                                        event_id != lag_event)) %>%
    mutate(lead_poss = lead(possession_id, default = "")) %>%
    tidyr::fill(possession_team_id) %>%
    # Tag only the final event of each possession; scoring/turnover/offensive
    # foul events belong to the event team, everything else to the opponent.
    mutate(possession_team = if_else(lead_poss == possession_id, as.character(NA),
                                     if_else((event_type %in% c("twopointmade",
                                                                "threepointmade",
                                                                "freethrowmade",
                                                                "turnover",
                                                                "offensivefoul")),
                                             ifelse(home_team_id == event_team_id,
                                                    "HOME", "AWAY"),
                                             ifelse(home_team_id == event_team_id,
                                                    "AWAY", "HOME")))) %>%
    mutate(possession_team = ifelse(is.na(possession_team) &
                                      event_type %in% "endperiod",
                                    ifelse(home_team_id == possession_team_id,
                                           "HOME", "AWAY"),
                                    possession_team)) %>%
    group_by(game_id, possession_id) %>%
    mutate(points_in_possession = sum(points, na.rm = TRUE)) %>%
    select(-lag_event, -lead_poss) %>%
    ungroup()
  pbp_df
}
#' Assign an id to each lineup in order to calculate +/-
#'
#' The five home and five away player id columns are sorted within each row so
#' the same ten players always map to the same lineup id regardless of the
#' order in which they appear across columns.
#'
#' @param pbp_df the play by play data.frame
#' @return data.frame that mirrors pbp_df but with an additional lineup_id column
#' @export
assign_lineup_ids <- function(pbp_df) {
  `%>%` <- dplyr::`%>%`
  pbp_df <- check_sort(pbp_df)
  # sort columns to avoid duplicate lineups (same 5 players, different order)
  home_players <- pbp_df %>%
    dplyr::select(dplyr::matches("home_player(.*)id"))
  home_players <- t(apply(home_players, 1, sort, na.last = T))
  away_players <- pbp_df %>%
    dplyr::select(dplyr::matches("away_player(.*)id"))
  away_players <- t(apply(away_players, 1, sort, na.last = T))
  # Write the sorted ids back into the named player columns.
  pbp_df <- pbp_df %>%
    dplyr::mutate(home_player_one_id = home_players[, 1],
                  home_player_two_id = home_players[, 2],
                  home_player_three_id = home_players[, 3],
                  home_player_four_id = home_players[, 4],
                  home_player_five_id = home_players[, 5],
                  away_player_one_id = away_players[, 1],
                  away_player_two_id = away_players[, 2],
                  away_player_three_id = away_players[, 3],
                  away_player_four_id = away_players[, 4],
                  away_player_five_id = away_players[, 5])
  # One row per distinct 10-player combination; its row number is the lineup id.
  lineup_df <- pbp_df %>%
    dplyr::select(dplyr::matches("(home|away)_player(.*)id")) %>%
    na.omit() %>%
    unique() %>%
    dplyr::mutate(lineup_id = 1:n())
  text_nums <- c("one", "two", "three", "four", "five")
  pbp_df <- dplyr::left_join(pbp_df, lineup_df,
                             by = c(paste0("home_player_", text_nums, "_id"),
                                    paste0("away_player_", text_nums, "_id")))
  # Within each game/period/clock group keep the lineup id only on the first
  # event, then fill it downward so simultaneous events share one lineup.
  # NOTE(review): n(), if_else(), row_number() and desc() are unqualified here,
  # so this relies on dplyr being attached; also, grouping by desc(clock) adds
  # a `desc(clock)` column to the returned data.frame -- confirm callers
  # tolerate it.
  pbp_df <- pbp_df %>%
    dplyr::group_by(game_id, period_sequence, desc(clock)) %>%
    dplyr::mutate(lineup_id = if_else(row_number() != 1, as.integer(NA), lineup_id)) %>%
    tidyr::fill(lineup_id) %>%
    dplyr::ungroup()
  pbp_df
}
#' Calculate PM (plus/minus) for each lineup in a single game
#'
#' Net rating per lineup: points scored per 100 home possessions minus points
#' allowed per 100 away possessions, computed from play-by-play data.
#'
#' @param pbp_df the play by play data.frame
#' @return data.frame with one row per lineup: possession counts, points,
#'   per-possession rates, the `pls_min` net rating, and the ten player id
#'   columns (as factors with shared levels) identifying the lineup
#' @export
plus_minus <- function(pbp_df) {
  require(dplyr)  # NOTE(review): the unqualified verbs below rely on this attach
  # Derive possession and lineup identifiers first.
  pbp_df <- assign_possession_ids(pbp_df)
  pbp_df <- assign_lineup_ids(pbp_df)
  # Per-lineup totals, split by which team had the ball.
  pls_min_df <- pbp_df %>%
    filter(!is.na(lineup_id), !is.na(possession_team)) %>%
    group_by(lineup_id) %>%
    summarise(home_possessions = sum(possession_team == "HOME"),
              home_points = sum(if_else(possession_team == "HOME", points_in_possession, as.integer(0))),
              home_points_per_poss = home_points / home_possessions,
              away_possessions = sum(possession_team == "AWAY"),
              away_points = sum(if_else(possession_team == "AWAY", points_in_possession, as.integer(0))),
              away_points_per_poss = away_points / away_possessions,
              total_possessions = home_possessions + away_possessions)
  # Game-average points per possession, used to impute lineups that never had
  # a possession on one side of the ball (avoids 0/0 = NaN).
  avg_rating <- sum(pls_min_df$home_points, pls_min_df$away_points) /
    sum(pls_min_df$away_possessions, pls_min_df$home_possessions)
  pls_min_df <- pls_min_df %>%
    mutate(home_points_per_poss = if_else(home_possessions == 0,
                                          avg_rating, home_points_per_poss),
           away_points_per_poss = if_else(away_possessions == 0,
                                          avg_rating, away_points_per_poss)) %>%
    mutate(pls_min = 100 * (home_points_per_poss - away_points_per_poss))
  # Common factor levels across all ten player id columns so model matrices
  # built downstream (e.g. in apm()) line up column-for-column.
  player_levels <- pbp_df %>%
    select(matches("(home|away)_player(.*)id")) %>%
    unlist() %>%
    na.omit() %>%
    unique()
  make_player_factor <- function(vec) {
    # exclude = NULL keeps NA as an explicit factor level
    factor(vec, levels = player_levels, exclude = NULL)
  }
  player_df <- pbp_df %>%
    select(lineup_id, matches("(home|away)_player(.*)id")) %>%
    distinct() %>%
    mutate_at(vars(matches("player")), make_player_factor)
  # Attach the player columns back onto the per-lineup summary.
  pls_min_df <- left_join(pls_min_df, player_df, by = c("lineup_id"))
  pls_min_df
}
# Offense/defense adjusted plus-minus via ridge regression.
# For each possession (or each aggregated offense/defense matchup when
# aggregate = TRUE), builds a sparse design matrix of 0/1 indicators for the
# five offensive and five defensive players, then fits a ridge (alpha = 0)
# glmnet model of points (scaled by 100) on those indicators.
# Not exported; pbp_df is a play-by-play data.frame as used elsewhere here.
offdef_apm <- function(pbp_df, aggregate = F) {
  require(Matrix)
  require(dplyr)
  require(glmnet)
  # Reorient one play-by-play row into offense/defense columns depending on
  # which team had the ball on that possession.
  process_row <- function(row) {
    home_player_ids <- row %>% select(matches("home_player(.*)id"))
    away_player_ids <- row %>% select(matches("away_player(.*)id"))
    if (row$possession_team == "HOME") {
      df <- data.frame(row$points_in_possession, home_player_ids, away_player_ids, stringsAsFactors = F)
    } else if (row$possession_team == "AWAY") {
      df <- data.frame(row$points_in_possession, away_player_ids, home_player_ids, stringsAsFactors = F)
    }
    text_nums <- c("one", "two", "three", "four", "five")
    names(df) <- c("points", paste0("offense_player_", text_nums, "_id"),
                   paste0("defense_player_", text_nums, "_id"))
    df
  }
  pbp_df <- assign_possession_ids(pbp_df)
  pbp_df <- assign_lineup_ids(pbp_df)
  # Keep only rows that close a possession with a full lineup recorded.
  pm_df <- pbp_df %>%
    filter(!is.na(possession_team), !is.na(home_player_one_id))
  out_df <- as.data.frame(t(sapply(1:nrow(pm_df), function(i) { process_row(pm_df[i, ]) })))
  # Shared factor levels across all player columns so the ten model matrices
  # below have identical column sets and can be summed.
  player_levels <- out_df %>%
    select(matches("player")) %>%
    unlist() %>%
    na.omit() %>%
    unique()
  make_player_factor <- function(vec) {
    factor(vec, levels = player_levels, exclude = NULL)
  }
  out_df <- out_df %>%
    mutate_at(vars(matches("player")), make_player_factor) %>%
    mutate(points = as.integer(points))
  # Optionally collapse identical offense/defense matchups into one row,
  # weighted below by the number of possessions it represents.
  if(aggregate) {
    out_df <- out_df %>%
      group_by_at(vars(matches("player"))) %>%
      summarise(points = sum(points), possessions = n()) %>%
      mutate(points_per_possession = points / possessions)
  }
  # One indicator matrix per on-court slot; summing them yields a 0/1 matrix
  # marking the five offensive (O) and five defensive (D) players per row.
  O1 <- sparse.model.matrix(~-1 + offense_player_one_id, out_df)
  O2 <- sparse.model.matrix(~-1 + offense_player_two_id, out_df)
  O3 <- sparse.model.matrix(~-1 + offense_player_three_id, out_df)
  O4 <- sparse.model.matrix(~-1 + offense_player_four_id, out_df)
  O5 <- sparse.model.matrix(~-1 + offense_player_five_id, out_df)
  O <- O1 + O2 + O3 + O4 + O5
  D1 <- sparse.model.matrix(~-1 + defense_player_one_id, out_df)
  D2 <- sparse.model.matrix(~-1 + defense_player_two_id, out_df)
  D3 <- sparse.model.matrix(~-1 + defense_player_three_id, out_df)
  D4 <- sparse.model.matrix(~-1 + defense_player_four_id, out_df)
  D5 <- sparse.model.matrix(~-1 + defense_player_five_id, out_df)
  D <- D1 + D2 + D3 + D4 + D5
  colnames(O) <- paste0("offense_", player_levels)
  colnames(D) <- paste0("defense_", player_levels)
  X <- cbind(O, D)
  # NOTE(review): `nas` is computed but never used -- candidate for removal.
  nas <- apply(X, 1, function(x) { sum(is.na(x)) })
  y <- if (aggregate) 100 * out_df$points_per_possession else 100 * out_df$points
  w <- if (aggregate) out_df$possessions else rep(1, nrow(X))
  # NOTE(review): the cross-validated fit is immediately overwritten by a fit
  # at the hard-coded lambda = 137.4059 (presumably a previously chosen
  # cv.glmnet result), so the first fit is discarded -- confirm intent.
  fit <- cv.glmnet(X, y, alpha = 0, weights = w, intercept = T)
  fit <- glmnet(X, y, alpha = 0, weights = w, lambda = 137.4059, intercept = T)
  fit
}
#' Calculate adjusted +/- (APM)
#'
#' Fits a ridge (alpha = 0) glmnet regression of lineup plus-minus on signed
#' player indicators: +1 for each home player and -1 for each away player in
#' the lineup.
#'
#' @param pls_min_df a data.frame as produced by plus_minus(), with the ten
#'   player id factor columns and a pls_min column
#' @param players_df a data.frame with player_id and minutes columns, used to
#'   restrict the model to players above the minutes threshold
#' @param minutes_threshold minimum minutes a player must exceed to be included
#' @param weights if TRUE, weight each lineup row by its total possessions
#' @return a fitted glmnet object (note: not a data.frame)
#' @export
apm <- function(pls_min_df, players_df, minutes_threshold = 0, weights = T) {
  require(glmnet)
  # All ten player factor columns must share identical levels; otherwise the
  # indicator matrices built below would not be column-aligned.
  levels_list <- lapply(pls_min_df %>% select(matches("player(.*)id")), levels)
  if (length(unique(levels_list)) == 1) {
    # +1 indicators for home slots, -1 for away slots; summing gives one
    # signed column per player.
    X1 <- sparse.model.matrix(~-1 + home_player_one_id, pls_min_df)
    X2 <- sparse.model.matrix(~-1 + home_player_two_id, pls_min_df)
    X3 <- sparse.model.matrix(~-1 + home_player_three_id, pls_min_df)
    X4 <- sparse.model.matrix(~-1 + home_player_four_id, pls_min_df)
    X5 <- sparse.model.matrix(~-1 + home_player_five_id, pls_min_df)
    X6 <- -1 * sparse.model.matrix(~-1 + away_player_one_id, pls_min_df)
    X7 <- -1 * sparse.model.matrix(~-1 + away_player_two_id, pls_min_df)
    X8 <- -1 * sparse.model.matrix(~-1 + away_player_three_id, pls_min_df)
    X9 <- -1 * sparse.model.matrix(~-1 + away_player_four_id, pls_min_df)
    X10 <- -1 * sparse.model.matrix(~-1 + away_player_five_id, pls_min_df)
    X <- X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10
    colnames(X) <- levels_list[[1]]
    # Drop low-minute players to stabilise the estimates.
    included_players <- players_df %>%
      filter(minutes > minutes_threshold) %>%
      select(player_id) %>%
      pull()
    X <- X[, colnames(X) %in% included_players]
    w <- if (weights) (pls_min_df$total_possessions) else rep(1, nrow(pls_min_df))
    fit <- glmnet(X, pls_min_df$pls_min,
                  weights = w,
                  alpha = 0,
                  intercept = T)
    fit
  } else {
    stop("at least one player_id column has mismatched factor levels.")
  }
}
# Open an interactive viewer on the key possession/lineup columns of a
# play-by-play data.frame (debugging aid; relies on dplyr being attached).
view_pbp <- function(pbp_df) {
  View(select(pbp_df,
              possession_id, lineup_id, clock,
              event_description, possession_team, points_in_possession))
}
|
b1e46f0fbe7ec1d5d75ea3795e945a7785a513a4 | 7f3d861fca2fed7843457c45bea42e7e4b83f160 | /main.R | 116a855a69e46673fdf44be156fca4ce86c508de | [] | no_license | cardat/DatSci_CNOSSOS_au_feasibility_report | 993339e0e15ac3158d5bec3ab9137dc3ff25e2a9 | 70009150e7c3d55f5f1b002e25dc7377e2d9fcef | refs/heads/master | 2023-05-25T12:46:13.326401 | 2020-04-16T16:38:31 | 2020-04-16T16:38:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 287 | r | main.R | ## CNOSSOS feasibility study using Banskstown, Australia
## christygeromboux
# Load libraries
library(rmarkdown)
library(bookdown)    # provides the pdf_document2 output format used below
library(yaml)
library(knitr)
#library(pander)
library(tinytex)
library(kableExtra)
# Create the report: render report.Rmd to PDF with bookdown's pdf_document2
rmarkdown::render("report.Rmd", "pdf_document2")
|
6ab9d5484ca85045508498e416f14ff7ace4682f | 16073ff9fa007b8239466aecc3a0b61deb8e5bec | /man/init_cla.Rd | cbdae82f70682600dea458b63c5e9b4e7e7c31b6 | [] | no_license | alejandro-sotolongo/CriticalLineAlgo | 5cdd5a1ba7bdb1b09c1715575fd9138bc075e7a6 | 37904abaca6dc455ca67c97b926385161b707c65 | refs/heads/master | 2020-12-14T04:12:25.661157 | 2020-09-08T18:05:26 | 2020-09-08T18:05:26 | 234,633,974 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 472 | rd | init_cla.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cla.R
\name{init_cla}
\alias{init_cla}
\title{Initialize Critical Line Algo}
\usage{
init_cla(mu_vec, low_bound, up_bound)
}
\arguments{
\item{mu_vec}{vector or column matrix of expected returns}

\item{low_bound}{vector of lower bounds on the weights}

\item{up_bound}{vector of upper bounds on the weights}
}
\value{
list containing initial weight vector and free weight position
}
\description{
Initialize Critical Line Algo
}
|
db75af98d37abc01b40814658c72dba5764723db | 6ee1d2e303700be55ddf55a10b3ab7771a0411d2 | /scripts/prepare_data.R | 2a3f2aca6fddd12923211ba0297da66bc8a19811 | [] | no_license | nrlottig/14C_v_freewaterO2 | 75ad8cddaae4872da4df57f000ca47464176c12e | ea2b9984563b961afe19c64bc0a3bb20e3bc3d05 | refs/heads/master | 2023-05-22T16:27:41.201426 | 2021-06-10T16:09:34 | 2021-06-10T16:09:34 | 286,758,885 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,858 | r | prepare_data.R | #==========
#========== Preliminaries
# NOTE(review): rm(list = ls()) wipes the whole global environment when this
# script is sourced; generally discouraged -- consider removing.
rm(list=ls())
# load packages
library(tidyverse)
library(lubridate)
library(LakeMetabolizer)
library(rstan)
library(patchwork)
library(plotly)
# project helpers (e.g. get.abrev() used below)
source("scripts/helper_functions.R")
#===========================================
# Get and process high frequency sensor data
#===========================================
# Lakes are processed one lake at a time
lake <- "acton"
lake_id <- get.abrev(lake)[1]            # abbreviated lake code from helper
max_d <- as.numeric(get.abrev(lake)[2])  # second helper field, coerced numeric
lake.area <- as.numeric(get.abrev(lake)[3])  # used for gas exchange below
out.time.period <- "60 min"
tz <- "US/Eastern"#"US/Central"#"US/Pacific"#"US/Central"
# Read and stack all cleaned sonde files for this lake.
sonde = list.files(paste("data/sonde_raw/clean_data/",lake,sep=""), full.names = T) %>%
  lapply(read_csv) %>%
  bind_rows()
if(lake == "castle") sonde <- sonde %>% drop_na(datetime)
# examine years in raw data
unique(sonde$year)
years = c(2010:2012,2014) #select years for which to generate model input files
sonde <- sonde %>% filter(year %in% years) #trim dataset to years
#process lake files and drop data that is bad or outside of summer stratified
#time period (day-of-year windows differ by lake and year)
if(lake == "sparkling"){
  data <- sonde %>%
    mutate(datetime = ymd_hms(datetime,tz=tz)) %>%
    filter((year != 2010 & yday >= 152 & yday <= 245) |
             (year == 2010 & ((yday >=152 & yday <= 189) | (yday >=202 & yday <= 245)))) %>%
    drop_na()
}
if(lake == "trout") {
  data <- sonde %>% select(-par_sp) %>%
    mutate(datetime = ymd_hms(datetime,tz=tz)) %>%
    # filter(yday >=152 & yday <= 245) %>%
    filter((year == 2007 & yday >=166 & yday <= 245) |
             (year == 2008 & yday >=170 & yday <=245) |
             (year == 2009 & yday >=152 & yday <=245) |
             (year == 2010 & ((yday >=152 & yday <= 195) | (yday >=205 & yday <= 245))) |
             (year == 2012 & yday >=152 & yday <= 245)) %>%
    # (year == 2009 & ((yday >=152 & yday <= 180) | (yday >=190 & yday <= 245))) |
    # (year == 2010 & ((yday >=152 & yday <= 197) | (yday >=205 & yday <= 245))) |
    # (year == 2012 & yday >=152 & yday <= 245)) %>%
    drop_na()
}
if(lake == "castle"){
  data <- sonde %>%
    mutate(datetime = ymd_hms(datetime,tz=tz)) %>%
    filter((year == 2014 & yday >= 152 & yday <=245) |
             (year==2015 & yday >=172 & yday <=245) |
             (year == 2016 & yday >= 188 & yday <=245) |
             (year == 2017 & yday >=212 & yday <= 245)) %>%
    drop_na()
}
if(lake == "acton") {
  data <- sonde %>%
    mutate(datetime = ymd_hms(datetime,tz=tz)) %>%
    filter(yday >=152 & yday <= 245) %>%
    drop_na()
}
# Blank out shallow DO readings and count valid observations per day.
data <- data %>%
  group_by(year,yday) %>%
  mutate(do = ifelse(z<0.5,NA,do)) %>% #exclude observations below DO sensor
  #NOTE(review): the 0.5 m cutoff needs to change for Castle lake, where the sensor was at 3 m
  mutate(obs = sum(!is.na(do))) %>% #identify and filter records that have < 23 hrs of data
  ungroup()
freq <- nrlmetab::calc.freq(data$datetime) # determine data frequency obs/day
# Keep near-complete days, then compute the gas exchange coefficient k.
data <- data %>% filter(obs>=(freq-(freq/24*2))) %>% #allow for 2 hours of missing data per day
  mutate(k600 = k.vachon.base(wnd = wspeed,lake.area = lake.area)) %>% #estimate K in m/day
  mutate(kgas = k600.2.kGAS.base(k600 = k600,temperature = wtemp,gas = "O2")) %>% #m/d
  mutate(k = (kgas/freq)/z) %>% #convert gas to T^-1
  select(-kgas,-k600,-obs)
# setting exchange to 0 in castle if sensor was shallower than z
# these data weren't used so not relevant.
if(lake == "castle") {
  data <- data %>%
    mutate(k = ifelse(z<3,0,k))
}
#
#==========
#========== Prepare for data analysis
#==========
# prepare data (derived from Phillips 2020)
sonde_prep = data %>%
  arrange(year, yday, hour) %>%
  # for each year, create identifier for uninterrupted stretches of observations
  group_by(year) %>%
  mutate(i = ifelse(is.na(do)==T, 1, 0),
         j = c(1,abs(diff(i)))) %>%
  filter(is.na(do)==F) %>%
  mutate(series = cumsum(j)) %>%
  ungroup() %>%
  # create unique index for each series
  # remove series with fewer than 24 observations
  mutate(unique_series = year + series/length(unique(series))) %>%
  group_by(unique_series) %>%
  mutate(series_length = length(unique_series)) %>%
  ungroup() %>%
  # recreate series index and make unique index for days
  # create index for observations (for joining later)
  # replace 0 par_int with smallest non-zero value
  mutate(unique_series = as.factor(unique_series) %>% as.numeric(),
         unique_day = paste(year, yday) %>% as.factor() %>% as.numeric(),
         index = 1:length(do),
         par_int = ifelse(par_int==0,0.00001, par_int)) %>%
  select(-i, -j)
# return missing observations for check (re-expands the year/day/hour grid)
sonde_check = data %>%
  expand(year,yday,hour) %>%
  full_join(sonde_prep) %>%
  arrange(year,yday)
# quick visual check of DO coverage by year
ggplot(sonde_check,aes(x=datetime,y=do)) + geom_point(size=0.2) + geom_line() + facet_wrap(vars(year),scales="free_x")
# export prepared data (output path depends on single- vs multi-year runs)
if(length(years) == 1) {
  sonde_check %>%
    write_csv(paste("analyses/int_par/model_fit/input/sonde_prep_",lake,"_",years,".csv",sep =""))
} else {
  sonde_check %>%
    write_csv(paste("data/model_input/sonde_prep_",lake,"_",min(years),"_",max(years),".csv",sep =""))
}
#==========
#========== Package data
#==========
# define variables in environment (names below are dumped for the Stan model)
o2_freq = freq;
o2_obs = 1000*sonde_prep$do # convert to mg m^-3
o2_eq = 1000*sonde_prep$do_eq # convert to mg m^-3
light = sonde_prep$par_int
temp = sonde_prep$wtemp
wspeed = sonde_prep$wspeed
# sch_conv = sonde_prep$sch_conv
map_days = sonde_prep$unique_day
k = sonde_prep$k
# days_per_year must stay an array even when there is a single year
if(length(years) == 1) {
  days_per_year = array(c({sonde_prep %>%
      group_by(year) %>%
      summarize(value = length(unique(unique_day)))}$value), dim = 1) #,dim = 1
} else {
  days_per_year = array(c({sonde_prep %>%
      group_by(year) %>%
      summarize(value = length(unique(unique_day)))}$value)) #,dim = 1
}
obs_per_series = c({sonde_prep %>%
    group_by(unique_series) %>%
    summarize(value = length(unique_series))}$value)
obs_per_day = c({sonde_prep %>%
    group_by(unique_day) %>%
    summarize(value = length(unique_day))}$value)
z = sonde_prep$z
n_obs = length(o2_obs)
n_series = length(obs_per_series)
n_days = sum(days_per_year)
n_years = length(days_per_year)
# export as .R (rstan dump format; file name depends on the year range)
if(length(years)>1) {
  stan_rdump(c("o2_freq","o2_obs","o2_eq","light","temp","wspeed","map_days","obs_per_series","days_per_year",
               "obs_per_day", "z","k","n_obs","n_series","n_days","n_years"),
             file=paste("model/input/",lake,"_",min(years),"_",max(years),"_sonde_list.R",sep=""))
} else {
  stan_rdump(c("o2_freq","o2_obs","o2_eq","light","temp","wspeed","map_days","obs_per_series","days_per_year",
               "obs_per_day", "z","k","n_obs","n_series","n_days","n_years"),
             file=paste("model/input/",lake,"_",years,"_sonde_list.R",sep=""))
}
|
b8fc2ae320fa21ff771776139f801db2c0485a4e | 33b5f6126026db66c5fd60f505a1e6797a47694c | /rcourse/class2/delete_me.R | 12e1d04c88a766dea808d9a5af5ee49ed6e89121 | [] | no_license | databrew/malawi | afb3fecb9a26ce24a9b91ec8ec2f960ec1592043 | 9888784f578d61e9e2d5e5396d42efd178c174cb | refs/heads/master | 2020-06-09T17:16:41.896827 | 2019-07-05T15:20:51 | 2019-07-05T15:20:51 | 193,475,551 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,475 | r | delete_me.R | station = "FQMA"; start_year = 2014; end_year = 2016;
# Download daily Weather Underground summaries for one airport station and
# cache them in an .RData file so repeated runs skip the download.
# (station / start_year / end_year are defined above this block.)
save <- TRUE; load_saved <- TRUE
# Cache file is keyed by station and year range.
file_name <- paste0("weather_", station, "_", start_year,
                    "_", end_year, ".RData")
# BUG FIX: scalar condition -- use short-circuiting `&&`, not elementwise `&`.
if (load_saved && file_name %in% dir()) {
  load(file_name)
} else {  # BUG FIX: `else` must follow `}` on the same line at script top level
  station <- toupper(gsub(" ", "%20", station))  # URL-encode spaces
  start_date <- as.Date(paste0(start_year, "-01-01"))
  end_date <- as.Date(paste0(end_year, "-12-31"))
  # Never request future dates.
  if (end_date > Sys.Date()) {
    end_date <- Sys.Date() - 1
  }
  start_day <- as.numeric(format(start_date, "%d"))
  start_month <- as.numeric(format(start_date, "%m"))
  start_year <- as.numeric(format(start_date, "%Y"))
  end_day <- as.numeric(format(end_date, "%d"))
  end_month <- as.numeric(format(end_date, "%m"))
  end_year <- as.numeric(format(end_date, "%Y"))
  years <- start_year:end_year
  results_list <- list()
  # One request per calendar year; try() keeps a single failed year from
  # aborting the whole download.
  for (i in seq_along(years)) {
    try({
      this_year <- years[i]
      this_start_month <- 1
      this_start_day <- 1
      if (this_year == end_year) {
        this_end_month <- as.numeric(format(end_date, "%m"))
        # BUG FIX: day-of-month is "%d"; the original used "%m" here, so the
        # final partial year was truncated at day == month number.
        this_end_day <- as.numeric(format(end_date, "%d"))
      } else {
        this_end_month <- 12
        this_end_day <- 31
      }
      link <- paste0("http://www.wunderground.com/history/airport/",
                     station, "/", this_year, "/", this_start_month,
                     "/", this_start_day, "/CustomHistory.html?dayend=",
                     this_end_day, "&monthend=", this_end_month,
                     "&yearend=", this_year, "&req_city=NA&req_state=NA&req_statename=NA&format=1")
      df <- suppressWarnings(fread(link))
      # fread() returns a data.table; convert but keep the original headers.
      names_df <- names(df)
      df <- data.frame(df)
      names(df) <- names_df
      df <- df[, 1:21]
      names(df)[1] <- "date"
      df$date <- as.Date(df$date, format = "%Y-%m-%d")
      names(df) <- tolower(gsub(" |[/]", "_", names(df)))
      # Drop the columns we do not analyse.
      df <- df[, !grepl("sea_level|visibility|wind|gust|dew",
                        names(df))]
      names(df) <- c("date", "temp_max", "temp_mean",
                     "temp_min", "humidity_max", "humidity_mean",
                     "humidity_min", "precipitation", "cloud_cover")
      df$location <- toupper(as.character(station))
      message(paste0("Data retrieved for ", this_year))
      results_list[[i]] <- df
    })
  }
  # Stack all successfully retrieved years into one data.frame.
  x <- do.call("rbind", results_list)
}
# Persist (or refresh) the cache.
if (save) {
  save(x, file = file_name)
}
|
28c4f9331ddb14f5d62c3b5d861ff184fd6ddacf | 3fa75cb3c140f20b30b10bd355d93a11cdbadf44 | /Testfxns.R | 63e2a3ae11bf20b60e8afd60a25b839ff87b97df | [] | no_license | Akansha-Kapoor/R-Code-Repo | 3d1c9bbae03471c9fc7df66c9bf27377f85c85c3 | 30fc1b9ff46a3c5bb65ab586036dc34fe8c15bc7 | refs/heads/master | 2020-04-23T15:30:56.879099 | 2019-03-13T11:07:35 | 2019-03-13T11:07:35 | 171,268,044 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 335 | r | Testfxns.R | readinteger <- function()
{
  # Prompt until the user types a non-negative whole number, then return it
  # as an integer (iterative form of the original recursive retry).
  repeat {
    n <- readline(prompt="Enter an integer: ")
    if (grepl("^[0-9]+$", n)) {
      return(as.integer(n))
    }
  }
}
# Draw a single uniform random number on [0, 10).
rnum <- function()
{
  10 * runif(1)
}
#try the one from course
# Return the mean of 100 uniform(0, 1) draws.
# Consumes exactly 100 draws from the global RNG stream.
test1 <- function() {
  mean(runif(100))
}
|
9a1633fc6a0e4c945cd14c2997793070cf6a29f5 | 60bf5a62b4d91b589d310d0d5268fac471530991 | /src/02_generate_train_test_data.R | 2569dd3f9974f47f623d3e8f159c90b5bd9edddb | [
"MIT"
] | permissive | 93degree/AHMoSe-simulation-study | 576494dfa18f0109bd0bbcb63e25852955c525cb | 7356134bacb7d9007a322dc885ca05c287783189 | refs/heads/main | 2023-04-13T08:32:14.690813 | 2021-04-16T12:47:48 | 2021-04-16T12:52:09 | 358,598,018 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,052 | r | 02_generate_train_test_data.R | ## Load libraries ----
library(tidyverse)
## Define constants ----
# Map the expert's ordinal labels onto the integers 1 (worst) to 5 (best).
recode_evaluation <- c(
  `very poor` = 1,
  `poor` = 2,
  `average` = 3,
  `good` = 4,
  `excellent` = 5
)
# Average temperature (in degree Celsius) for the growing season (April 1st to
# October 31st) from Tagarakis et al. (2014)
growing_average_temp <- c(
  `2010` = 21.4,
  `2011` = 20.5,
  `2012` = 22.2
)
## Load & preprocess data ----
# Read the raw grape-quality measurements and derive the modelling columns.
grape_quality_data <-
  read_csv(here::here("data", "grape_quality_data.csv")) %>%
  # Recode the textual Expert evaluation onto its numeric 1-5 scale
  mutate(`Expert evaluation` = recode(
    `Expert evaluation`,
    !!!recode_evaluation
  )) %>%
  # Drop columns that correspond to the fuzzy model information
  select(-c(Output, `Fuzzy evaluation`, `Agreement of evaluation (%)`)) %>%
  # Rename the recoded expert score (ground-truth quality target)
  rename(GTQ = `Expert evaluation`) %>%
  # Generate Row and Column from the Cell identifier
  # (split on separate()'s default non-alphanumeric separator)
  separate("Cell", c("Row", "Col")) %>%
  # Add the average growing-season temperature matching each row's Year
  mutate(Temp = recode(Year, !!!growing_average_temp))
## Scenario definitions ----
# Both scenarios use the same split (train on 2010-2011, test on 2012) and
# differ only in which columns are dropped before writing:
#   Scenario A: only the original features (Anth, BW, TA, TSS) are kept.
#   Scenario B: original features plus Row, Col and Temp.
#
# write_scenario_datasets: writes test_data.csv (Year == 2012) and
# train_data.csv (all other years) under data/<scenario_dir>/datasets/,
# dropping `drop_cols` from both splits. Extracted to replace the two
# copy-pasted scenario blocks.
#
#   scenario_dir - directory name under data/ (e.g. "scenario-A")
#   drop_cols    - character vector of column names to remove from both splits
# Returns the output path, invisibly (called for its file-writing side effect).
write_scenario_datasets <- function(scenario_dir, drop_cols) {
  path <- here::here("data", scenario_dir, "datasets")
  if (!dir.exists(path)) {
    dir.create(path, recursive = TRUE)
  }
  test_data <- grape_quality_data %>%
    filter(Year == 2012) %>%
    select(-all_of(drop_cols))
  train_data <- grape_quality_data %>%
    filter(Year != 2012) %>%
    select(-all_of(drop_cols))
  write_csv(test_data, file.path(path, "test_data.csv"))
  write_csv(train_data, file.path(path, "train_data.csv"))
  invisible(path)
}

## Scenario A ----
write_scenario_datasets("scenario-A", c("Year", "Row", "Col", "Temp"))

## Scenario B ----
write_scenario_datasets("scenario-B", "Year")
|
cbe41c7b6fb581d0d1a161b5ab250012a56b7923 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/dlib/R/pkg.R | 009ee94d79c64f4e9a786b706a1b4bdd7e341cf5 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,173 | r | pkg.R | #' @title Allow Access to the Dlib C++ Library
#' @description 'Dlib' <http://dlib.net> is a 'C++' toolkit containing machine learning algorithms and computer vision tools.
#' It is used in a wide range of domains including robotics, embedded devices, mobile phones, and large high performance computing environments. This package allows R users to use 'dlib' through 'Rcpp'.
#' @name dlib-package
#' @aliases dlib-package
#' @docType package
#' @importFrom Rcpp evalCpp
#' @seealso \code{\link{inlineCxxPlugin}}
#' @useDynLib dlib
#' @examples
#' \dontrun{
#' library(Rcpp)
#' library(inline)
#' library(dlib)
#'
#' ##
#' ## Example adapted from http://dlib.net/surf_ex.cpp.html
#' ## Find the SURF (https://en.wikipedia.org/wiki/Speeded_up_robust_features)
#' ## features of an image
#'
#' ##
#' ## Step 1:
#' ## Register a function called dlib_surf to read and extract the FHOG features
#' ## have a look at the file for how it is structured
#' f <- system.file("extdata", "rcpp_surf.cpp", package = "dlib")
#' cat(readLines(f), sep = "\n")
#' sourceCpp(f)
#'
#' ##
#' ## Step 2:
#' ## Convert image from jpeg to BMP3 format as ine dlib_surf function,
#' ## we assume the file is in BMP3 format
#' library(magick)
#' f <- system.file("extdata", "cruise_boat.jpeg", package = "dlib")
#' img <- image_read(path = f)
#' img
#' f_bmp <- tempfile(fileext = ".bmp")
#' image_write(img, path = f_bmp, format = "BMP3")
#'
#' ##
#' ## Step 3:
#' ## Apply the function Rcpp function dlib_surf on the image
#' dlib_surf(f_bmp)
#' }
NULL
#' @title Rcpp plugin for dlib
#' @description Rcpp/inline compiler plugin for dlib, looked up via
#' \code{getPlugin("dlib")}.
#' @return a list of plugin settings: \code{env} (build environment with an
#' empty \code{PKG_LIBS}), \code{includes} (header boilerplate injected into
#' generated C++ sources), \code{LinkingTo} and \code{Depends}
#' @examples
#' library(Rcpp)
#' library(inline)
#' library(dlib)
#' getPlugin("dlib")
inlineCxxPlugin <- function() {
  # Build the plugin descriptor in a single expression; consumers look the
  # fields up by name. The includes string must stay exactly as generated
  # Rcpp sources expect it (Rcpp header, dlib depends/cpp11 attributes,
  # BEGIN_RCPP/END_RCPP guards).
  list(
    env = list(PKG_LIBS = ""),
    includes = "\n#include <Rcpp.h>\n// [[Rcpp::depends(dlib)]]\n// [[Rcpp::plugins(cpp11)]]\n\n\n#ifndef BEGIN_RCPP\n#define BEGIN_RCPP\n#endif\n\n#ifndef END_RCPP\n#define END_RCPP\n#endif\n\nusing namespace Rcpp;\n",
    LinkingTo = c("Rcpp", "dlib"),
    Depends = c("Rcpp")
  )
}
|
d2d75a408143dcb53e9060ab0f04fc3ac994d63c | 12a4b117bf9cbaf9de0a3e606811af4188486ad3 | /man/retrieve.overlap.Rd | f3fef6ce9b732aae7c342dfe4d7f52207bcfc088 | [
"BSD-2-Clause"
] | permissive | emreg00/guildifyR | 08a5bb28578eb22e7a91c6ee62276cc3a8ee694f | e02f32f47ddf0f20597ff49614225551dcdaacbc | refs/heads/master | 2020-03-23T19:44:51.481523 | 2019-07-17T08:56:08 | 2019-07-17T08:56:08 | 142,000,200 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,401 | rd | retrieve.overlap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retrieve.overlap.R
\name{retrieve.overlap}
\alias{retrieve.overlap}
\title{Retrieve overlap between two results using two job ids}
\usage{
retrieve.overlap(job.id1, job.id2, top.validated = T, fetch.files = F,
output.dir = NULL)
}
\arguments{
\item{job.id1}{Job id 1}
\item{job.id2}{Job id 2}
\item{top.validated}{Flag to get overlap between either top-ranking functionally validated proteins (default) or top-ranking 1\%}
\item{fetch.files}{Flag to fetch result files (for top-ranking 1\%) from server and save them locally in output.dir}
\item{output.dir}{Directory to save the ranking, function, subnetwork and drug info files fetched from the server}
}
\value{
result List containing scores of common top-ranking proteins,
common functions enriched among top-ranking proteins,
drugs targeting common top-ranking proteins
(Note that the number of top-ranking proteins and common functions are limited to 500)
}
\description{
Retrieve overlap between two results using two job ids
}
\examples{
result = retrieve.overlap(job.id1, job.id2)
getSlots(class(result))
#Scores
head(gScores(result))
#Common functions between top ranking genes of two results
head(gFunctions(result))
#Functions of top ranking common genes of two results
head(gFunctions2(result))
#Drugs
head(gDrugs(result))
}
|
a328485dfe1fc6cbce7196e4afa2c0b2e423ec93 | 4dcd968ca1104a67e366179474240a2c5c9b9f0e | /R/tp_NN_keras.R | fdcc09c4e8174e307e9590f4ffc8df3f658c276c | [] | no_license | ThomasDavenel/TP_ACI | 27edb80623c4d58a9ac219d640c0f0921a2ab824 | d3efd5007c2d6f73074e80465d447f806bb8e7e2 | refs/heads/main | 2023-03-18T17:25:27.572962 | 2021-03-07T15:34:12 | 2021-03-07T15:34:12 | 337,032,440 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,630 | r | tp_NN_keras.R | install.packages("tensorflow")
install.packages("keras")
library(tensorflow)
library(keras)
source("./fonctions_utiles.R")
################################################################################
# Exercise 1: Effect of the activation function
# and of the number of gradient-descent iterations
################################################################################
dataset = read.table("./data/exo1_keras.txt", header = T)
plot(dataset[which(dataset$y==0),1:2], col="red", xlim = c(-3,4), ylim = c(-5,9))
points(dataset[which(dataset$y==1),1:2], col="blue")
# Prepare the data for the neural network:
# the individuals and their features go into a matrix,
# and the class labels into a separate vector.
train_x = data.matrix(dataset[,1:2])
train_y = dataset$y
# Define a neural network with one hidden layer of 10 neurons.
model1 <- keras_model_sequential()
model1 %>%
  layer_dense(units =10, input_shape =2) %>% # first layer: input_shape is the number of inputs
  # (the dimension of the input data) and units the number of neurons of the hidden layer.
  # We start with a linear activation on this layer (the default when none is specified).
  layer_dense(units = 1, activation = "sigmoid") # output layer: 1 neuron whose output is the probability
  # of belonging to class 1 (sigmoid activation, as always for binary problems)
# Compile the model: always the same call, except with more than 2 classes (cf. exercise 4).
model1 %>% compile(
  loss = 'binary_crossentropy',
  optimizer = 'adam',
  metrics = c('accuracy')
)
# Launch training with fit. Arguments:
# - the matrix of individual descriptions
# - a vector with the class of each individual
# - epochs is the number of times the learning algorithm sees every training example
# - batch is the mini-batch size for gradient descent (left as-is for now)
# NOTE(review): keras's fit() argument is batch_size; "batch.size" is likely
# silently ignored here — verify against the keras R API.
model1 %>% fit(train_x, train_y, epochs = 100, batch.size = 10)
###### Question 1: with 100 epochs and a mini-batch of size 10, how many gradient-descent
# iterations are performed (i.e. how many times are the weights updated)?
# Two curves should appear, showing as a function of the epochs:
# 1) the training-set "loss" (the function the network minimizes, here binary cross-entropy)
# 2) the training-set classification accuracy
###### Question 2:
# How does the loss behave? And the accuracy? What are their final values?
# Draw the decision boundary (works like the other, similar helper functions).
dessiner_frontiere_NN(train_x, train_y, model1, -4,4,-8,8, col = c("red", "blue"))
###### Question 3: What is the shape of the boundary? Is it suitable here?
# Now add activation = "relu" after input_shape = 2 in the first layer's definition.
###### Question 4: What does this mean? What is the expected consequence?
model2 <- keras_model_sequential()
model2 %>%
  layer_dense(units = 10, input_shape =2, activation = "relu") %>%
  layer_dense(units = 1, activation = "sigmoid") # output layer: 1 value giving the probability
model2 %>% compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = c('accuracy'))
model2 %>% fit(train_x, train_y, epochs = 100, batch.size = 10)
dessiner_frontiere_NN(train_x, train_y, model2, -4,4,-8,8, col = c("red", "blue"))
###### Question 5: Did the shape of the boundary change? Could it still be improved? How?
# Did the final loss and accuracy values change?
### Training can be resumed by calling fit again (without re-initializing the model);
# gradient descent restarts from where it previously stopped.
# The option view_metrics = F disables the live plotting of the metrics,
# which speeds up training.
model2 %>% fit(train_x, train_y, epochs = 100, batch.size = 10, view_metrics = F)
# This runs 100 more iterations starting from the already-trained model.
###### Question 6: Did the accuracy improve with these 100 extra iterations?
# Look at the boundary.
dessiner_frontiere_NN(train_x, train_y, model2, -4,4,-8,8, col = c("red", "blue"))
### Keep adding iterations until the accuracy reaches 1 (you should need roughly
# 300 or 400 more, but proceed 100 at a time).
model2 %>% fit(train_x, train_y, epochs = 400, batch.size = 10, view_metrics = F)
###### Question 7: Display the boundary. Are you happy with it?
dessiner_frontiere_NN(train_x, train_y, model2, -4,4,-8,8, col = c("red", "blue"))
###############################################################
# Exercise 2: Architectures with several hidden layers
##############################################################
dataset = read.table("./data/exo2_keras.txt", header = T)
plot(dataset[which(dataset$y==0),1:2], col="red", xlim = c(0,20), ylim = c(0,21))
points(dataset[which(dataset$y==1),1:2], col="blue")
# Prepare the data for the neural network:
# individuals and their features in a matrix,
# class labels in a separate vector.
train_x = data.matrix(dataset[,1:2])
train_y = dataset$y
#### This time we use a network with 2 hidden layers,
# one with a sigmoid activation and the other with a relu activation:
model <- keras_model_sequential()
model %>%
  layer_dense(units = 20, input_shape =2, activation = "sigmoid") %>% # first hidden layer: 20 neurons, sigmoid
  layer_dense(units = 20, activation = "relu")%>% # second hidden layer: 20 neurons, relu
  layer_dense(units = 1, activation = "sigmoid") # output layer: 1 neuron (2 classes), sigmoid
model %>% compile(
  loss = 'binary_crossentropy',
  optimizer = 'adam',
  metrics = c('accuracy')
)
model %>% fit(train_x, train_y, epochs = 50, batch.size = 10, view_metrics = F)
###### Question 1: What is the accuracy after 50 epochs?
# Display the boundary:
dessiner_frontiere_NN(train_x, train_y, model, 0,20,0,21, col = c("red", "blue"))
###### Question 2: How many points are misclassified?
### Continue training, adding iterations until the accuracy is at least 0.9868.
model %>% fit(train_x, train_y, epochs = 100, batch.size = 10, view_metrics = F)
# Then display the boundary.
###### Question 3: What phenomenon do you see appearing?
dessiner_frontiere_NN(train_x, train_y, model, 0,20,0,21, col = c("red", "blue"))
############################################################
# Exercise 3: Training a neural network in practice
############################################################
# To avoid the overfitting caused by too many iterations, the data must be
# split into training/validation and test sets. The validation set estimates
# the true error along the gradient-descent iterations, so training can be
# stopped before overfitting sets in.
# keras itself splits the data passed to fit into training and validation
# parts; this exercise illustrates that process.
ex3=read.table("./data/exo3_keras.txt", header = T)
head(ex3)
table(ex3$Classe)
plot(ex3[which(ex3$Classe==0),1:2], col="red", xlim = c(-1.2,1.2), ylim = c(-1.2,1.2))
points(ex3[which(ex3$Classe==1),1:2], col="blue")
# Split the data into only 2 sets: training with 80% of the rows, test with 20%.
# keras will carve a validation set out of the training set by itself
# when fitting the model.
nall = nrow(ex3) #total number of rows in data
ntrain = floor(0.80 * nall) # number of rows for train: 80%
ntest = floor(0.20* nall) # number of rows for test: 20%
index = sample(nall) # random permutation of the integers 1, 2, 3, ... nall
train_x = ex3[index[1:ntrain],1:2] # training set
train_y = ex3[index[1:ntrain],3] # training labels
test_x = ex3[index[(ntrain+1):nall],1:2] # test set
test_y = ex3[index[(ntrain+1):nall],3] # test labels
train_x = matrix(unlist(train_x), ncol = 2)
test_x = matrix(unlist(test_x), ncol = 2)
# Set up a network with 3 hidden layers:
model <- keras_model_sequential()
model %>%
  layer_dense(units = 80, input_shape =2, activation = 'relu') %>%
  layer_dense(units = 40, activation = 'relu') %>%
  layer_dense(units = 30, activation = 'relu') %>%
  layer_dense(units = 1,activation = 'sigmoid')
model %>% compile(
  loss = 'binary_crossentropy',
  optimizer = 'adam',
  metrics = c('accuracy')
)
###### Question 1: Draw this network.
# Using summary(model), work out how many parameters have to be learned.
print(summary(model))
# fit has a 'validation_split' argument that builds a validation set from the
# examples reserved for training: give it the proportion of examples to place
# in the validation set. We use 0.2 (20%) here.
###### Question 2: What are the sizes of the training and validation sets?
# Train the model.
model %>% fit(train_x, train_y, epochs = 500, batch.size = 10, validation_split = 0.2)
tmp =nrow(train_x)
print(tmp)
print(tmp*0.2)
# There are now two loss curves (training and validation), likewise for accuracy.
###### Question 3: What do you observe on the validation-loss curve? Interpret it.
# fit can be asked to stop once enough iterations have been done (before
# entering the overfitting zone): stop when the validation loss has stopped
# decreasing for some time.
# First re-initialize the model:
model <- keras_model_sequential()
model %>%
  layer_dense(units = 80, input_shape =2, activation = 'relu') %>%
  layer_dense(units = 40, activation = 'relu') %>%
  layer_dense(units = 30, activation = 'relu') %>%
  layer_dense(units = 1,activation = 'sigmoid')
model %>% compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = c('accuracy'))
# Then fit again with early stopping: stop when the validation loss (val_loss)
# has not decreased during the last 100 epochs (patience = 100).
# The retained model is the one at the last val_loss minimum before the stop.
model %>% fit(train_x, train_y, epochs = 500, batch.size = 10, validation_split = 0.2,callbacks = list(callback_early_stopping("val_loss", patience = 100)))
###### Question 4: How many epochs were performed?
#### Prediction on the test set:
# Display the boundary and (in black) the first test points.
tmp_x = test_x[1:5,1:2]
plot(tmp_x[,1:2], col="black", xlim = c(-1.2,1.2), ylim = c(-1.2,1.2))
dessiner_frontiere_NN(train_x, train_y, model, -1.2,1.2,-1.2,1.2, col = c("red", "blue"))
# predict returns the network output, i.e. the probability of being of class 1
# (binary 0/1 classification here):
tmp=predict(model, test_x)
nb = nrow(tmp)
tmp
var=0
for(i in 1:nb){
if(floor(tmp[i]*2)==test_y[i]) var=var+1 # floor(p*2): 0 when p < 0.5, 1 when p >= 0.5 (p == 1 gives 2 — edge case)
}
var/24 # NOTE(review): 24 is hard-coded — presumably ntest; verify it equals nrow(test_x)
# You get the probabilities of belonging to class 1 for the test individuals.
# To turn these probabilities into classes you can use ifelse(), as seen in
# the logistic-regression lab. With at least 3 classes it differs (cf. exercise 4).
###### Question 5: What are the attribute values and predicted classes of the first
# 5 test examples? Try to locate them on the plot showing the decision boundary
# and the test points.
# Do the predicted class and its probability look consistent with what you
# observe visually?
# NOTE(review): prediction_test is not defined anywhere above — the next two
# calls will fail as written; verify the intended object.
plot(prediction_test[which(ex3$Classe==0),1:2], col="red", xlim = c(-1.2,1.2), ylim = c(-1.2,1.2))
points(prediction_test[which(ex3$Classe==1),1:2], col="blue")
# Compute this model's performance (number of errors) on the test set.
# evaluate computes the accuracy (correct-classification rate) on a data set:
model%>%evaluate(test_x,test_y) # just pass the data set to evaluate and the
# associated labels; you get the loss and the accuracy.
###### Question 6: What is this model's accuracy on the test set?
# This accuracy estimates the model's generalization error.
# In practice, always proceed as follows:
# - train/test split
# - choose an architecture (number of hidden layers, neurons, activations, ...)
# - train the model, stopping before overfitting (using a validation set)
# - evaluate the model on the test set
# Then change the architecture and start again; this lets you select the
# network architecture best suited to your data.
##########################################################################
# Exercise 4: Selecting the best network on a multi-class data set
#########################################################################
dataset = read.table("./data/segment.dat")
# 2310 individuals, 19 attributes (columns 1 to 19); the class is in
# column 20 (see the data description in segment.txt).
# With more than 2 classes a few commands differ.
# Classic train/test split:
nall = nrow(dataset) #total number of rows in data
nall
ntrain = floor(0.80 * nall) # number of rows for train: 80%
ntrain
ntest = floor(0.20* nall) # number of rows for test: 20%
ntest
index = sample(nall) # random permutation of the integers 1, 2, 3, ... nall
index
train_x = dataset[index[1:ntrain],1:19] # training set
train_labels = dataset[index[1:ntrain],20] # training labels
test_x = dataset[index[(ntrain+1):nall],1:19] # test set
test_labels = dataset[index[(ntrain+1):nall],20] # test labels
train_x = matrix(unlist(train_x), ncol = 19)
test_x = matrix(unlist(test_x), ncol = 19)
table(train_labels)
# The class is an integer between 1 and 7.
# keras needs class labels that start at zero,
# so subtract 1 from train_labels and test_labels to build train_y and test_y:
train_y = train_labels-1
test_y = test_labels-1
# Then convert these vectors to the format keras expects: for each individual,
# a vector of length 7 (number of classes) with every component at 0 except
# the one matching the individual's class.
# E.g. an individual of class 3 (shifted label 2) becomes 0 0 1 0 0 0 0
# (a 1 in the class position, 0 elsewhere).
# This is done by:
train_y = to_categorical(train_y)
test_y = to_categorical(test_y)
### Build a neural network for these data, then run fit.
# Beware:
# 1) individuals have 19 attributes, so the input layer must have 19 neurons
# 2) there are 7 possible classes, so the output layer must have 7 neurons
# 3) to get probabilities as output, the output activation must be 'softmax'
#    (a multi-class generalization of the sigmoid)
# 4) with more than 2 classes, use loss = 'categorical_crossentropy' in
#    compile instead of binary_crossentropy
# 5) remember view_metrics = F in fit to speed up training
# 6) to look at the curves after training, store the fit output in 'history':
#    history <- model %>% fit(..., view_metrics=F)
#    then plot the information stored in history:
#    plot_NN_loss(history)
#    plot_NN_accuracy(history)
#    (functions defined in fonctions_utiles.R)
model <- keras_model_sequential()
model %>%
  layer_dense(units = 80, input_shape =19, activation = 'relu') %>%
  layer_dense(units = 40, activation = 'relu') %>%
  layer_dense(units = 30, activation = 'relu') %>%
  layer_dense(units = 7,activation = 'softmax')
model %>% compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = c('accuracy'))
history <-model %>% fit(train_x, train_y, epochs = 500, batch.size = 10, validation_split = 0.2,callbacks = list(callback_early_stopping("val_loss", patience = 100)),view_metrics = F)
plot_NN_loss(history)
plot_NN_accuracy(history)
###### Question 1: Draw your network.
###### Question 2: How many epochs were needed to train the model?
### Once the model is trained (e.g. in 'model'), call predict to get the
# network output for the test examples.
###### Question 3: What is the shape of the prediction for one example? Which class goes with each example?
predict(model, test_x)
### evaluate computes the accuracy as before. Compute it on the test set.
###### Question 4: What are the empirical error and the true (generalization) error of the model?
model%>%evaluate(test_x,test_y)
model%>%evaluate(train_x,train_y)
### Try different architectures (models) and compute their generalization errors.
|
097710b2d3869b402a072f41b8726845c512acbe | a9bda9d1bf0110746739b105157f49a6af991530 | /Calculo_pobreza.R | b056d83f27b682c04d45f276d85211d71e4af315 | [] | no_license | EduTole/Pobreza | b7ced5622eeb6dc5daf61c83ce28a61c885a5053 | f91b02e40559ff4e3d8cfb0f3b8681c288892752 | refs/heads/master | 2023-03-20T20:59:04.295550 | 2021-03-14T15:45:13 | 2021-03-14T15:45:13 | 347,659,104 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 868 | r | Calculo_pobreza.R | # 01----------------------------------
# Working directory (machine-specific absolute path).
setwd("D:/Dropbox/EcoMaster/RTaller/Diapositivas/Pobreza/Pobreza")
# 02 ---------------------------------
# Load the libraries (foreign for read.dta, tidyverse for data wrangling).
library(foreign)
library(tidyverse)
# 03 ---------------------------------
# Load the data: ENAHO 2019 household summary file (Stata format).
sumaria <- read.dta("D:/Dropbox/BASES/ENAHO/2019/sumaria-2019.dta")
sumaria %>% str()
# Derive the working variables:
#   ponderador - factor07 * mieperho (presumably the expansion factor times
#                the number of household members — verify against ENAHO docs)
#   pobre      - 1 when the household is poor (pobreza != "no pobre"), else 0
#   dpto       - department code = first two characters of the ubigeo code
pobreza <- sumaria %>%
  mutate(ponderador=factor07*mieperho,
         pobre=ifelse(pobreza !="no pobre",1,0),
         dpto=str_sub(ubigeo,1,2))
# 04 ----------------------------------------
# Libraries for the weighted mean with expansion factors.
library(Hmisc)
library(plyr)
# Weighted poverty rate by department (weights = ponderador).
Pobreza_dpto <- ddply(pobreza, ~dpto,summarise,
                      pobreza=wtd.mean(pobre,ponderador,na.rm = TRUE))
# Round the rates to two decimals.
Pobreza_dpto <- Pobreza_dpto %>%
  mutate(pobreza=round(pobreza,2))
|
88bd1a7d42ef5ff704b0ad3ebd5669b951c0975f | 8aadf3bf0042c14bbff41d4985eeb42876943485 | /benfordsLaw/R/plotFD.R | 35e518a49c538d3700575d236ec10f4afb96d4e2 | [
"MIT"
] | permissive | nlinc1905/benfordsLaw | 5c0a0b88912ec0ccdb1e603f4923c5bb490ff21f | 43d101a1674d208419bc9d240c6ed471f8859e88 | refs/heads/master | 2021-01-11T12:20:52.338143 | 2018-05-22T23:11:42 | 2018-05-22T23:11:42 | 76,693,359 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,042 | r | plotFD.R | #' Plot of Benford's First Digit Test
#'
#' This function plots Benford's first digit test results as expected vs observed
#' digit frequencies: observed frequencies as grey bars, expected (Benford)
#' frequencies as a red line overlay.
#' @param firstDigitdf The results of the \code{benfordFirstDigit} function in dataframe format.
#'   Read here through the columns \code{First_Digit}, \code{Observed_Frequency}
#'   and \code{Expected_Frequency}.
#' @return Returns a ggplot object (print it to render the chart)
#' @export
#' @examples
#' plotFD(benfordFirstDigit(iris$Sepal.Width))
plotFD <- function(firstDigitdf) {
  require(ggplot2) # NOTE(review): library() is preferred over require() for hard dependencies
  fdPlot <- ggplot(firstDigitdf) +
    geom_bar(aes(x=First_Digit, y=Observed_Frequency), width=0.5, stat="identity", fill="slategray") +
    geom_line(aes(x=as.numeric(First_Digit), y=Expected_Frequency, color="Expected"), size=1.5) +
    scale_colour_manual("", breaks=c("Expected"), values=c("Expected"="indianred")) +
    ggtitle("First Digit Observed VS Expected") +
    labs(x="Digit", y="Frequency") +
    scale_y_continuous(expand=c(0.02, 0)) +
    theme(text=element_text(size=20), plot.title=element_text(color="#666666", face="bold", size=20),
          axis.title=element_text(color="#666666", face="bold", size=16))
  return(fdPlot)
} |
54ae208fa5a1ab157eaa3d15e79e9057a517078a | 065370e27d5d0dd082273f5a707f7c153628828b | /man/evaluatorAction.Rd | e4b65cb203a78b837506888f0727ce591cb1b4f1 | [] | no_license | takewiki/XR | 9622a1b019c28fadca5c254b4d61edd405756ef0 | 296bbcbc4c2d7d1f7761715923c52f17d047c612 | refs/heads/master | 2020-03-25T06:56:55.203485 | 2018-07-26T23:03:33 | 2018-07-26T23:03:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 978 | rd | evaluatorAction.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Interface.R
\docType{methods}
\name{evaluatorAction}
\alias{evaluatorAction}
\alias{evaluatorAction,language-method}
\alias{evaluatorAction,pathEl-method}
\title{Carry Out an Evaluator Initialization Action}
\usage{
evaluatorAction(action, ev)
\S4method{evaluatorAction}{language}(action, ev)
\S4method{evaluatorAction}{pathEl}(action, ev)
}
\arguments{
\item{action}{the action from the table. Must be an expression or some special
class, typically a path element to add to the server path.}
\item{ev}{the evaluator.}
}
\description{
This function is called from the Startup() method of an evaluator and is not
meant to be called directly. It is exported to make it visible from within
a subclass of "Interface".
}
\section{Methods (by class)}{
\itemize{
\item \code{language}: a language object, just evaluate it.
\item \code{pathEl}: a "pathEl" object to add to the server search path.
}}
|
8f9003de9c8f1dce15c5f3eb1c5e772e658aa093 | 599256b93ca49fa50aba87739b9e49ee3eba2081 | /pkg/R/checkWarnArgs.R | e6f40e3b6979f685d3a6ad960cfbaafbe04beae7 | [] | no_license | mamueller/linkeddocs | aad3a4dcd64942092f4efb2380e99f085b8d86d2 | 8c3d6ddf34a13170cc283b91deaa05733f8100bd | refs/heads/master | 2021-01-20T14:47:33.161695 | 2019-10-10T12:37:08 | 2019-10-10T12:37:08 | 90,661,291 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 521 | r | checkWarnArgs.R | #
# vim:set ff=unix expandtab ts=2 sw=2:
# Warn (without stopping) when a function's signature contains arguments that
# its documentation does not mention.
#
#   name            - label used in the warning (typically the function name)
#   function_args   - character vector of argument names from the signature
#   documented_args - character vector of argument names found in the docs
#
# Emits a single warning listing both sets plus the undocumented difference;
# stays silent when every argument is documented.
checkWarnArgs <- function(name, function_args, documented_args) {
  missing_docs <- setdiff(function_args, documented_args)
  if (length(missing_docs) > 0) {
    warning(
      sprintf(
        "Undocumented arguments found in %s.\n %s\n %s\n %s",
        name,
        paste("function_args=: ", toString(function_args)),
        paste("documented_args=: ", toString(documented_args)),
        paste("undocumented_args=: ", toString(missing_docs))
      )
    )
  }
}
|
7c2d787e9ee6f6489ee04113dc21044fe4c6fabc | bccc0513b5c83a129b909ae3b1ffef988ec87e6b | /R/colorscales.R | ba53ded1367fa12706a471962c95ce5cf4846cca | [] | no_license | Displayr/flipChartBasics | 2c0408e36f00c46cbdae9a53caf76bc10b8c6795 | 4c5573d82f72410bcde7b387a0a7253907b24d59 | refs/heads/master | 2023-07-11T00:18:05.474354 | 2023-06-26T05:09:59 | 2023-06-26T05:09:59 | 61,075,775 | 0 | 2 | null | 2023-08-17T05:54:47 | 2016-06-13T23:12:32 | R | UTF-8 | R | false | false | 6,934 | r | colorscales.R | #' Construct color scale for a numeric value
#'
#' @param x A numeric vector of values to map to colors, or a single integer
#'   n, in which case the sequence 1:n is used. Values outside
#'   [\code{min.x}, \code{max.x}] are clamped to those bounds.
#' @param min.x A numeric value representing the smallest value on the color scale. Values in
#' \code{x} smaller than this will be treated as \code{min.x}.
#' @param max.x A numeric value representing the largest value on the color scale. Values in
#' \code{x} larger than this will be treated as \code{max.x}.
#' @param mid.x A numeric value representing the midpoint on the color scale.
#'   When supplied, a divergent scale is built by recursing on the two halves
#'   (\code{min.color} to \code{mid.color}, then \code{mid.color} to \code{max.color}).
#' @param min.color The color representing the smallest value.
#' @param mid.color The color representing the mid point on the color scale.
#' @param max.color The color representing the max point on the color scale.
#' @return A character vector of hex colors, one per element of \code{x},
#'   carrying the names of \code{x} when it has any.
#' @export
MapToColors <- function(x, # A vector of values or an integer indicating the number of values
                        min.x = if (length(x) == 1) 1 else min(x), # The value represented by min.color
                        max.x = max(x),
                        mid.x = NULL, # Specify a value for two color divergent scales
                        min.color = "white",
                        mid.color = "Displayr grey",
                        max.color = "Displayr blue")
{
    if (length(x) == 1)
        x <- 1:x
    else
    {
        # Clamp out-of-range values to the scale's bounds.
        x[x < min.x] <- min.x
        x[x > max.x] <- max.x
    }
    if (is.null(mid.x))
    {
        # Single-gradient case: rescale x to [0, 1] and interpolate between
        # min.color and max.color via diverging.colormap.
        scaled.x <- (x - min.x) / (max.x - min.x)
        min.color <- asColorspace(min.color)[[1]]
        mid.color <- asColorspace(mid.color)[[1]]
        max.color <- asColorspace(max.color)[[1]]
        scaled.x <- c(0, scaled.x, 1) # Ensuring the full [0, 1] range is covered
        cols <- diverging.colormap(scaled.x, min.color, max.color)
        cols <- cols[2:(length(scaled.x) - 1), ] # drop the two sentinel endpoints added above
        colors <- rgb(cols[,1], cols[,2], cols[, 3])
        if (!is.null(names(x)))
            names(colors) <- names(x)
        return(colors)
    }
    # Divergent case: recurse on each side of mid.x and stitch the results.
    if (mid.x > min(x))
    {
        lower.colors <- MapToColors(x[x <= mid.x], min.x = min.x, max.x = mid.x, min.color = min.color, max.color = mid.color)
        if (mid.x >= max(x)) # everything lies at or below the midpoint
            return(lower.colors)
    }
    upper.colors <- MapToColors(x[x >= mid.x], min.x = mid.x, max.x = max.x, min.color = mid.color, max.color = max.color)
    if (mid.x <= min(x))
        return(upper.colors)
    colors <- rep(NA, length(x))
    colors[which(x <= mid.x)] <- lower.colors
    colors[which(x >= mid.x)] <- upper.colors
    if (!is.null(names(x)))
        names(colors) <- names(x)
    colors
}
# This function is based on Kenneth Moreland's code for creating Diverging Colormaps.
# Matlab code created by Andy Stein. Translated to R by Jose Gama.
#
# Arguments:
#   s             - a vector of interpolation positions between zero and one
#   rgb1, rgb2    - endpoint colors, objects from the colorspace package
#                   (RGB, sRGB, HLS, HSV, LAB, LUV, PolarLAB, PolarLUV, XYZ)
#   outColorspace - the color space for the output
# Returns a length(s) x 3 matrix of coordinates in outColorspace, with every
# entry clamped to [0, 1].
#' @importFrom colorspace LAB
#' @importFrom methods as
#' @importFrom verbs Sum
diverging.colormap <- function(s, rgb1, rgb2, outColorspace = 'sRGB')
{
  # LAB -> Msh (Moreland's polar form): M = magnitude, s = saturation angle,
  # h = hue angle; non-finite angles collapse to 0.
  LabToMsh<-function(Lab)
  {
    L<-Lab@coords[1]
    a<-Lab@coords[2]
    b<-Lab@coords[3]
    M <- sqrt(L*L + a*a + b*b)
    s <- (M > 0.001) * acos(L/M)
    h <- (s > 0.001) * atan2(b,a)
    if (!is.finite(s))
      s <- 0
    if (!is.finite(h))
      h <- 0
    c(M,s,h)
  }
  # Inverse of LabToMsh: rebuild a LAB color from (M, s, h).
  MshToLab<-function(Msh)
  {
    M<-Msh[1]
    s<-Msh[2]
    h<-Msh[3]
    L <- M*cos(s)
    a <- M*sin(s)*cos(h)
    b <- M*sin(s)*sin(h)
    LAB(L,a,b)
  }
  AngleDiff<-function(a1, a2)
  {
    # Given two angular orientations, returns the smallest angle between the two.
    v1<-matrix(c(cos(a1), sin(a1)),1,2,byrow=TRUE)
    v2<-matrix(c(cos(a2), sin(a2)),1,2,byrow=TRUE)
    x<-acos(Sum(v1 * v2, remove.missing = FALSE))
    x
  }
  AdjustHue<-function(msh, unsatM)
  {
    # For the case when interpolating from a saturated color to an unsaturated
    # color, find a hue for the unsaturated color that makes sense.
    if (msh[1] >= unsatM-0.1 ) {
      # The best we can do is hold hue constant.
      h <- msh[3]
    } else {
      # This equation is designed to make the perceptual change of the interpolation to be close to constant.
      hueSpin <- (msh[2]*sqrt(unsatM^2 - msh[1]^2)/(msh[1]*sin(msh[2])))
      # Spin hue away from 0 except in purple hues.
      if (msh[3] > -0.3*pi) h <- msh[3] + hueSpin else h <- msh[3] - hueSpin
    }
    h
  }
  # Interpolate a single scalar position s in [0, 1] between rgb1 and rgb2.
  diverging.map.1val<-function(s, rgb1, rgb2, outColorspace='sRGB')
  {
    # Interpolate a diverging color map
    # s is a number between 0 and 1
    msh1 <- LabToMsh(as(rgb1, "LAB"))
    msh2 <- LabToMsh(as(rgb2, "LAB"))
    # If the endpoints are distinct saturated colors, then place white in between them
    if (msh1[2] > 0.05 & msh2[2] > 0.05 & AngleDiff(msh1[3],msh2[3]) > pi/3)
    {
      # Insert the white midpoint by setting one end to white and adjusting the scalar value.
      Mmid <- max(88.0, msh1[1], msh2[1])
      #Mmid <- max(Mmid)
      if (s < 0.5)
      {
        msh2[1] <- Mmid; msh2[2] <- 0.0; msh2[3] <- 0.0;s <- 2.0*s
      } else {
        msh1[1] <- Mmid; msh1[2] <- 0.0; msh1[3] <- 0.0; s <- 2.0*s - 1.0
      }
    }
    # If one color has no saturation, then its hue value is invalid. In this
    # case, we want to set it to something logical so that the interpolation of hue makes sense.
    if ((msh1[2] < 0.05) & (msh2[2] > 0.05)) {
      msh1[3] <- AdjustHue(msh2, msh1[1])
    } else if ((msh2[2] < 0.05) & (msh1[2] > 0.05)) {
      msh2[3] <- AdjustHue(msh1, msh2[1])
    }
    # Linear interpolation of the two endpoints in Msh space.
    mshTmp<-msh1
    mshTmp[1] <- (1-s)*msh1[1] + s*msh2[1]
    mshTmp[2] <- (1-s)*msh1[2] + s*msh2[2]
    mshTmp[3]<- (1-s)*msh1[3] + s*msh2[3]
    # Now convert back to the desired color space
    as(MshToLab(mshTmp),outColorspace)
  }
  dvmap<-matrix(0,length(s),3)
  for (n in 1:length(s))
    dvmap[n,]<-diverging.map.1val(s[n], rgb1, rgb2, outColorspace)@coords
  # Modifications by Tim Bock 7 May 2018
  dvmap[dvmap < 0] <- 0
  dvmap[dvmap > 1] <- 1
  dvmap
}
#' @importFrom colorspace RGB
# Map a vector of color names to colorspace RGB objects.
# x -- character vector; the four "Displayr *" brand names get hard-coded
#      RGB values, anything else is resolved through grDevices::col2rgb()
#      (so standard R color names and hex strings also work).
# Returns the Vectorize()d conversion result, one RGB object per element.
asColorspace <- function(x)
{
  convertColors <- Vectorize(function(a) {
    switch(a,
           "Displayr red" = colorspace::RGB(250/255, 97/255, 75/255),
           "Displayr blue" = colorspace::RGB(62/255, 125/255, 204/255),
           "Displayr green" = colorspace::RGB(0, 200/255, 200/255),
           "Displayr grey" = colorspace::RGB(.865,.865, .865),
           # Fallback branch: convert via col2rgb, rescaled to [0, 1].
           {
             cols <- col2rgb(a) / 255
             RGB(cols[1], cols[2], cols[3])
           })
  })
  convertColors(x)
}
|
e6e1e6151464d1294d696f4de10976c87996fbaa | a70f7eed26f98618562188a7c3b1e00a0c9e0662 | /plot2.R | 5b985131083a3ebe9420ec10c83825797554a024 | [] | no_license | JgLgRigel/ExData_Plotting1 | cad9169c6e89e33b4f1e4b0f7aaa70243694d191 | e8cb0b01d423b5b0ab7c1b03298de5562d158c18 | refs/heads/master | 2021-01-18T20:57:02.783624 | 2015-03-09T00:13:06 | 2015-03-09T00:13:06 | 31,861,566 | 0 | 0 | null | 2015-03-08T19:06:18 | 2015-03-08T19:06:17 | null | UTF-8 | R | false | false | 403 | r | plot2.R | library(dplyr)
# Read the household power consumption data; "?" marks missing values.
# Fix: spell out `na.strings` -- the original `na.string` only worked
# through R's partial argument matching.
dataset <- read.table("household_power_consumption.txt", header = TRUE,
                      sep = ";", na.strings = "?")
# Keep only the two days of interest: 1-2 February 2007.
dataset <- filter(dataset, Date == "1/2/2007" | Date == "2/2/2007")
# Combine date and time into a single timestamp for the x axis.
dataset$Date_Time <- strptime(paste(dataset$Date, dataset$Time),
                              format = "%d/%m/%Y %H:%M:%S")
# Write the line plot of global active power to plot2.png.
png("plot2.png")
plot(dataset$Date_Time, dataset$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
|
43abf860439a1b87094025d3bf116ca333225bbc | 901331f01a6cd4ebdba4e25f30fd4a36da924191 | /man/mopt_config.Rd | 64ef9df4ddee6564cf0cb14e2e6b63339d7a0c53 | [] | no_license | priscillafialho/mopt | f49a26add6ef11096fc97bf6ea89d1cb2d7cc29d | 6b7fc8124a56a0239225296114ff6128cf9b4a40 | refs/heads/master | 2021-01-22T16:37:34.838400 | 2014-10-22T23:11:01 | 2014-10-22T23:11:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 183 | rd | mopt_config.Rd | \name{mopt_config}
\alias{mopt_config}
\title{Create the configuration for the MCMC chain}
\usage{
mopt_config(p)
}
\description{
Create the configuration for the MCMC chain.
}
|
f5476818a8f7218f2b90030deac622f9a3b84a7a | c6804658a1705bcace8b93575bb9296df67484ec | /SW4 Midterm QUIJANO.R | 99632a92c02f7abccadcc41ff070297551c42e12 | [] | no_license | daniellequijano/Midterm | 42b9ca70956afadbd4c4299ce43e83e87acd2dcf | 5361f80ccc468cf0ccb03c1927775bc50c366760 | refs/heads/master | 2020-03-22T05:08:23.833571 | 2018-09-04T02:15:17 | 2018-09-04T02:15:17 | 139,544,757 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 443 | r | SW4 Midterm QUIJANO.R |
# SW4 Midterm: image resizing with EBImage (local file) and magick (web).
# Install EBImage from Bioconductor if needed, then load the packages
# (the original loaded EBImage twice and installed it after loading).
source("http://bioconductor.org/biocLite.R")
biocLite("EBImage")
library(EBImage)
library(magick)
#LOCALLY
img <- readImage("sak.jpg")
# Width/height of the original image.
# Fix: the original called dim(x) on an undefined object `x`.
dim(img)[1:2]
y <- resize(img, w = 250, h = 300)
display(y)
z <- imageData(y)  # pixel array of the resized image
dim(y)
#WEB
img2 <- image_read("https://d384u2mq2suvbq.cloudfront.net/public/spree/products/1597/jumbo/Japanese-Cherry-Blossom-Fragrance-Oil.jpg?1529607178")
# Resize to 250px height, preserving aspect ratio ("x250" geometry).
a <- image_resize(img2, "x250")
a
|
2f2ac8e0f9593d69f2f3d292b39ae0060afac773 | c61fc77c97074dea99e06abb0f3f446b0d9aad8b | /tests/testthat/test-HttrAdapter.R | 4726a91b73dacab1b5dd21c80b9a174c66f86e5b | [
"MIT"
] | permissive | Jenaimarr/webmockr | 5717fda34c036c1da9962ac3e155a9abf1cf7753 | 6f0ec003333572efdfb1266634146a0d33bcd9a3 | refs/heads/master | 2020-08-30T12:57:04.926993 | 2019-08-12T16:35:45 | 2019-08-12T16:35:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,918 | r | test-HttrAdapter.R | context("HttrAdapter")
skip_if_not_installed("httr")
library("httr")
# Shared adapter instance exercised by the interface tests below.
aa <- HttrAdapter$new()
# Interface check: generator class, instance class, and the expected
# public methods/fields on an HttrAdapter instance.
test_that("HttrAdapter bits are correct", {
  skip_on_cran()
  expect_is(HttrAdapter, "R6ClassGenerator")
  expect_is(aa, "HttrAdapter")
  expect_null(aa$build_httr_request) # pulled out of object, so should be NULL
  expect_null(aa$build_httr_response) # pulled out of object, so should be NULL
  expect_is(aa$disable, "function")
  expect_is(aa$enable, "function")
  expect_is(aa$handle_request, "function")
  expect_is(aa$remove_httr_stubs, "function")
  expect_is(aa$name, "character")
  expect_equal(aa$name, "httr_adapter")
})
# enable()/disable() should announce themselves via message().
test_that("HttrAdapter behaves correctly", {
  skip_on_cran()
  expect_message(aa$enable(), "HttrAdapter enabled!")
  expect_message(aa$disable(), "HttrAdapter disabled!")
})
# Both builder helpers require their single argument.
test_that("build_httr_request/response fail well", {
  skip_on_cran()
  expect_error(build_httr_request(), "argument \"x\" is missing")
  expect_error(build_httr_response(), "argument \"req\" is missing")
})
# library(httr)
# z <- GET("https://httpbin.org/get")
# httr_obj <- z$request
# save(httr_obj, file = "tests/testthat/httr_obj.rda")
context("HttrAdapter: date slot")
# Replays a recorded vcr cassette and checks the two date representations
# on the mocked httr response object.
test_that("HttrAdapter date slot works", {
  skip_on_cran()
  skip_if_not_installed("vcr")
  library("vcr")
  path <- file.path(tempdir(), "foobar")
  vcr::vcr_configure(dir = path)
  # Record the interaction once, then replay it inside the cassette.
  vcr::use_cassette("test-date", GET("https://httpbin.org/get"))
  # list.files(path)
  # readLines(file.path(path, "test-date.yml"))
  vcr::insert_cassette("test-date")
  x <- httr::GET("https://httpbin.org/get")
  # $date is of correct format (a POSIXct that format() accepts)
  expect_output(print(x), "Date")
  expect_is(x$date, "POSIXct")
  expect_is(format(x$date, "%Y-%m-%d %H:%M"), "character")
  # $headers$date is a different format: a raw character string, so
  # format() with date codes fails on it.
  expect_is(x$headers$date, "character")
  expect_error(format(x$headers$date, "%Y-%m-%d %H:%M"), "invalid 'trim'")
  vcr::eject_cassette("test-date")
  # cleanup
  unlink(path, recursive = TRUE)
})
context("HttrAdapter: insensitive headers, webmockr flow")
# Response headers should be retrievable case-insensitively when the
# response comes from a webmockr stub (no vcr involvement).
test_that("HttrAdapter insensitive headers work, webmockr flow", {
  skip_on_cran()
  unloadNamespace("vcr")
  httr_mock()
  stub_registry_clear()
  # Stub returns JSON with a capitalised "Content-Type" header ...
  invisible(stub_request("get", uri = "https://httpbin.org/get") %>%
    to_return(
      body = list(foo = "bar"),
      headers = list("Content-Type" = "application/json")
    ))
  x <- httr::GET("https://httpbin.org/get")
  # ... but lookup by the lower-cased name must still work.
  expect_equal(x$headers[["content-type"]], "application/json")
  expect_is(httr::content(x), "list")
  expect_is(httr::content(x, "text", encoding = "UTF-8"), "character")
  stub_registry_clear()
  httr_mock(FALSE)
})
context("HttrAdapter: insensitive headers, vcr flow")
# Same case-insensitivity guarantee when the response is replayed from a
# vcr cassette instead of a direct stub.
test_that("HttrAdapter insensitive headers work, vcr flow", {
  skip_on_cran()
  skip_if_not_installed("vcr")
  library("vcr")
  path <- file.path(tempdir(), "helloworld")
  vcr::vcr_configure(dir = path)
  vcr::use_cassette("test-date", GET("https://httpbin.org/get"))
  vcr::insert_cassette("test-date")
  x <- httr::GET("https://httpbin.org/get")
  expect_equal(x$headers[["content-type"]], "application/json")
  expect_is(httr::content(x), "list")
  expect_is(httr::content(x, "text", encoding = "UTF-8"), "character")
  vcr::eject_cassette("test-date")
  # cleanup
  unlink(path, recursive = TRUE)
})
context("HttrAdapter: works with real data")
# End-to-end test driving handle_request() with a serialized httr request
# object (httr_obj.rda) through three scenarios: no stub, a stub with
# headers, and a stub that returns a 302 redirect.
test_that("HttrAdapter works", {
  skip_on_cran()
  skip_if_not_installed("vcr")
  load("httr_obj.rda")
  # load("tests/testthat//httr_obj.rda")
  res <- HttrAdapter$new()
  # with vcr message: while vcr is attached, an unstubbed request errors
  # with the "no cassette" message.
  library("vcr")
  expect_error(
    res$handle_request(httr_obj),
    "There is currently no cassette in use"
  )
  # with webmockr message
  # unload vcr: now webmockr itself reports the unregistered request.
  unloadNamespace("vcr")
  expect_error(
    res$handle_request(httr_obj),
    "Real HTTP connections are disabled.\nUnregistered request:\n  GET:  https://httpbin.org/get"
  )
  # Scenario 1: bare stub -> response with no headers.
  invisible(stub_request("get", "https://httpbin.org/get"))
  aa <- res$handle_request(httr_obj)
  expect_is(res, "HttrAdapter")
  expect_is(aa, "response")
  expect_equal(aa$request$method, "GET")
  expect_equal(aa$url, "https://httpbin.org/get")
  # no response headers
  expect_equal(length(aa$headers), 0)
  expect_equal(length(aa$all_headers), 1)
  # with headers
  # clear registry
  stub_registry_clear()
  # stub with headers; names are normalised to lower case on the response
  x <- stub_request("get", "https://httpbin.org/get")
  x <- to_return(x, headers = list("User-Agent" = "foo-bar"))
  aa <- res$handle_request(httr_obj)
  expect_is(res, "HttrAdapter")
  expect_is(aa, "response")
  expect_equal(aa$request$method, "GET")
  expect_equal(aa$url, "https://httpbin.org/get")
  # has headers and all_headers
  expect_equal(length(aa$headers), 1)
  expect_is(aa$headers, "list")
  expect_named(aa$headers, "user-agent")
  expect_equal(length(aa$all_headers), 1)
  expect_is(aa$all_headers, "list")
  expect_named(aa$all_headers, NULL)
  expect_named(aa$all_headers[[1]], c("status", "version", "headers"))
  # stub with redirect headers: status and location must both come through
  my_url <- "https://doi.org/10.1007/978-3-642-40455-9_52-1"
  x <- stub_request("get", my_url)
  x <- to_return(x, status = 302, headers =
    list(
      status = 302,
      location = "http://link.springer.com/10.1007/978-3-642-40455-9_52-1"
    )
  )
  httr_obj$url <- my_url
  res <- HttrAdapter$new()
  aa <- res$handle_request(httr_obj)
  expect_equal(aa$request$method, "GET")
  expect_equal(aa$url, my_url)
  expect_equal(aa$status_code, 302)
  # has headers and all_headers
  expect_equal(length(aa$headers), 2)
  expect_is(aa$headers, "list")
  expect_equal(sort(names(aa$headers)), c("location", "status"))
  expect_equal(length(aa$all_headers), 1)
  expect_equal(length(aa$all_headers[[1]]), 3)
  expect_is(aa$all_headers, "list")
  expect_is(aa$all_headers[[1]], "list")
  expect_named(aa$all_headers, NULL)
  expect_equal(sort(names(aa$all_headers[[1]])),
    c("headers", "status", "version"))
})
|
9c7e3629a5cada5ea45f108e0ddf892007c50456 | a79fdb5989b61f031d5cbbe970a038052644814d | /data-raw/children.R | 13a9f0b21d4bdca5058a9c0e34fcd6d740ca8432 | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jjchern/melig | 53949300fd03fae912244b4f58203025d54138d2 | f665b66b752e522d68307b4dd656e5aa9768ef07 | refs/heads/master | 2021-01-17T14:53:39.447394 | 2019-02-12T00:04:05 | 2019-02-12T00:04:05 | 46,519,862 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,572 | r | children.R |
# Medicaid and CHIP income cutoff for children --------------------------------
library(tidyverse)
read_csv("data-raw/infants_age_0_1_medicaid.csv", skip = 2) %>%
select(-Footnotes) %>%
filter(Location != "United States") %>%
rename(state = Location) %>%
inner_join(fips::fips, by = "state") %>%
select(state, fips, usps, everything()) %>%
gather(year, cutoff, -state:-usps) %>%
separate(year, c("month", "year")) %>%
mutate(type = "Medicaid", agegrp = "0-1") %>%
mutate(cutoff = as.numeric(cutoff) * 100) %>% # filter(is.na(cutoff))
# TN 2000 and 2002 have not upper limits
mutate(cutoff = if_else(usps == "TN" & year %in% c(2000, 2002), 9999, cutoff)) %>%
select(state, fips, usps, type, agegrp, everything()) %>%
print() -> infant0_1
read_csv("data-raw/children_age_1_5_medicaid.csv", skip = 2) %>%
select(-Footnotes) %>%
filter(Location != "United States") %>%
rename(state = Location) %>%
inner_join(fips::fips, by = "state") %>%
select(state, fips, usps, everything()) %>%
gather(year, cutoff, -state:-usps) %>%
separate(year, c("month", "year")) %>%
mutate(type = "Medicaid", agegrp = "1-5") %>%
mutate(cutoff = as.numeric(cutoff) * 100) %>% # filter(is.na(cutoff))
# TN 2000 and 2002 have not upper limits
mutate(cutoff = if_else(usps == "TN" & year %in% c(2000, 2002), 9999, cutoff)) %>%
select(state, fips, usps, type, agegrp, everything()) %>%
print() -> children1_5
read_csv("data-raw/children_age_6_18_medicaid.csv", skip = 2) %>%
select(-Footnotes) %>%
filter(Location != "United States") %>%
rename(state = Location) %>%
inner_join(fips::fips, by = "state") %>%
select(state, fips, usps, everything()) %>%
tidyr::gather(year, cutoff, -state:-usps) %>%
tidyr::separate(year, c("month", "year")) %>%
mutate(type = "Medicaid", agegrp = "6-18") %>%
mutate(cutoff = as.numeric(cutoff) * 100) %>% # filter(is.na(cutoff))
# TN 2000 and 2002 have not upper limits
mutate(cutoff = if_else(usps == "TN" & year %in% c(2000, 2002), 9999, cutoff)) %>%
select(state, fips, usps, type, agegrp, everything()) %>%
print() -> children6_18
read_csv("data-raw/children_age_0_18_chip.csv", skip = 2) %>%
select(-Footnotes) %>%
filter(Location != "United States") %>%
rename(state = Location) %>%
inner_join(fips::fips, by = "state") %>%
select(state, fips, usps, everything()) %>%
gather(year, cutoff, -state:-usps) %>%
separate(year, c("month", "year")) %>%
mutate(type = "CHIP", agegrp = "0-18") %>%
mutate(cutoff = as.numeric(cutoff) * 100) %>% # filter(is.na(cutoff)), No CHIP program
select(state, fips, usps, type, agegrp, everything()) %>%
print() -> chip0_18
read_csv("data-raw/children_chip_mcaid.csv", skip = 2) %>%
select(-Footnotes) %>%
filter(Location != "United States") %>%
rename(state = Location) %>%
inner_join(fips::fips, by = "state") %>%
select(state, fips, usps, everything()) %>%
gather(year, cutoff, -state:-usps) %>%
separate(year, c("month", "year")) %>%
mutate(type = "CHIP/Mcaid Upper", agegrp = "0-18") %>%
mutate(cutoff = as.numeric(cutoff) * 100) %>% # filter(is.na(cutoff))
# TN 2000 and 2002 have not upper limits
mutate(cutoff = if_else(usps == "TN" & year %in% c(2000, 2002), 9999, cutoff)) %>%
select(state, fips, usps, type, agegrp, everything()) %>%
print() -> chip_mcaid_upper
infant0_1 %>%
bind_rows(children1_5) %>%
bind_rows(children6_18) %>%
bind_rows(chip0_18) %>%
bind_rows(chip_mcaid_upper) %>%
arrange(fips, year) %>%
print() -> children
usethis::use_data(children, overwrite = TRUE)
|
a466d0d17353487d4c7f4d516dfd0e745cd18fd6 | a0a51f98e534dd152f7cee4a66b159dea0aef2f9 | /kangaroo.R | 2b34257e646307694bdbd7cdbcfe5e226445b008 | [] | no_license | rjsaito/Auto-Insurance-Project | 369e425011ddb9a31c4dedf7de6b48696f1bba00 | 8ed468d9a0f7efd799e419323fd9935a9044d303 | refs/heads/master | 2021-01-10T05:18:00.983978 | 2016-09-12T02:10:28 | 2016-09-12T02:10:28 | 50,534,320 | 5 | 5 | null | null | null | null | UTF-8 | R | false | false | 7,077 | r | kangaroo.R | setwd("C:/Users/Riki/Dropbox/UMN Courses/STAT 8051/Travelers/")
# Kangaroo auto-insurance data: models are fit on the "T" (train) split;
# the remaining rows are held out for evaluation.
kangaroo <- read.csv("Kangaroo.csv")
library(dplyr)
kangtrain <- subset(kangaroo, split == "T")
kangtest <- subset(kangaroo, split != "T")
#"NormalizedGini" is the other half of the metric. This function does most of the work, though
# Unnormalized Gini: order cases by predicted value (descending), then
# accumulate how far the Lorentz curve of observed positives sits above
# the diagonal of a random ordering.
#   solution   -- vector of observed outcomes (e.g. claim costs)
#   submission -- predicted values, same length as `solution`
# Returns sum(Lorentz - random) over all cases.
# (Cleanup: removed two dead bare `df` statements from the original.)
SumModelGini <- function(solution, submission) {
  df <- data.frame(solution = solution, submission = submission)
  # Rank cases from highest to lowest prediction.
  df <- df[order(df$submission, decreasing = TRUE), ]
  # Cumulative share of cases under a random ordering (the diagonal).
  df$random <- seq_len(nrow(df)) / nrow(df)
  totalPos <- sum(df$solution)
  df$cumPosFound <- cumsum(df$solution) # cumulative number of positive examples found (used for computing "Model Lorentz")
  df$Lorentz <- df$cumPosFound / totalPos # cumulative proportion of positive examples found ("Model Lorentz")
  df$Gini <- df$Lorentz - df$random # Lorentz minus random
  sum(df$Gini)
}
# Normalized Gini: the model's unnormalized Gini scaled by the best
# achievable score, i.e. the Gini of a perfect ordering of `solution`.
NormalizedGini <- function(solution, submission) {
  achieved <- SumModelGini(solution, submission)
  best_possible <- SumModelGini(solution, solution)
  achieved / best_possible
}
#data
# Exploratory plots: claim cost against each candidate rating factor
# (claims-only subsets use clm > 0).
boxplot(claimcst0~veh_body, data=kangtrain)
# Pairwise comparisons of mean claim cost across vehicle body types.
thsd = TukeyHSD(aov(claimcst0~veh_body, data=subset(kangtrain, clm>0)))
boxplot(claimcst0~gender,data=subset(kangtrain, clm>0))
boxplot(claimcst0~area, data=subset(kangtrain, clm>0))
boxplot(claimcst0~factor(agecat), data=subset(kangtrain, clm>0))
# Scatterplot matrices: all policies, then claims only.
pairs(kangtrain[,c("numclaims","claimcst0","veh_value","exposure","veh_age","area","agecat")])
pairs(subset(kangtrain,clm>0)[,c("numclaims","claimcst0","veh_value","exposure","veh_age","area","agecat")])
jpeg("hist_claimcst0.jpeg")
hist(kangtrain$claimcst0, main="Histogram of claimcst0", xlab="claimcst0")
dev.off()
# Frequency tables: claim indicator and number of claims.
table(kangtrain$clm)
table(kangtrain$numclaim)
# models
#0 Null Model: Gini of predicting the overall mean for everyone (baseline).
NormalizedGini(solution = kangtrain$claimcst0, submission = mean(kangtrain$claimcst0))
#1 Full OLS on all rating factors.
ev = c("veh_value","exposure","veh_body","veh_age","gender","area","agecat")
ols <- lm(claimcst0 ~ veh_value+exposure+veh_body+veh_age+gender+
          area+agecat, data=kangtrain)
summary(ols)
library(car)
Anova(ols)
# Outer parentheses print the assigned Gini score.
(ols.gini = NormalizedGini(solution = kangtrain$claimcst0, submission = predict(ols)))
#2: GLM Tweedie distribution link
# Models claim cost per unit exposure; var.power=1.3 is between Poisson (1)
# and gamma (2), link.power=0 gives the log link.
library(statmod)
tweed <- glm((claimcst0/exposure) ~ (veh_value+area+veh_age+gender+factor(agecat))^2,
             data=kangtrain, family=tweedie(var.power=1.3,link.power=0))
summary(tweed)
# Predictions are per unit exposure, so scale back up before scoring.
(tweed.gini = NormalizedGini(solution = kangtrain$claimcst0, submission = predict(tweed,type="response")*kangtrain$exposure))
#3: two part model (Frequency * Severity)
# We will divide the data into two levels of severity
# offset as a weight (frequency) or offset (count)?
#model 1: count (offset(log(exposure)) -- Poisson with log-exposure offset
pm.count = glm(numclaims ~ offset(log(exposure))+(veh_value+veh_age+gender+area+factor(agecat))^2,
               family = poisson, data=kangtrain)
pm.count.sub = step(pm.count, test="Chi")
# Pearson dispersion statistic for the stepwise-selected model.
(dp<-sum(residuals(pm.count.sub,type="pearson")^2)/pm.count.sub$df.res)
summary(pm.count.sub)
# Hand-trimmed count model with selected main effects and interactions.
pm.small = glm(numclaims ~ offset(log(exposure))+veh_value+area+factor(agecat)+
               veh_value:veh_age+veh_value:area+gender:area, family=poisson, data=kangtrain)
summary(pm.small)
# frequency ( exposure as weight )
pm.freq = glm(numclaims ~ (veh_value+veh_age+gender+area+agecat)^2,
              weight = exposure, family = poisson, data=kangtrain)
pm.freq.sub = step(pm.freq, test="Chi")
(dp<-sum(residuals(pm.freq.sub,type="pearson")^2)/pm.freq.sub$df.res)
summary(pm.freq.sub)
# freq and count: compare the two frequency formulations on all policies.
Freq = predict(pm.freq.sub, newdata = kangaroo, type="response")
Count = predict(pm.small, newdata = kangaroo, type="response")
plot(Freq, Count); abline(0,1)
# Attach both predictions for use as severity-model covariates.
kang.updated = mutate(kangaroo, freq = as.numeric(Freq), count = as.numeric(Count))
outlier = which(Freq == max(Freq))
kangaroo[outlier,] #this guy is a trouble maker
#model 2: severity -- cost per claim, fitted on claims only (clm > 0)
# gamma model (terrible!)
gam.freq <- glm((claimcst0/numclaims) ~ 0+I(freq^2)+(freq+veh_value+gender+area+factor(agecat))^2,
                family=Gamma(link="log"),data=subset(kang.updated,clm > 0 & split=="T"))
gam.freq.sub = step(gam.freq)
summary(gam.freq.sub)
gam.count <- glm((claimcst0/numclaims) ~ 0+I(count^2)+(count+veh_value+gender+area+factor(agecat))^2,
                 family=Gamma(link="log"),data=subset(kang.updated,clm > 0 & split=="T"))
gam.count.sub = step(gam.count)
summary(gam.count.sub)
#inverse gaussian model
ivg <- glm((claimcst0/numclaims) ~ veh_value+gender+area+factor(agecat),
           family=inverse.gaussian(link="log"),data=subset(kang.updated, clm > 0 & split=="T"))
ivg.sub = step(ivg)
summary(ivg.sub)
# predictions: each two-part prediction is frequency (or count) x severity.
preds = data.frame(
  claimcst0 = kang.updated$claimcst0,
  numclaims = kang.updated$numclaims,
  tweedie = predict(tweed, newdata=kang.updated, type="response")*kang.updated$exposure,
  gam.freq = Freq*predict(gam.freq.sub, newdata=kang.updated, type="response"),
  gam.count = Count*predict(gam.count.sub, newdata=kang.updated, type="response"),
  ivg.freq = Freq*predict(ivg.sub, newdata=kang.updated, type="response"),
  ivg.count = Count*predict(ivg.sub, newdata=kang.updated, type="response"),
  split = kang.updated$split
)
# gini coefficient on train data (columns 3, 6, 7: tweedie, ivg.freq, ivg.count)
apply(subset(preds, split == "T")[,c(3,6:7)], 2, function(x) NormalizedGini(kangtrain$claimcst0, x))
# means of test data predictions
colMeans(subset(preds, split == "T")[,c(3,6:7)])
colMeans(subset(preds, split != "T")[,c(3,6:7)])
# Empirical CDFs of the competing predictions.
library(lattice)
library(latticeExtra)
ecdfplot(~ tweedie + ivg.freq + ivg.count, data=preds, xlim=c(0,20000), auto.key=list(space='right'))
#cross validation with gini coefficients
# K-fold cross-validation for one- or two-part GLMs.
#   fit   -- fitted glm for the first (or only) model component
#   fit2  -- optional second-stage glm; when supplied, fold predictions
#            are the product pred(fit) * pred(fit2)
#   data  -- data used to refit `fit` on each training fold
#   data2 -- data for `fit2`, restricted to fold rows via merge()
#   K     -- number of folds (K == 1 trains and predicts on the same rows)
#   R     -- NOTE(review): accepted but never referenced
# Returns a list with $Fitted: out-of-fold predictions in original row order.
cv <- function(fit, fit2 = NULL, data, data2 = NULL, K, R){
  cost = function(y, yhat) mean((y - yhat)^2)
  n = nrow(data)
  # Random fold assignment; K == 1 puts every row in a single fold.
  if(K > 1) s = sample(rep(1:K, ceiling(nrow(data)/K)),nrow(data)) else
    if(K == 1) s = rep(1, nrow(data))
  glm.y <- fit$y
  cost.0 <- cost(glm.y, fitted(fit))  # NOTE(review): computed but never used
  ms <- max(s)
  # Re-evaluate the original model call with each fold's training data.
  call <- Call <- fit$call
  if(!is.null(fit2)) call2 <- Call2 <- fit2$call
  CV <- CV.coef <- NULL
  for (i in seq_len(ms)) {
    j.out <- seq_len(n)[(s == i)]   # held-out rows for this fold
    if(K > 1) j.in <- seq_len(n)[(s != i)] else if (K==1) j.in = j.out
    Call$data <- data[j.in, , drop = FALSE]
    d.glm <- eval.parent(Call)      # refit component 1 on the training fold
    pred.glm <- predict(d.glm, newdata=data[j.out,], type="response")
    if(!is.null(fit2) & !is.null(data2)){
      # Second stage: merge data2 with the fold rows, refit, and predict
      # on the held-out rows.
      j2.out.data <- merge(data2, data[j.out,])
      if(K > 1) j2.in.data <- merge(data2, data[j.in,]) else if (K==1) j2.in.data = j2.out.data
      Call2$data <- j2.in.data
      d.glm2 <- eval.parent(Call2)
      pred.glm2 <- predict(d.glm2, newdata=data[j.out,], type="response")
    }
    # Two-part prediction is the product of the component predictions.
    if(!is.null(fit2)) CV$Fitted = rbind(CV$Fitted, cbind(j.out, pred.glm*pred.glm2)) else
      CV$Fitted = rbind(CV$Fitted, cbind(j.out, pred.glm))
    CV.coef$coef <- rbind(CV.coef$coef, coef(d.glm))
    CV.coef$se <- rbind(CV.coef$se, coef(summary(d.glm))[,2])
  }
  # Restore original row order and keep only the prediction column.
  CV$Fitted <- CV$Fitted[order(CV$Fitted[,1]),2]
  CV
}
# 10-fold CV Gini for the tweedie model (predictions are per unit
# exposure, so multiply back by exposure before scoring).
cv.tweed <- cv(fit=tweed,data= kangtrain, K=10)
NormalizedGini(kangtrain$claimcst0, cv.tweed$Fitted*kangtrain$exposure)
# 10-fold CV Gini for the two-part count x inverse-Gaussian severity model.
cv.ivg.count <- cv(fit=pm.count.sub, fit2=ivg.sub, data = kangtrain, data2=subset(kangtrain, clm>0), K=10)
NormalizedGini(kangtrain$claimcst0, cv.ivg.count$Fitted)
|
f2b766f7cf7b243da9be7a115df2fa98451602a5 | a588dd1a34555dd71c898c82fbc7016dcc9cbdb3 | /OHDSITrends/man/get_event_id.Rd | 0ff215064618be888f7a9833f5fcf094cee9cec4 | [] | no_license | NEONKID/StudyProtocolSandbox | 5e9b0d66d88a610a3c5cacb6809c900a36bc35c3 | c26bd337da32c6eca3e5179c78ac5c8f91675c0f | refs/heads/master | 2020-03-23T14:02:11.887983 | 2018-10-19T05:33:13 | 2018-10-19T05:33:13 | 141,651,747 | 0 | 1 | null | 2018-07-20T02:10:06 | 2018-07-20T02:10:06 | null | UTF-8 | R | false | true | 592 | rd | get_event_id.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Verified Functions.R
\name{get_event_id}
\alias{get_event_id}
\title{Return the Athena event analysis id
associated with a particular event}
\usage{
get_event_id(event_type)
}
\arguments{
\item{event_type}{a string-based expression for the medical event of interest;
do not include spaces; not case-sensitive}
}
\value{
integer analysis id from Athena
}
\description{
This function returns the Athena event analysis id
associated with a particular medical event.
}
|
5598fd82a4c25a7461f7716017354748aa92ffa5 | 3747b3b095f9d967468ba301116eefb157dd68ac | /P3.R | ee462843de950993477246cadc744c7e5c34f055 | [] | no_license | marcosvarela5/r | 542e04d3a93efdacfaafb746f1e419e1aa61ac3b | 3da5c403494e94fe116d815b72cdf016d62e066a | refs/heads/master | 2023-01-03T09:21:42.643530 | 2020-10-14T09:07:12 | 2020-10-14T09:07:12 | 299,761,886 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,502 | r | P3.R | set.seed(1) #Establecer una semilla.
sample(1:6,1) # Draw one random value from 1:6 (a fair die roll)
runif(10, -1, 1) # Generate 10 uniform random numbers on (-1, 1)
# EXERCISE 3.3: linear congruential generator
#1. Define the generator
# Linear congruential generator: x_{k+1} = (a + b * x_k) mod m, with the
# output rescaled to [0, 1) by dividing each state by m.
#   n       -- number of values to generate
#   semilla -- seed (initial state x_0)
#   a, b, m -- additive constant, multiplier, and modulus
glc <- function(n, semilla = 1, a = 0, b = 7^5, m = 2^31 - 1) {
  valores <- numeric(n)
  estado <- semilla
  for (k in seq_len(n)) {
    estado <- (a + b * estado) %% m
    valores[k] <- estado / m
  }
  valores
}
#2. Generate 500 values with a = 1, b = 5, m = 512
u<-glc(n=500,a=1,b=5,m=512)
#3. Histogram vs. the Uniform(0, 1) density (horizontal line at 1)
hist(u, freq=FALSE)
abline(h=1,col=2)
#4. Sample mean (should be near 0.5 for a uniform sample)
mean(u)
#5. Estimated probability of falling in (0.4, 0.8)
mean((u>0.4)&(u<0.8))
#6. Lag plot u[t] vs u[t+1] to inspect serial dependence
plot(u[-500], u[-1])
#7. Repeat with the default parameters (a = 0, b = 7^5, m = 2^31 - 1)
u<-glc(n=500)
hist(u, freq=FALSE)
abline(h=1,col=2)
mean(u)
mean((u>0.4)&(u<0.8))
plot(u[-500], u[-1])
# EXERCISE 3.4: P(X + Y < 0) for X, Y ~ Uniform(-1, 1)
#1. A single simulated trial
x<-runif(1,-1,1)
y<-runif(1,-1,1)
x+y<0
#2. Monte Carlo estimate over N trials
N<-10000
x<-runif(N,-1,1)
y<-runif(N,-1,1)
x+y<0
mean(x+y<0)
#3. Theoretical value
1/2
# EXERCISE 3.5: Monte Carlo estimate of pi/4 via the unit circle
N<-10000
x<-runif(N,-1,1)
y<-runif(N,-1,1)
mean(x^2+y^2<=1)
pi/4
# EXERCISE 3.6: fair coin flips, two equivalent simulations
moneda<-c(cara=1, cruz=0)
N<-10000
x<-sample(moneda, size=N, replace=TRUE, prob=c(0.5,0.5))
mean(x)
y<-runif(N,0,1)
mean(y<0.5)
# EXERCISE 3.7: reliability of a system of 5 Bernoulli components
n<- 10000
x1 <- rbinom(n, size=1, prob=0.8)
x2 <- rbinom(n, size=1, prob=0.9)
x3 <- rbinom(n, size=1, prob=0.6)
x4 <- rbinom(n, size=1, prob=0.5)
x5 <- rbinom(n, size=1, prob=0.7)
# Components 1-2 in parallel, 3-4 in parallel, the two branches in
# parallel, then in series with component 5.
z1<- x1|x2
z2<- x3|x4
z3<- z1|z2
z4<-z3&x5
mean(z4)
# EXERCISE 3.8: de Mere's problem (at least one six in four rolls)
# Simulate one round of de Mere's game: roll a fair die n times
# (default 4) and report whether at least one six appeared.
# Fix: the original hard-coded size = 4 and silently ignored `n`;
# the default still reproduces the original behavior.
deMere <- function(n = 4) {
  lanz <- sample(1:6, size = n, replace = TRUE)
  6 %in% lanz
}
deMere() # one simulated game
N<-10000
# Monte Carlo estimate of P(at least one six in four rolls)
mean(replicate(N,deMere()))
# Exact probability for comparison: 1 - (5/6)^4
1-(5/6)^4
|
51f99b17de9bc11a1bbeb130d157bbe84812cdf7 | 4951e7c534f334c22d498bbc7035c5e93c5b928d | /group-compare/proj-bid.R | 3e21c4aa366e048dfccee7554c56401cf36318cd | [] | no_license | Derek-Jones/ESEUR-code-data | 140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1 | 2f42f3fb6e46d273a3803db21e7e70eed2c8c09c | refs/heads/master | 2023-04-04T21:32:13.160607 | 2023-03-20T19:19:51 | 2023-03-20T19:19:51 | 49,327,508 | 420 | 50 | null | null | null | null | UTF-8 | R | false | false | 1,076 | r | proj-bid.R | #
# proj-bid.R, 4 Oct 17
#
# Data from:
# An Empirical Study of Software Project Bidding
# Magne Jorgensen and Gunnar J. Carelius
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG project_bidding project_economics
source("ESEUR_config.r")
# CompId,Group,CompSize,ExpApp,ExpTech,DevMeth,Delay,Completeness,Bid,Pre_Bid
comp_bid=read.csv(paste0(ESEUR_dir, "economics/proj-bidding.csv.xz"), as.is=TRUE)
comp_A_pre=read.csv(paste0(ESEUR_dir, "economics/proj-bid-Apre.csv.xz"), as.is=TRUE)
brew_col=rainbow(3)
plot(density(log(comp_A_pre$Bid), kernel="epanechnikov"), col=brew_col[1],
yaxs="i",
xlim=c(9, 14), ylim=c(0, 0.7),
# xlim=c(0, 6e+5), ylim=c(0, 3.2e-6),
main="",
xaxt="n", yaxt="n", xlab="Amount bid", ylab="Density")
lines(density(log(comp_bid$Bid[1:17]), kernel="epanechnikov"),
col=brew_col[2])
lines(density(log(comp_bid$Bid[17:35]), kernel="epanechnikov"),
col=brew_col[3])
legend(x="topleft", legend=c("A Outline", "A Detailed", "B Detailed"),
fill=brew_col, bty="n", cex=1.2)
|
e71c7c29dab81fe384a706f1d2824d19258b71c0 | 57ec7267c3903486f76310dfec2776789ba10170 | /Code/rsrHonors/man/rp.Rd | 3037cf49437b98b5aa74b62c3c9a58358b8a68e0 | [
"MIT"
] | permissive | grahamammal/honors-project | ebed346c6e8941f64f8fd7e1c8a203c208bec293 | be2adf92e05db8c2ba0ad0e6745f7fe485248783 | refs/heads/master | 2023-04-06T13:08:04.497316 | 2021-04-09T23:26:49 | 2021-04-09T23:26:49 | 292,358,292 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 287 | rd | rp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{rp}
\alias{rp}
\title{Multiply a number by two}
\usage{
rp(phi_r, dist, n_r, r_r, nu_r, num_r, cov_fun)
}
\arguments{
\item{x}{A single integer.}
}
\description{
Multiply a number by two
}
|
57ed84d41baeab638c434c0b703f0edebd69a7e1 | 037e0a791f5f1fcd51b61678f70269d0d27a60b0 | /Day69_catchphrases.R | cc42a95cdf9846b2c676ab3b9fa0bc7cf15c8b1f | [] | no_license | sharifshohan/One_Figure_A_Day_Challenge | ec29dd29b7352669a8f45e6c0c61f703cf62558a | 417684188f41c78c9d1233ecf3a1122de8e45551 | refs/heads/main | 2023-07-17T20:01:55.493394 | 2021-09-06T15:16:47 | 2021-09-06T15:16:47 | 363,593,446 | 5 | 3 | null | null | null | null | UTF-8 | R | false | false | 4,869 | r | Day69_catchphrases.R | #load packages
library(tidyverse)
library(tidytuesdayR) #tidytuesday
library(ggplot2) #plots
library(showtext) #add font
library(ragg) #ggsave
library(patchwork) #put together multiple plots
library(ggimage) #image as annotation
#load data
# TidyTuesday 2021-07-13 Scooby Doo episode data.
scooby <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-07-13/scoobydoo.csv')
#clean data
# Keep the per-character action columns (caught/captured/unmask/snack),
# dropping the "other"/"not" variants.
x <-
  scooby %>%
  select(season, title,
         starts_with("caught"),
         starts_with("captured"),
         starts_with("unmask"),
         starts_with("snack"),
         -contains("other"),
         -contains("not")) %>%
  filter(title!= "Wrestle Maniacs")
View(x)
# One row per episode x character, each action as a 0/1 flag.
# NOTE(review): char_actions is built but not used by the plot below.
char_actions <- x %>%
  pivot_longer(cols = -c("season","title"), names_to = c(".value","character"), names_sep = "_") %>%
  mutate(
    across(c("caught","captured","unmask","snack"), ~as.logical(.x) %>% as.integer())
  )
View(char_actions)
##bar graph - frequency of phrases##
#subset to tv shows
seasons <- c("1", "2", "3")
scooby_tv <- scooby %>% filter(season %in% seasons)
#remove null
# Missing values are stored as the literal string "NULL" in this dataset.
scooby_tv <- scooby_tv %>% filter(imdb != "NULL")
scooby_tv <- scooby_tv %>% filter(jinkies != "NULL")
#subset to phrases
# Column 8 is date_aired (used below); columns 58:65 are the eight
# catchphrase count columns.
phrases <- scooby_tv[, c(8,58:65)]
#get year from date
phrases$date_aired <- as.Date(phrases$date_aired, "%Y-%m-%d")
phrases$year <- format(phrases$date_aired, "%Y")
phrases
#change columns to integers
phrases <- phrases %>% mutate(across(c("year", "jeepers","jinkies","my_glasses","just_about_wrapped_up","zoinks",
                                       "groovy", "scooby_doo_where_are_you", "rooby_rooby_roo"), ~as.integer(.x)))
#group data by year and get sum of counts
phrases_year <- phrases %>% group_by(year) %>% summarise(total_jeepers = sum(jeepers),
                                                         total_jinkies = sum(jinkies),
                                                         total_glasses = sum(my_glasses),
                                                         total_wrapped = sum(just_about_wrapped_up),
                                                         total_zoinks = sum(zoinks),
                                                         total_groovy = sum(groovy),
                                                         total_where = sum(scooby_doo_where_are_you),
                                                         total_rooby = sum(rooby_rooby_roo))
#change columns to rows
View(phrases_year)
# Transpose so each catchphrase variable becomes a row (columns = years).
phrases_flipped <- as.data.frame(t(phrases_year))
# Sum across all year columns at once. rowSums() over the actual columns
# replaces the original loop with hard-coded indices 1:29 and column 30,
# which silently broke if the number of years changed.
phrases_flipped$total <- rowSums(phrases_flipped)
# Drop the first row (the transposed `year` variable), keeping the eight
# catchphrase rows.
phrases_flipped2 <- phrases_flipped[2:9,]
# Two-column (phrase, total) data frame for plotting.
phrases_total <- as.data.frame(cbind(rownames(phrases_flipped2), phrases_flipped2$total))
names(phrases_total) <- c("phrase", "total")
##plotting##
#load font
font_add("scooby", "ScoobyDoo.ttf")
showtext_auto()
#bar graph theme
# Shared ggplot theme: scooby font throughout, white background, y-axis
# text only (bars are labelled directly, so the x axis is hidden).
bar_graph_theme <- my_theme <- theme(
  #titles
  plot.title=element_text(family="scooby", size=65, color="black", hjust=1, vjust=1),
  plot.subtitle=element_text(family="scooby", size=30, color="black", hjust=0.5, vjust=1),
  plot.caption=element_text(family="scooby", size=20, color="darkgrey", hjust=0.5, vjust=1),
  plot.title.position = "plot",
  plot.caption.position = "plot",
  #background
  panel.border=element_blank(),
  panel.grid.major.y = element_blank(),
  panel.grid.minor.y = element_blank(),
  panel.background = element_rect(fill = "white"),
  plot.background = element_rect(fill = "white"),
  plot.margin=ggplot2::margin(0.5, 0.5, 0.5, 0.5, "in"),
  #axes
  axis.ticks.length=unit(0.15, "cm"),
  axis.ticks = element_blank(),
  axis.line = element_blank(),
  axis.title = element_blank(),
  axis.text.x = element_blank(),
  axis.text.y = element_text(size=35, family="scooby", colour="black"),
  #no legend
  legend.position = "none")
#colors
# One fill color and one display label per catchphrase, in the row order
# of phrases_total.
scooby_colors <- c("#006cdc", "#fda42b", "#880008", "#70498a", "#57911d", "#d0afdb", "#89653f", "#00b4d0")
scooby_labels <- c("Where are my glasses?", "Groovy", "Jeepers", "Jinkies!", "Rooby Rooby Roo!",
                   "Scooby-Doo, where are you?", "Just about wrapped up...", "Zoinks!")
#plot call
# Horizontal bar chart of the total number of times each phrase was said.
phrases_bar <- phrases_total %>% ggplot(aes(x=phrase, y=as.integer(total))) +
  geom_col(fill=scooby_colors) +
  geom_text(aes(label=total), family="scooby", size=14, hjust=-0.1, color="black") +
  labs(title="|Lot's of Zoinks!!",
       subtitle="Catchphrase said in the series") +
  scale_x_discrete(labels = scooby_labels) +
  coord_flip(clip='off') + bar_graph_theme
phrases_bar
#save plot
ggsave("phrases.png",
       plot=phrases_bar,
       device = agg_png(width = 6, height = 4, units = "in", res = 300))
|
9637dbbdbefc44a7d82ea88d1a999fd68ca950a0 | 8073958567c5f2366f29ad05645d4f4b32147c4b | /calibration.r | 14a28a0fa0b0600d915f6f9f15fa7d13bb902fdc | [] | no_license | ACSEkevin/Agent-Based-Modelling-Artificial-Anasazi | 3caa3529a54333e243b7f32fadfdd08f27bc7ac7 | c53552c5354472027fe28ae079f18d3b26a4f440 | refs/heads/master | 2023-03-15T15:32:44.807093 | 2020-08-29T11:32:18 | 2020-08-29T11:32:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 980 | r | calibration.r | #Run Calibration Process
library (EasyABC)
# Prior distributions for the seven Artificial Anasazi parameters, each
# given in EasyABC's c("unif", min, max) format.
mindeathAge = c("unif",20,35)
maxdeathAge = c("unif",30,50)
minFertileAge = c("unif",20,35)
maxFertileAge = c("unif",25,40)
fissionProb = c("unif",0.1,0.15)
MaizeFieldData1 = c("unif",0.7,1.2)
MaizeFieldData2 = c("unif",0,0.5)
anasazi_prior=list(mindeathAge , maxdeathAge, minFertileAge, maxFertileAge, fissionProb, MaizeFieldData1, MaizeFieldData2)
# Observed target statistics the simulated output is compared against.
target_data <- read.csv(file='target_data.csv')
tolerance=0.2
#runs the calibration
# ABC rejection sampling: runs the external model (bash model.sh)
# nb_simul times; prior_test enforces that the max ages exceed the
# corresponding min ages (X2 > X1 and X4 > X3).
ABC_sim<-ABC_rejection(model=binary_model("bash model.sh") , prior=anasazi_prior, nb_simul=10, prior_test="(X2>X1) && (X4>X3)", summary_stat_target=c(target_data), tol=tolerance, use_seed = FALSE, seed_count = 0, n_cluster=1, verbose=FALSE, progress_bar = FALSE)
# Unconstrained variant kept for reference:
#ABC_sim<-ABC_rejection(model=binary_model("bash model.sh") , prior=anasazi_prior, nb_simul=10, prior_test=NULL, summary_stat_target=NULL, tol=NULL, use_seed = FALSE, seed_count = 0, n_cluster=1, verbose=FALSE, progress_bar = FALSE)
|
4142f625b9d5f5b80891547e89a2cce9e6e662e0 | ecec6445e11e2e7baaeda8e42760b140835e96a6 | /man/kullbackLeibler.Rd | da069532877b4963a6a125c2718a39ef8a3e4104 | [] | no_license | joshbrowning2358/MCLE | 1ec631438c6e3453b8703295df7c7580eaba2209 | baad44b359a1d40dfd7b6d3aca6395fdfb1d2d88 | refs/heads/master | 2023-02-25T15:04:41.331043 | 2016-03-01T15:15:17 | 2016-03-01T15:15:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,507 | rd | kullbackLeibler.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kullbackLeibler.R
\name{kullbackLeibler}
\alias{kullbackLeibler}
\title{Kullback-Leibler divergence}
\usage{
kullbackLeibler(pdf1, pdf2, dimension = 1, lower = rep(-10, dimension),
upper = rep(10, dimension))
}
\arguments{
\item{pdf1}{A function taking a numeric value (univariate) or vector
(multivariate) specifying the coordinate for where the density is
required. This function should be the true or known density.}
\item{pdf2}{Same as pdf1, but for the estimated density.}
\item{dimension}{Integer. The dimension of the space. Defaults to 1.}
\item{lower}{A vector of the lower bounds in each dimension. Defaults to a
vector of -10.}
\item{upper}{A vector of the upper bounds in each dimension. Defaults to a
vector of 10.}
}
\value{
The Kullback-Leibler divergence.
}
\description{
A simple function to compute the Kullback-Leibler divergence between two
(univariate or multivariate) probability distribution functions.
}
\examples{
# Univariate examples
pdf1 = dnorm
pdf2 = function(x){dnorm(x, mean = 0.3)}
pdf3 = function(x){dnorm(x, mean = 2)}
kullbackLeibler(pdf1, pdf2)
kullbackLeibler(pdf1, pdf3)
# Multivariate examples
pdf1 = function(x){sn::dmsn(x, Omega = diag(2), alpha = 1:2)}
pdf2 = function(x){sn::dmst(x, Omega = diag(2), alpha = 1:2)}
pdf3 = function(x){sn::dmst(x, Omega = diag(c(1,2)), alpha = 1:2)}
kullbackLeibler(pdf1, pdf2, dimension = 2)
kullbackLeibler(pdf1, pdf3, dimension = 2)
}
|
87d7fea07b33839db806b884d7459ba2963544d9 | d155cbf85a33316e3aa9f27a59c6627b64e7d04f | /plotear_resultados_instar.R | 6a217dd91ecc25c5009d5f1190063b08c2a97325 | [] | no_license | MARIASUAM/Analisis_INSTAR | f18bfda1fdce636374fb999d1f594174537df6b0 | a7dd77697b652dc53de20e50a9567b4edd06bf2a | refs/heads/master | 2020-05-29T08:49:59.677963 | 2016-09-23T09:28:52 | 2016-09-23T09:28:52 | 69,004,551 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,299 | r | plotear_resultados_instar.R | setwd("C:/Users/iecolab/Google Drive/Instar/implementaciones/instar_baza/scripts/git_Instar_baza/git_Instar_baza")
#install.packages("lubridate")
library(lubridate) # for date handling
INSTAR<-read.table("instar_bz008.txt", header=T, sep=",", dec=".")
INSTAR$fecha <-as.Date(INSTAR$fecha, format="%m-%d-%y")
INSTAR$yday <- yday(INSTAR$fecha) # add day-of-year with the lubridate package
INSTAR$year <- year(INSTAR$fecha) # add the year with the lubridate package
INSTAR$biociclo <- NULL
#write.csv(INSTAR, file = "datos_instar_bz008.csv", row.names=FALSE, na="")
COPLAS<-read.csv("zona_baza.csv", header=TRUE, sep=",")
names(COPLAS)[1] <- "RODAL"
rodal<-subset(COPLAS, RODAL == "GR023008")
rodal<-rodal[-c(1,2, 23),]
# Plot of the INSTAR data
# NOTE(review): the ```{r} fences below are leftover R Markdown chunk
# delimiters pasted into a plain .R file; they will break source().
```{r}
# Build the data vectors (currently includes the 29-Feb rows; decide whether to show them and adjust the code)
huevos <- INSTAR$huevo[5748:7573]
l1 <- INSTAR$L1[5748:7573]
l2 <- INSTAR$L2[5748:7573]
crisalidas <- INSTAR$crisalida[5748:7573]
exergia <- INSTAR$exergia[5748:7573]
fecha <- INSTAR$fecha[5748:7573]
# To show only a slice of the data, specify the rows to keep:
# l1 <- INSTAR_bz2$l1[5748:7573] (4 complete cycles)
```
```{r}
par(mar=c(5, 4, 4, 5) + 0.1) # widen the margins
plot(fecha, huevos, type="l", col="red",xlab="Fecha", ylab="Numero de individuos", ylim=c(0,max(huevos))) # first series; it defines the Y axis
par(new=TRUE) # allows drawing additional plots on top
plot(fecha, l1, type="l", col="blue",xlab="", ylab="", ylim=c(0,max(huevos)), yaxt="n", xaxt="n") # second series
par(new=TRUE)
plot(fecha, l2, type="l", col="green",xlab="", ylab="", ylim=c(0,max(huevos)), yaxt="n", xaxt="n") # third series
par(new=TRUE)
plot(fecha, crisalidas, type="l", col="orange",xlab="", ylab="", ylim=c(0,max(huevos)), yaxt="n", xaxt="n") # fourth series
par(new=TRUE)
plot(fecha, exergia, type="l", col="black",xaxt="n",yaxt="n",xlab="",ylab="", lty="dotted") # fifth series
axis(4) # configure the right-hand Y axis
mtext("Vigor (%)",side=4,line=3)
legend("topleft",col=c("red","blue","green","orange", "black"),lty=1,legend=c("Huevos","L1", "L2","Crisalida", "Vigor"))
#install.packages("Hmisc")
#library(Hmisc)
#minor.tick(nx=10,ny=1) #to add minor ticks (install the "Hmisc" package first)
|
3b1be025ecd06781e28112d94d5833f8fd2a8c28 | d1625e2223c81a6c510ccf8bb847c67ed85f8e2f | /R/cv-update.R | 80d1a6e7a5cce9cb6f24dd5ddf08b66ad4b04c8e | [] | no_license | bmihaljevic/bnclassify | ea548c832272c54d9e98705bfb2c4b054f047cf3 | 0cb091f49ffa840983fb5cba8946e0ffb194297a | refs/heads/master | 2022-12-08T21:37:53.690791 | 2022-11-20T10:00:18 | 2022-11-20T10:00:18 | 37,710,867 | 20 | 12 | null | 2020-08-13T19:39:24 | 2015-06-19T08:30:56 | R | UTF-8 | R | false | false | 2,652 | r | cv-update.R | bnc_get_update_args <- function(x, dag) {
  stopifnot(is.logical(dag))
  # Always capture the stored parameter-learning call (.call_bn);
  # it is mandatory for any update.
  args <- list(lp_fargs = x$.call_bn)
  # lp_fargs must always be present
  stopifnot(!is.null(args$lp_fargs))
  # If dag then include dag arguments
  if (dag) {
    # The structure-learning call (.call_struct) is only needed when the
    # DAG itself is to be re-learned.
    args$dag_fargs <- x$.call_struct
    stopifnot(!is.null(args$dag_fargs))
  }
  args
}
# Re-fit a classifier from the argument bundle produced by
# bnc_get_update_args(), using the supplied dataset. args$dag_fargs may
# be NULL, in which case only the parameters are re-learned.
bnc_update <- function(args, dataset) {
  bnc_update_args(args$lp_fargs, dataset, args$dag_fargs)
}
# Core update step: optionally re-learn the DAG first, then re-learn the
# parameters on `dataset`. lp_fargs / dag_fargs are stored call
# descriptions (function name followed by its evaluated arguments).
bnc_update_args <- function(lp_fargs, dataset, dag_fargs = NULL) {
  # If dag needs to be called, call it first then feed it into lp arguments
  if (!is.null(dag_fargs)) {
    # dag_fargs contain both function name and arguments.
    dag <- do_bnc_call(dag_fargs, dataset)
    lp_fargs$x <- dag
  }
  # Wrap the result of lp before it's returned
  res <- do_bnc_call(lp_fargs, dataset)
  # bnc_wrap(res) TODO
  res
}
# Optionally updates the dag prior to updating the parameters.
# When dag = TRUE the stored structure-learning call is re-run on
# `dataset`; otherwise the existing structure is reused via bn2dag().
# Parameters are then re-learned on `dataset` in either case.
# NOTE(review): this plain function masks the stats::update() generic
# for code in this file's namespace — confirm that is intended.
update <- function(x, dataset, dag) {
  stopifnot(is.logical(dag))
  dg <- NULL
  if (dag) {
    # Re-learn the structure from the new dataset.
    dg <- update_dag(x, dataset)
  }
  else {
    # Keep the current structure (bn2dag defined elsewhere).
    dg <- bn2dag(x)
  }
  lp_args <- get_lp_update_args(x)
  update_lp(dag = dg, lp_fargs = lp_args, dataset = dataset)
}
# Snapshot a call so it can be replayed on a different dataset later.
#
# The function symbol is replaced by `fun_name` (a string), any captured
# `dataset` argument is dropped so a stale dataset can never leak into a
# later refit, and the remaining arguments are force-evaluated in `env`
# so the saved description holds concrete values, not promises.
#
# Returns a list: element 1 is the function name string, the rest are
# the evaluated arguments (names preserved).
save_bnc_call <- function(fun_name, call, env) {
  stopifnot(is.character(fun_name))
  parts <- as.list(call)
  parts[[1L]] <- fun_name
  parts[["dataset"]] <- NULL
  lapply(parts, eval, envir = env)
}
# Execute a stored call description against `dataset`.
# fargs[[1]] is the function (name) and fargs[-1] its saved arguments;
# the current dataset is injected as the `dataset` argument, the call is
# rebuilt via make_call (defined elsewhere in this package) and run.
do_bnc_call <- function(fargs, dataset) {
  fargs$dataset <- dataset
  call <- make_call(fargs[[1]], fargs[-1])
  eval(call)
}
# Record the structure-learning call on a bnc object under
# `.call_struct` so the structure can be re-learned later (e.g. in CV).
# With the default force = FALSE an existing stored call is an error.
add_dag_call_arg <- function(bnc_dag, fun_name, call, env, force = FALSE) {
  add_call_arg(bnc_dag, fun_name, call, env, arg = '.call_struct', force = force)
}
# Strip the stored structure-learning call (if any) from a bnc object,
# returning the object otherwise unchanged.
remove_dag_call_arg <- function(bnc_dag) {
  bnc_dag$.call_struct <- NULL
  bnc_dag
}
# Record the parameter-learning ('lp') call on a fitted object under
# `.call_bn`; by default (force = TRUE) any previously stored call is
# overwritten.
add_params_call_arg <- function(bnc_bn, call, env, force = TRUE) {
  add_call_arg(bnc_bn, 'lp', call, env, arg = '.call_bn', force = force)
}
# Store an evaluated call description (see save_bnc_call) on `bnc_dag`
# under list element `arg`. With force = FALSE an already-present entry
# is treated as an error rather than silently overwritten.
add_call_arg <- function(bnc_dag, fun_name, call, env, arg, force) {
  # stopifnot(inherits(bnc_dag, "bnc_dag"))
  # TODO Fix this for appropriate types
  stopifnot(inherits(bnc_dag, "bnc_dag") || inherits(bnc_dag, "bnc_base"))
  if (!force) {
    # Refuse to clobber an existing stored call.
    stopifnot(is.null(bnc_dag[[arg]]))
  }
  bnc_dag[[arg]] <- save_bnc_call(fun_name, call, env)
  bnc_dag
}
# Retrieve the stored parameter-learning ('lp') call arguments from a
# fitted object. Errors if no call was recorded, since the object then
# cannot be updated.
get_lp_update_args <- function(x) {
  lp_call <- x$.call_bn
  stopifnot(!is.null(lp_call))
  lp_call
}
# Retrieve the stored structure-learning call arguments from a bnc
# object. Errors if no structure call was recorded.
get_dag_update_args <- function(x) {
  dag_call <- x$.call_struct
  stopifnot(!is.null(dag_call))
  dag_call
}
# Re-run the structure-learning call stored on `x` against `dataset`,
# returning the newly learned DAG.
update_dag <- function(x, dataset) {
  do_bnc_call(get_dag_update_args(x), dataset)
}
# Re-run the stored parameter-learning call on `dataset`, with `dag`
# injected as the structure argument `x` of that call.
update_lp <- function(dag, lp_fargs, dataset) {
  lp_fargs$x <- dag
  do_bnc_call(lp_fargs, dataset)
}
4d0e26abddc5415778094ae0ab3c8bd27dc40b0a | 952aad9f69f2884807487d2c766b54d66af35079 | /R/musit_to_zootron.R | 6d9aa3a99c0643e89fad1bddea04312a0a7d5a54 | [] | no_license | vmzomdav/musit_to_ipt | db62530c33aa2bb89c7c3707cba77c8a34da04de | 5fdfe39204d531c504f917f6536b9d71ce72a13e | refs/heads/master | 2021-05-14T10:39:43.882863 | 2018-01-05T08:27:56 | 2018-01-05T08:27:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,389 | r | musit_to_zootron.R |
# To be run as loop, downloading and storing each dataset named in the vector "dataset"
# for each iteration
# script stored at bitbucket, run on server as:
#source("https://git.vm.ntnu.no/projects/MUS/repos/musit_to_ipt/raw/R/musit_to_zootron.R?at=refs%2Fheads%2Fmaster")
# dependencies
library(countrycode)
library(dplyr)
library(dbplyr)
library(RPostgreSQL)
library(stringr)
library(DBI)
# lists of datasets to process ------
# (see http://www.unimus.no/nedlasting/datasett/ )
dataset <- c("entomology_ntnuvmti","marine_ntnuvmmi")
# download, clean and upload to db ------
# For each dataset: download the gzipped tab-separated export, add and
# clean Darwin-Core style fields, then replace the matching Postgres
# table in the musit_to_ipt database.
for (i in 1:length(dataset)){
  url <- paste("http://www.unimus.no/nedlasting/datasett/",dataset[i],".gz",sep="")
  tmp <- tempfile()
  download.file(url,tmp)
  # NOTE: dataset is further referred to as "inndata"
  inndata <- read.csv(gzfile(tmp), sep="\t", header=TRUE, stringsAsFactors=FALSE)
  # some cleaning of data, and adding of terms
  inndata$geodeticDatum <- "WGS84" # add term
  inndata$kingdom <- "Animalia" # add term
  inndata$countryCode <- countrycode(inndata$country, 'country.name', 'iso3c') # get country code
  # "0000-00-00" is the export's missing-date sentinel
  inndata$dateIdentified[inndata$dateIdentified=="0000-00-00"] <- NA
  inndata$eventDate[inndata$eventDate=="0000-00-00"] <- NA
  # strip unknown month/day components ("-00") before deriving y/m/d
  inndata$eventDate <- stringr::str_replace_all(inndata$eventDate,"-00","")
  inndata$year <- stringr::str_sub(inndata$eventDate,1,4)
  inndata$month <- stringr::str_sub(inndata$eventDate,6,7)
  inndata$month[inndata$month==""] <- NA
  inndata$day <- stringr::str_sub(inndata$eventDate,9,10)
  inndata$day[inndata$day==""] <- NA
  inndata$eventDate <- stringr::str_replace_all(inndata$eventDate,"-00","")
  inndata$dateIdentified <- stringr::str_replace_all(inndata$dateIdentified,"-00","")
  inndata$occurrenceID <- paste("urn:uuid:",inndata$occurrenceID,sep="") # declare the nature of the identifier by adding urn:uuid at start
  inndata$db_import_datetime <- Sys.time()
  # upload data to database
  con <- DBI::dbConnect(RPostgreSQL::PostgreSQL(), # DB connection
                        dbname="musit_to_ipt")
  dbSendStatement(con,paste("DROP TABLE IF EXISTS", dataset[i])) # delete existing table
  copy_to(con,inndata,paste(dataset[i]),temporary = FALSE) # upload table
  dbSendStatement(con,paste("GRANT SELECT ON", dataset[i], "TO ipt;")) # make sure db user ipt has read access
  dbDisconnect(con) # disconnect from DB
}
|
6ff169a8fba111abced476c5b33b0283127bed76 | 7bc6ec8b8991ce10b2f15a59c417d1bf586464d1 | /Dplyr.R | 8d8d096ddd3b67e698a10f99b28120d178c94d1e | [] | no_license | shivamjolly95/Analytics | 7af56e6236c1383027a7caab74e297475ae21210 | cdc48621c22a261a4e26e5b2e5e17d17a554dddb | refs/heads/master | 2020-03-30T06:38:53.172144 | 2018-10-02T12:12:18 | 2018-10-02T12:12:18 | 150,877,404 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 520 | r | Dplyr.R | #data analysis using dplyr
# Data-analysis practice with dplyr on the built-in mtcars dataset.
# install.packages() needs the package name as a quoted string; the
# original `install.packages(dplyr)` errored because the bare symbol is
# evaluated before dplyr is attached.
install.packages("dplyr")
library(dplyr)
# Equivalent filter calls: explicit namespace vs. attached function.
dplyr::filter(mtcars, mpg > 25 & am == 1)
?mtcars
filter(mtcars, mpg > 25 & am == 1)
# Pipe chains: filter, sort by weight, then summarise the row count.
mtcars %>% filter(mpg > 25 & am == 1) %>% arrange(wt) %>% summarise(n())
mtcars %>% filter(mpg>25 & am==1) %>% arrange(wt) %>% count(n()) # NOTE(review): count(n()) groups by the group size — confirm count() was intended
count(mtcars)
# Group-wise means of mpg.
mtcars %>% group_by(am) %>% summarise(mean(mpg))
mtcars %>% group_by(wt, gear) %>% summarise(mean(mpg))
mutate(mtcars, displ_l = disp / 61.0237)  # keeps the other columns
transmute(mtcars, disp_l = disp / 61)     # returns only the new column
|
5099bcb8b33b3d4d6b9e207c7185182f29302f72 | 50784fd2f2b517cea59cf81aa9a62b0b7c9233dd | /functions.R | a5bf2e02dcd3704b3872735b4d4510da9f6cae70 | [
"MIT"
] | permissive | jogwalker/parasite_transmission_Q0 | 2e3f65d6876ed094e843e3786e6b8c66f8a1d9f9 | d7f9dae7c7d13aa7902265cb248217a6675d6573 | refs/heads/master | 2021-09-04T14:29:57.213352 | 2018-01-19T14:37:59 | 2018-01-19T14:37:59 | 112,845,168 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,024 | r | functions.R | # Functions necessary for Q0_model
##---------------------------------------------------------------------
# smoothNA is a function to fill in missing values in data
# updated to account for multiple leading and trailing NAs
# Fill NA gaps in a numeric vector.
#
# Leading NAs are back-filled with the first observed value, trailing
# NAs are forward-filled with the last observed value, and each interior
# run of NAs is replaced by the mean of the two values bracketing the
# run (the whole run gets one shared value, not a linear interpolation).
#
# Args:
#   x: numeric vector, possibly containing NAs.
#
# Returns:
#   A two-column matrix: column "x" holds the filled values, column "TF"
#   holds 1/0 flags marking positions that were originally NA.
#
# Side effect: prints a summary of how many values were imputed.
smoothNA <- function(x) { # provide numeric vector
  n <- length(x)
  replaced <- 0
  TF <- is.na(x)
  # Guard: with no observed values there is nothing to impute from (the
  # original code failed here with "missing value where TRUE/FALSE
  # needed"). all(TF) is also TRUE for a zero-length input.
  if (all(TF)) {
    stop("smoothNA: input contains no non-missing values", call. = FALSE)
  }
  # identify trails of NAs
  lead <- which(!is.na(x))[1]-1 # how many NAs at beginning
  if(lead > 0) {
    x[1:lead] <- x[lead+1]
    replaced <- replaced + lead
  }
  tail <- which(!is.na(rev(x)))[1]-1 # how many NAs at the end
  if(tail > 0) {
    x[(n-tail+1):n] <- x[n-tail]
    replaced <- replaced + tail
  }
  for(i in seq_len(n)) { # loop through vector (seq_len is safe for n = 0)
    if (is.na(x[i])) { # if a cell is NA
      if (!is.na(x[i+1]) & !is.na(x[i-1])) { # and if neighbors are not NA
        x[i] <- (x[i+1] + x[i-1])/2 # set the cell to be the mean of the neighbors
        replaced <- replaced + 1
      }
      if (is.na(x[i+1])) { # multiple NA in a row
        temp <- x[i:n]
        endNA <- min(which(!is.na(temp))) - 1 # find next number
        # fill the whole run with the mean of its two bracketing values
        x[i:(i+endNA-1)] <- (x[i-1] + x[i+endNA])/2
        replaced <- replaced + endNA
      }
    }
  }
  print(paste(replaced, " NAs replaced with imputed values, including",lead, "leading and",tail,"trailing NAs"))
  return(cbind(x,TF))
}
#------------------------------------------------------------------
## Run smoothNA() over the Precip, Tmin and Tmax columns of a climate
## data frame. Each gap-filled column replaces the original, and a
## matching logical <col>NA column records which entries were missing
## before imputation.
imputeAll <- function(climatedata) {
  dat <- climatedata
  for (col in c("Precip", "Tmin", "Tmax")) {
    filled <- smoothNA(climatedata[[col]])
    dat[[col]] <- filled[, 1]
    dat[[paste0(col, "NA")]] <- as.logical(filled[, 2])
  }
  return(dat)
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## function to define initial conditions from climate data
## NOTE: every `<<-` below writes into the global environment; the
## downstream model scripts (see runHaemC) read these globals.
## Globals set: study.latitude, global.t (daily time index), photoperiod
## (daylength at the study latitude via geosphere::daylength), the
## larval-stage counters Eggs, L1L2, L3, L3s, L3h (all zeroed), and a
## `climate` data frame holding Tmean and Precip.
setInit <- function(climdat) {
  library(geosphere)  # for daylength()
  study.latitude <<- climdat$Latitude[1] #enter latitude in degrees
  duration <- climdat$Date # daylength function can use a Date format object instead of Julian date.
  global.t <<- seq(1, length(duration), 1)
  photoperiod <<- daylength(lat=study.latitude, doy=duration)
  Eggs <<- 0 #number of eggs in faeces
  L1L2 <<- 0 #number of L1 and L2 in faeces
  L3 <<- 0 #number of L3 in faeces
  L3s <<- 0 #number of L3 in soil
  L3h <<- 0 #number of L3 on herbage
  climate <- data.frame(cbind(climdat$Tmean,climdat$Precip))
  names(climate) <- c("Tmean","Precip")
  climate <<- climate
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Run the GLOWORM-FL free-living-stage model for Haemonchus.
## Side effects: attaches deSolve, source()s the parameter and model
## scripts from the working directory, and writes `event` into the
## global environment (the sourced model script reads it).
## Relies on objects created by those scripts (P.E.1, para.sol) —
## this function cannot run standalone.
runHaemC <- function(global.t) {
  library(deSolve)
  source("./Haem_params_FL.r")
  # 1 when precipitation/evaporation is adequate, 0.1 otherwise
  egg.correction = ifelse(P.E.1<1, 0.1, 1)
  event.times = global.t
  event.values = 1e6*egg.correction[event.times] #this function reduces eggs deposited by 90% if P/E<1 within a critical period of deposition
  event <<- data.frame(var = "E", time = event.times, value = event.values, method = "add")
  source("./Hannah's code/GLOWORM_FL.r")
  return(data.frame(para.sol))
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# function to calculate cumulative values from previous time period
# For each date d in Datesneeded the result is the sum of DailyValues
# over the window [d - TimeLag, d), i.e. the trailing TimeLag units
# strictly before d.
#
# Args:
#   Datesneeded: dates (or numerics) at which the window sum is wanted.
#   allDates:    dates aligned with DailyValues, one per value.
#   DailyValues: numeric vector of daily values.
#   TimeLag:     window length in the same units as the dates.
#
# Returns: numeric vector, one window sum per element of Datesneeded.
cumulativeL3 <- function(Datesneeded, allDates, DailyValues, TimeLag) {
  # which() drops NA comparisons, matching the original behaviour when
  # allDates contains missing entries
  vapply(Datesneeded, function(d) {
    sum(DailyValues[which(allDates < d & allDates >= (d - TimeLag))])
  }, numeric(1))
}
#------------------------------------------------------------------#
#function to calculate data for analysis from the model output
# For each observation date, attach lagged (7/14/28 days earlier) and
# trailing cumulative (7/14/28/91/182 day) expected L3-on-herbage values
# drawn from the daily model output (data.alldates$ExpL3H).
# NOTE(review): the lagged lookups assume every Date - lag occurs
# exactly once in data.alldates and in the same order as the
# observation dates — confirm inputs are daily and sorted.
calcSummary <- function(data.obsdates,data.alldates) {
  data.obsdates$ExpL3Hminusweek <- data.alldates$ExpL3H[which(data.alldates$Date %in% (data.obsdates$Date - 7))]
  data.obsdates$ExpL3Hminus2week <- data.alldates$ExpL3H[which(data.alldates$Date %in% (data.obsdates$Date - 14))]
  data.obsdates$ExpL3Hminus4week <- data.alldates$ExpL3H[which(data.alldates$Date %in% (data.obsdates$Date - 28))]
  data.obsdates$ExpL3Hcumweek <- cumulativeL3(data.obsdates$Date,data.alldates$Date,data.alldates$ExpL3H,7)
  data.obsdates$ExpL3Hcum2week <- cumulativeL3(data.obsdates$Date,data.alldates$Date,data.alldates$ExpL3H,14)
  data.obsdates$ExpL3Hcum4week <- cumulativeL3(data.obsdates$Date,data.alldates$Date,data.alldates$ExpL3H,28)
  data.obsdates$ExpL3Hcum3month <- cumulativeL3(data.obsdates$Date,data.alldates$Date,data.alldates$ExpL3H,91)
  data.obsdates$ExpL3Hcum6month <- cumulativeL3(data.obsdates$Date,data.alldates$Date,data.alldates$ExpL3H,182)
  # to do data.obsdates$ExpL3Hcumyear will need to run model with full year before first observation date (currently about 9 months)
  return(data.obsdates)
}
###############-----------------------------------------------#
# functions to summarize data by week
# Map each date to its (year, lubridate week-of-year) pair.
# A daily reference calendar spanning the input range is built once and
# each input date is looked up in it. Returns a data.frame with numeric
# columns year and week, one row per input date.
matchweek <- function(dates) {
  library(lubridate)
  # daily reference calendar covering the full span of the inputs
  ref_days <- seq(min(dates), max(dates), 1)
  ref_year <- as.POSIXlt(ref_days)$year + 1900
  ref_week <- week(ref_days)
  # position of every input date within the reference calendar
  idx <- match(dates, ref_days)
  out <- data.frame(year = ref_year[idx], week = ref_week[idx])
  return(out)
}
#--------------------------------------------------------------------
# get africa drought monitor data
# Read the Meteorology, Water_balance and Vegetation exports for one
# station folder and merge them into a single daily data frame.
#
# Args:
#   foldername: sub-folder under "./Climate data/africa drought monitor/".
#
# Returns: data.frame with year/month/day, Tmin/Tmax (converted from
#   Kelvin to Celsius), Precip, PET, NDVI, derived Tmean and a Date
#   column. Sentinel values <= -999 (missing-data codes) become NA.
getClimateData <- function(foldername) {
  met <- read.table(paste("./Climate data/africa drought monitor/",foldername,"/Meteorology.txt",sep=""),sep=",",header=T)
  wat <- read.table(paste("./Climate data/africa drought monitor/",foldername,"/Water_balance.txt",sep=""),sep=",",header=T)
  veg <- read.table(paste("./Climate data/africa drought monitor/",foldername,"/Vegetation.txt",sep=""),sep=",",header=T)
  # The exports must cover identical dates. The original check built an
  # error string with paste() but never signalled it; stop() makes the
  # mismatch fatal instead of silently merging misaligned rows.
  if (any(!met$year == wat$year)) {
    stop("Date mismatch! Check input data files have the same date range.", call. = FALSE)
  }
  dat <- data.frame(year=met$year,month=met$month,day=met$day, Tmin=met$Daily.Min - 273,Tmax=met$Daily.Max - 273,Precip=wat$Precipitation,PET=wat$Evaporation, NDVI=veg$NDVI)
  dat$Tmean <- (dat$Tmin + dat$Tmax)/2
  dat$Date <- as.Date(paste(dat$year,dat$month,dat$day,sep="-"))
  # set values < 0 to NA (normally -999, also -1272 after adjusting for Kelvin)
  dat[(dat <= -999)] <- NA
  return(dat)
}
# Impute every Africa-Drought-Monitor climate column via smoothNA().
# For each of Precip, Tmin, Tmax, PET, Tmean and NDVI the gap-filled
# values replace the original column and a logical <col>NA column flags
# the entries that were missing before imputation.
imputeAll2 <- function(climatedata) {
  dat <- climatedata
  for (col in c("Precip", "Tmin", "Tmax", "PET", "Tmean", "NDVI")) {
    filled <- smoothNA(climatedata[[col]])
    dat[[col]] <- filled[, 1]
    dat[[paste0(col, "NA")]] <- as.logical(filled[, 2])
  }
  return(dat)
}
|
8945439e09da7c5c0a433a5d70a1f8735df397d1 | 6c079037dd6157e0a8c5f978179c9db5405817ce | /R/getData.R | 974028937fdae1765720cf0be430058320fa5451 | [] | no_license | greuell/dischargeValidation | e377dee541fc1bb5d6838d533d7f7dcb9ae247aa | ede3643a755d21ec28ec01a269d9174b6f4dd05f | refs/heads/master | 2020-04-27T00:21:39.265397 | 2019-09-06T11:42:48 | 2019-09-06T11:42:48 | 173,931,508 | 0 | 0 | null | 2019-03-05T11:09:51 | 2019-03-05T11:09:51 | null | UTF-8 | R | false | false | 4,418 | r | getData.R | #' Get validation data object for further analysis
#'
#' @param obsFile Path to the observation file.
#' @param simFiles Path to the simulation file(s). This can also be an empty vector.
#' @param locations List of station locations containing lon/lat values in a vector.
#' @param simOrigins Date origin of the simulation(s). A value per simulation file is required. Defaults to VIC origin.
#' @param simVars Variable name of the simulation(s) to retrieve. A value per simulation file is required. Defaults to VIC discharge variable.
#' @param simSkips Number of months to skip for the simulation(s). A value per simulation file is required. Defaults to 0.
#' @param obsOrigin Date origin of the observations.
#' @param obsVar Variable name of the observations to retrieve.
#' @param attVars Variable name of attribute(s) to retrieve. If "all" is used, all attributes are retrieved. A variable is recognized as an attribute if it has only lon & lat dimensions (characters can have three dimensions to form strings).
#'
#' @return An object containing values for nloc, nsim, ntime, time, observations, simulations and the requested attributes
#' @export
#'
#' @import ncdf4
#' @examples
getData <- function(obsFile,
                    simFiles,
                    locations,
                    simOrigins = rep("0000-12-30", length(simFiles)),
                    simVars = rep("OUT_DISCHARGE", length(simFiles)),
                    simSkips = rep(0, length(simFiles)),
                    obsOrigin = "1900-01-01",
                    obsVar = "dis",
                    attVars = "all")
{
  # Common time axis across the observations and all simulations.
  time <- getTime(obsFile = obsFile,
                  simFiles = simFiles,
                  simOrigins = simOrigins,
                  simSkips = simSkips,
                  obsOrigin = obsOrigin)
  # Resolve attVars = "all" by scanning the observation file for
  # lon/lat-only variables. identical() keeps this test valid when the
  # caller passes a vector of attribute names (`==` inside `if` errors).
  if (identical(attVars, "all")) {
    attVars <- c()
    nc <- nc_open(obsFile)
    for (var in nc$var) {
      if ((var$prec == "char" && var$ndims == 3) &&
          var$dim[[2]]$name == nc$dim$lon$name &&
          var$dim[[3]]$name == nc$dim$lat$name) {
        # 3-D character variables are strings over the lon/lat grid.
        attVars <- c(attVars, var$name)
      } else if (var$ndims == 2 &&
                 var$dim[[1]]$name == nc$dim$lon$name &&
                 var$dim[[2]]$name == nc$dim$lat$name) {
        attVars <- c(attVars, var$name)
      }
    }
    nc_close(nc)
  }
  nloc <- length(locations)
  nsim <- length(simFiles)
  ntime <- length(time)
  natt <- length(attVars)
  observations <- array(data = NA, dim = c(nloc, ntime))
  simulations <- array(data = NA, dim = c(nloc, nsim, ntime))
  attributes.array <- array(data = NA, dim = c(nloc, natt))
  # Fill the arrays station by station. seq_len() (not 1:n) keeps the
  # loops correct when simFiles or attVars is empty, which the
  # documentation explicitly allows for simFiles.
  for (iLoc in seq_len(nloc)) {
    location <- locations[[iLoc]]
    print(paste0("Location: ",
                 location[1], " N ", location[2], " E ",
                 "(", iLoc, " of ", nloc, ")"))
    # Observation series for this station.
    observations[iLoc, 1:ntime] <- getValues(file = obsFile,
                                             location = location,
                                             variable = obsVar,
                                             origin = obsOrigin,
                                             time = time)
    for (iSim in seq_len(nsim)) {
      # Simulation series for this station / model run.
      simulations[iLoc, iSim, 1:ntime] <- getValues(file = simFiles[iSim],
                                                    location = location,
                                                    variable = simVars[iSim],
                                                    origin = simOrigins[iSim],
                                                    time = time)
    }
    # Static station attributes, read from the observation file.
    attributes.array[iLoc, seq_len(natt)] <- getAttributes(file = obsFile,
                                                           location = location,
                                                           variable = attVars)
  }
  # One vector per attribute, indexed by station. Named att_list so the
  # local no longer shadows base::attributes().
  att_list <- vector("list", natt)
  for (iAtt in seq_len(natt)) {
    att_list[[iAtt]] <- array(data = attributes.array[, iAtt], dim = c(nloc))
  }
  names(att_list) <- attVars
  ## Create object
  datum <- list(nloc = nloc,
                ntime = ntime,
                nsim = nsim,
                time = time,
                observations = observations,
                simulations = simulations)
  datum <- c(datum, att_list)
  class(datum) <- validationDataClass()
  return(datum)
}
|
9ee4df3ac9f85d51cd5c236737c9efad1bbe69b4 | 76cf478e98829d8329fd4f6f66197352eae04c96 | /k-means-centroid-and-methods.R | 5c986731a7e35356825ea51cc0a50526f0ae580a | [
"MIT"
] | permissive | paulinelemenkova/R-8-k-means-Cluster-Analysis | e87f97bbadc99d980d142f6989f82c17dfc79126 | 5bbd0587ae2e7a7149e9f80aebd8b6ab55f8c210 | refs/heads/master | 2020-03-19T09:31:37.855100 | 2019-06-17T13:07:13 | 2019-06-17T13:07:13 | 136,296,542 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,171 | r | k-means-centroid-and-methods.R | # libraries: 'factoextra', 'FactoMiner' ‘zip’, ‘openxlsx’, ‘carData’, ‘pbkrtest’, ‘rio’, ‘car’, ‘flashClust’, ‘leaps’, ‘scatterplot3d’, ‘FactoMineR’, ‘ca’, ‘igraph’
# PART 1: build a data.frame with the geomorphology data
# step 1: load the table into a data frame
MorDF <- read.csv("Morphology.csv", header=TRUE, sep = ",")
head(MorDF)
summary(MorDF)
# PART 2: plot the centroids against the two main discriminant functions
#Centroid Plot against 1st 2 discriminant functions
# step 2: k-means fits with an increasing number of centers
set.seed(518) # sample size: here, 518 points in each profile
# 3 centers
fit3 <- kmeans(MorDF, 3)
# 4 centers
fit4 <- kmeans(MorDF, 4)
# 5 centers
fit5 <- kmeans(MorDF, 5)
# 6 centers
fit6 <- kmeans(MorDF, 6)
# NOTE(review): clusplot() comes from the 'cluster' package, which is
# never attached here — confirm library(cluster) is loaded upstream.
# NOTE(review): the next assignments overwrite the kmeans objects fit2 /
# fit3 with clusplot()'s return value, so fit8 below no longer plots a
# kmeans clustering.
fit2 <- clusplot(MorDF, fit4$cluster, color=TRUE, shade=TRUE,
                 labels=2, lines=0) # visualize (plot) fit4
fit3 <- clusplot(MorDF, fit5$cluster, color=TRUE, shade=TRUE,
                 labels=2, lines=0) # visualize fit5
fit7 <- clusplot(MorDF, fit6$cluster, color=TRUE, shade=TRUE,
                 labels=2, lines=0) # visualize fit6 (original comment said fit3)
fit8 <- clusplot(MorDF, fit3$cluster, color=TRUE, shade=TRUE,
                 labels=2, lines=0)
# PART 3: choose the best clustering method
# comparing 2 cluster solutions
# step 3: model-based clustering with Mclust
library(mclust)
fit <- Mclust(MorDF)
plot(fit)
# plot(fit) produces 4 different plot types: see "methods-model-fitting.jpg".
# Plots can be saved as pdf; here these were produced: Mclust-uncertainty.pdf, Mclust-BIC.pdf, Mclust-density.pdf, Mclust-classification.pdf
summary(fit) # display the best model; here BIC selected the best method.
|
f641e59a13518db63883215bb6e115626051c5bf | 01f997b8df7cd8979004b1bea39a8791614f0522 | /1-data-prep/b-length-data/0-functions.R | 18a9dd3bea9801b1af03ca80d0772846d1cffba0 | [
"MIT"
] | permissive | bstaton1/esc-qual-ms-analysis | 4c8d7882b7247a47451c594ebcc8a0fbc21c8332 | d01e771ffd66588f20ed17bd871126ef310eb5e5 | refs/heads/master | 2023-04-07T13:27:09.672295 | 2021-05-17T04:02:57 | 2021-05-17T04:02:57 | 180,231,211 | 0 | 0 | MIT | 2021-05-17T04:02:16 | 2019-04-08T20:53:32 | R | UTF-8 | R | false | false | 4,593 | r | 0-functions.R |
##### CREATE STRATA KEY FROM A RANGE #####
# Build a lookup table assigning each x value (e.g., day of year) in
# [min_x, max_x] to a stratum of width `len`; the final (possibly short)
# stratum absorbs the remainder up to and including max_x.
#
# Args:
#   min_x, max_x: first and last x values (inclusive).
#   len:          stratum width in x units.
#
# Returns: data.frame with columns x and stratum.
create_strata_key = function(min_x, max_x, len) {
  # left edges of the strata; stratum j covers [s_start[j], s_start[j+1])
  # and the last stratum runs through max_x inclusive
  s_start = seq(min_x, max_x, len)
  x = min_x:max_x
  # findInterval() maps each x to its stratum in one vectorized pass,
  # replacing the original O(n_x * n_s) double loop and its explicit
  # last-row fix-up
  data.frame(x = x, stratum = findInterval(x, s_start))
}
##### PREPARE THE RAW ESCAPEMENT FILES #####
# Clean a raw daily escapement-count file: rebuild full dates, keep only
# Chinook within the study years, and total the daily passage counts.
# Relies on the global `all_years` vector and the date2doy() helper
# (both defined elsewhere in this project). Returns year, doy, count.
esc_data_prep = function(dat) {
  # reformat dates: column 1 holds month/day; append the year to it
  colnames(dat)[1] = "date"
  dat$date = paste(dat$date, dat$year, sep = "/")
  # big data manipulation
  dat %>%
    # remove other species and keep only years in range
    filter(species == "Chinook" & year %in% all_years) %>%
    # create the doy variable
    group_by(year) %>%
    mutate(doy = date2doy(date)) %>%
    ungroup %>%
    # get a total passage estimate, estimates + observed
    group_by(year, doy) %>%
    summarise(count = sum(count, na.rm = T), .groups = "drop")
}
##### PREPARE THE RAW ASL FILES #####
# Clean a raw age-sex-length (ASL) sampling file: keep Chinook in the
# study years with complete age/sex/length records, compute total age,
# and return only year, doy, age, sex and length for ages 4-7.
# Relies on the global `all_years` vector and the date2doy() helper.
asl_data_prep = function(dat) {
  colnames(dat)[1] = "date"
  dat %>%
    # remove other species and keep only years in range
    filter(species == "Chinook" & year %in% all_years) %>%
    # create a day of the year variable
    group_by(year) %>%
    mutate(doy = date2doy(date)) %>%
    ungroup %>%
    # remove fish that weren't aged or sexed successfully
    filter(!is.na(fw_age) & !is.na(sw_age) & sex %in% c("male", "female") & !is.na(length)) %>%
    # add a total age variable (freshwater age + saltwater age + 1)
    mutate(age = fw_age + sw_age + 1) %>%
    # keep only the variables we are interested in
    select(year, doy, age, sex, length) %>%
    # keep only ages we are interested in
    filter(age %in% 4:7)
}
##### PREPARE THE PREP-ED ESCAPEMENT DATA #####
# Collapse daily escapement counts to year x stratum totals, reshaped
# wide with one column per stratum (reshape2::dcast). Year/stratum
# combinations with no counts become 0 rather than NA.
esc_data_prep2 = function(esc) {
  # calculates counts by year and strata
  esc = esc %>%
    group_by(year, stratum) %>%
    summarise(count = sum(count), .groups = "drop") %>%
    dcast(year ~ stratum, value.var = "count")
  esc[is.na(esc)] = 0
  esc
}
##### PREPARE THE PREP-ED ASL DATA #####
# Summarise ASL samples by year and stratum: mean length per age/sex
# class (wide, one column per class via reshape2::dcast) merged with the
# number of successfully sampled fish (n) per year/stratum.
asl_data_prep2 = function(asl) {
  # create an age/sex variable, e.g. "m5" = male with total age 5
  asl$age_sex = paste(substr(asl$sex, 1, 1), asl$age, sep = "")
  # calculate age and sex composition by year and stratum
  mean_length = asl %>%
    group_by(year, stratum, age_sex) %>%
    summarize(mean_length = mean(length), .groups = "drop") %>%
    dcast(year + stratum ~ age_sex, value.var = "mean_length")
  # calculate number of fish aged/sexed/lengthed successfully
  n = asl %>% group_by(year, stratum) %>% summarize(n = n(), .groups = "drop")
  merge(mean_length, n, by = c("year", "stratum"), all = T)
}
##### PERFORM THE TEMPORAL WEIGHTED AVERAGE #####
# Escapement-weighted mean length by age/sex class for one year.
#
# For each age/sex class, strata where the class was not sampled are
# dropped and the remaining strata are re-weighted by their escapement
# counts. Returns a vector of weighted mean lengths (one per age/sex
# class, zeros recoded to NA) followed by the total sample size; if the
# year is absent from either input, returns 8 NAs and a 0 sample size.
#
# Args:
#   yr:  year to summarise.
#   asl: output of asl_data_prep2() (year, stratum, class means, n).
#   esc: output of esc_data_prep2() (year x stratum escapement counts).
get_wt_avg = function(yr, asl, esc) {
  if (yr %in% esc$year & yr %in% asl$year) {
    asl_strata = filter(asl, year == yr) %>%
      select(stratum) %>%
      unlist %>% unname %>%
      unique
    n_tot = asl %>% filter(year == yr) %>%
      select(n) %>% unlist %>% unname %>% sum(na.rm = T)
    esc_j = filter(esc, year == yr) %>%
      select(-year) %>% select(asl_strata) %>% unlist
    mean_length_j = filter(asl, year == yr) %>%
      select(-year) %>% select(-n) %>%
      melt(id.vars = "stratum", variable.name = "age_sex", value.name = "mean_length") %>%
      dcast(age_sex ~ stratum, value.var = "mean_length") %>% select(-age_sex)
    # (a stray debug assignment `x = mean_length_j[1,]` was removed here;
    # it was dead code, immediately shadowed by the apply() argument)
    out = apply(mean_length_j, 1, function(x) {
      # zero out escapement weights for strata without samples of this
      # class, then renormalize the weights before averaging
      discard_j = which(is.na(x))
      tmp_esc_j = esc_j
      tmp_esc_j[discard_j] = 0
      pi_j = tmp_esc_j/sum(tmp_esc_j)
      sum(x * pi_j, na.rm = T)
    })
    out = c(out, n_tot)
    out[out == 0] = NA
  } else {
    # NOTE(review): 8 hard-codes the number of age/sex classes produced
    # upstream (m4..m7, f4..f7) — revisit if the classes ever change.
    out = c(rep(NA, 8), 0)
  }
  out
}
# Linearly interpolate missing values in a (year, value) table.
#
# A missing first/last row is filled with a local mean (first 10 / last
# 6 rows); each remaining interior NA is linearly interpolated between
# the nearest observed years via approx().
#
# Args:
#   x: data.frame/matrix with a "year" column first and values second.
#
# Returns: the value column (column 2) with its NAs filled.
interp = function(x) {
  # if first or last row is missing, have separate rule
  if (is.na(x[1,2])) {
    x[1,2] = mean(x[1:10,2], na.rm = T)
  }
  if (is.na(x[nrow(x),2])) {
    x[nrow(x),2] = mean(x[(nrow(x) - 5):nrow(x),2], na.rm = T)
  }
  obs_y = x[!is.na(x[,2]),"year"]
  nobs_y = x[is.na(x[,2]),"year"]
  all_y = x[,"year"]
  x = x[,-1]
  # seq_along() (not 1:length) so a gap-free input falls through without
  # error; the original 1:length(nobs_y) iterated over c(1, 0) and
  # crashed when there was nothing to interpolate
  for (o in seq_along(nobs_y)) {
    fiy = max(obs_y[obs_y < nobs_y[o]])  # last observed year before the gap
    liy = min(obs_y[obs_y > nobs_y[o]])  # first observed year after the gap
    pred = approx(x = c(fiy, liy), y = c(x[all_y %in% c(fiy, liy)]), xout = nobs_y[o])$y
    x[all_y == nobs_y[o]] = pred
  }
  x
}
|
d37c8ad0385a03ebdd6a33d5ab2781d6bdeb8440 | 0d35749c4c44b101afc124d26380574d650fec3a | /man/grattan_darkgrey1.Rd | 40a6da4785d57f0dbd2384d1787e585847f94592 | [
"MIT"
] | permissive | MattCowgill/grattantheme | 3b27ce0488907d46dc52eff65622aef0235d4965 | 0b1dfac4e19a38c8894c0556cc1ebd3f1ee991de | refs/heads/master | 2023-01-09T16:58:04.454028 | 2022-12-29T22:15:58 | 2022-12-29T22:15:58 | 351,668,681 | 0 | 0 | NOASSERTION | 2021-03-26T05:08:54 | 2021-03-26T05:08:54 | null | UTF-8 | R | false | true | 304 | rd | grattan_darkgrey1.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_grattan_colours.R
\docType{data}
\name{grattan_darkgrey1}
\alias{grattan_darkgrey1}
\title{Grattan dark grey colour constant}
\format{
An object of class \code{character} of length 1.
}
\usage{
grattan_darkgrey1
}
\description{
A character constant of length 1 giving the Grattan dark grey colour
used by the grattantheme package.
}
\keyword{datasets}
|
02727afda575c42e6fd25c6699113191475a85b5 | ca1776015f5f67178a0a1e36669662bc02ab4e57 | /caribbean_fish_benthic.R | 92577f8bfb7707ad9d0ee1219cc31215b8cd909e | [] | no_license | fishymary/caribbean_fish_benthic | f7ad8b2683f1d473ffa37932f348a2ff1f96d8a3 | 8fb22f6afde693d5fb8c824d1b5f960c4510018d | refs/heads/master | 2020-04-20T09:47:53.567421 | 2019-02-05T06:15:16 | 2019-02-05T06:15:16 | 168,774,306 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 74,342 | r | caribbean_fish_benthic.R | # -----------------------------------------------------------------------
# FISH FUNCTIONAL GROUPS & BENTHIC COVER IN THE CARIBBEAN
# -----------------------------------------------------------------------
# Analysis script: models fish biomass (total and by trophic group) and
# benthic cover (coral, macroalgae) across Caribbean GCRMN locations, then
# relates the two.  Reads data/ inputs; writes figures/tables to outputs/.
# NOTE(review): rm(list=ls()) wipes the current workspace -- run in a fresh session.
rm(list=ls())
Start <- Sys.time()
# initialization ----------------------------------------------------------
library(dplyr) # v.0.7.6
library(tidyr) # v.0.8.0
library(lme4) # v.1.1.13
library(merTools) # v.0.3.0
library(mgcv) # v.1.8-23
library(MuMIn) # v.1.15.6
library(car) # v.2.1.5
library(vegan) # v.2.4-4
# data --------------------------------------------------------------------
fish <- read.csv('data/GCRMN_fish_4dryad.csv')
benthic <- read.csv('data/GCRMN_benthic_4dryad.csv')
# subset fish species
# spp_include.csv flags species that count toward biomass (use4bio == 1);
# excluded species keep their rows but contribute bio_use = 0.
spp.incl <- read.csv("data/spp_include.csv")
fish.use <- left_join(fish, spp.incl, by="Species")
fish.use$bio_use <- ifelse(fish.use$use4bio==1,fish.use$biomass_g_m2,0)
colnames(fish.use)
# fix a loc name
fish.use$Location[fish.use$Location=="Bahamas_Exuma"] <- "Bahamas_Other"
# depth hist ---------------------------------------------------------------
# SOM figure: distribution of survey depths (positive depths only)
png(file='outputs/SOM_depth.png',height=1500,width=1800,res=300)
temp <- subset(fish.use, Depth_m > 0)
hist(temp$Depth_m,main='',xlab='Depth (meters)',col='lightgrey')
dev.off()
# calculate total biomass -------------------------------------------------
# Total "usable" fish biomass per replicate transect, then a mixed model
# (log(x+1) response, Location fixed, DatasetID random) giving dataset-
# adjusted location-level means and 95% prediction intervals (merTools).
tot.bio <- fish.use %>% group_by(DatasetID,Replicate,Location,Year,Habitat) %>% summarise("tot.bio"=sum(bio_use,na.rm=T)) %>% ungroup()
tot.bio.m <- tot.bio
str(tot.bio.m)
tot.bio.m$DatasetID <- as.factor(tot.bio.m$DatasetID)
tot.bio.m$Location <- as.character(tot.bio.m$Location); tot.bio.m$Location <- as.factor(tot.bio.m$Location)
hist(log(tot.bio.m$tot.bio+1)); summary(tot.bio.m$tot.bio)
plot(tot.bio.m$Location,tot.bio.m$tot.bio)
totbio.mod.l <- lmer(log(tot.bio+1) ~ Location + (1|DatasetID), data=tot.bio.m)
summary(totbio.mod.l); plot(residuals(totbio.mod.l)~fitted(totbio.mod.l))
hist(residuals(totbio.mod.l))
newdat <- data.frame(expand.grid(Location=unique(tot.bio.m$Location),DatasetID=11,Country="USVI"))
# BUGFIX: the response is log(tot.bio+1), so the back-transform is
# exp(pred)-1, not exp(pred)+1 (the CI block below already used -1).
totbio.mod.pred1 <- predict(totbio.mod.l,newdata=newdat); totbio.mod.pred1 <- exp(totbio.mod.pred1)-1
totbio.mod.pred1 <- data.frame(Location=newdat$Location,tot.bio=totbio.mod.pred1)
totbio.mod.pred1 <- totbio.mod.pred1[with(totbio.mod.pred1, order(tot.bio)),]
totbio.mod.pred <- totbio.mod.pred1
totbio.mod.pred[totbio.mod.pred$tot.bio == min(totbio.mod.pred$tot.bio),]
totbio.mod.pred[totbio.mod.pred$tot.bio == max(totbio.mod.pred$tot.bio),]
totbio.mod.CI <- predictInterval(totbio.mod.l, newdata = newdat, n.sims = 10000, level=0.05)
totbio.mod.CI <- exp(totbio.mod.CI)-1
totbio.mod.CI <- data.frame(Location=newdat$Location,tot.bio=totbio.mod.CI[,1],tot.up=totbio.mod.CI[,2],tot.down=totbio.mod.CI[,3])
totbio.mod.CI[totbio.mod.CI$tot.bio == min(totbio.mod.CI$tot.bio),]
totbio.mod.CI[totbio.mod.CI$tot.bio == max(totbio.mod.CI$tot.bio),]
totbio.mod.CI[totbio.mod.CI$tot.bio == max(totbio.mod.CI$tot.bio),'tot.bio']/totbio.mod.CI[totbio.mod.CI$tot.bio == min(totbio.mod.CI$tot.bio),'tot.bio']
# calculate biomass by trophic level -------------------------------------------------
# Trophic codes: BR = browsers, GD = grazers/detritivores, P = predators,
# SC = scrapers, SE = secondary consumers.  Records with missing Trophic
# contribute zero biomass rather than being dropped.
temp <- fish.use; temp$bio_use[is.na(temp$Trophic)] <- 0
trophic.bio.raw <- temp %>%
group_by(DatasetID,Replicate,Location,Year,Trophic,Habitat) %>%
summarise("sum"=sum(bio_use,na.rm=T)) %>%
spread(key=Trophic,value=sum) %>%
ungroup()
trophic.bio.raw.m <- trophic.bio.raw
trophic.bio.raw.m[is.na(trophic.bio.raw.m)] <- 0
trophic.bio.raw.m$DatasetID <- as.factor(trophic.bio.raw.m$DatasetID)
trophic.bio.raw.m$Location <- as.character(trophic.bio.raw.m$Location)
trophic.loc.mod <- data.frame(Location=unique(trophic.bio.raw.m$Location),BR=NA,GD=NA,P=NA,SC=NA,SE=NA)
trophic.loc.modCI <- data.frame(Location=unique(trophic.bio.raw.m$Location))
# One mixed model per trophic group.  The group columns of the spread table
# sit at positions 6:10, addressed as k+4 for k in 2:6 (the target columns
# of trophic.loc.mod).  Predictions are back-transformed with exp(x)-1.
for(k in c(2:6)){
temp <- trophic.bio.raw.m
colnames(temp)[k+4] <- "resp"
tb.mod.l <- lmer(log(resp+1) ~ Location + (1|DatasetID) , data=temp)
plot(residuals(tb.mod.l)~fitted(tb.mod.l))
newdat <- data.frame(expand.grid(Location=unique(temp$Location),DatasetID=11,Country="USVI"))
# tb.mod.pred <- predict(tb.mod.l,newdata=newdat)
# tb.mod.pred <- exp(tb.mod.pred) - 1
# trophic.loc.mod[k] <- tb.mod.pred
trophic.CI <- predictInterval(tb.mod.l, newdata = newdat, n.sims = 10000, level=0.05)
trophic.CI <- exp(trophic.CI)-1
trophic.loc.mod[k] <- trophic.CI$fit
trophic.loc.modCI <- cbind(trophic.loc.modCI,trophic.CI)
}
trophic.loc.mod
colnames(trophic.loc.modCI) <- c("Location","BR.fit","BR.up","BR.down","GD.fit","GD.up","GD.down","P.fit","P.up","P.down","SC.fit","SC.up","SC.down","SE.fit","SE.up","SE.down")
str(trophic.loc.modCI)
# max:min ratio of location-level predator biomass (reported in text)
trophic.loc.modCI[trophic.loc.modCI$P.fit == max(trophic.loc.modCI$P.fit),'P.fit']/trophic.loc.modCI[trophic.loc.modCI$P.fit == min(trophic.loc.modCI$P.fit),'P.fit']
# barplot -----------------------------------------------------------------
# Lookup table mapping raw GCRMN Location codes to human-readable plot
# labels (paired element-wise; order must stay in sync between the vectors).
labels.loc <- data.frame(
Location = as.factor(c("Jamaic_NorthCentral", "Mexico_SouthEastYucatan",
"Jamaic_MontegoBay", "Jamaic_West",
"Belize_GulfHonduras", "PuertoRico_Turrumote",
"Belize_SouthernBarrier", "Mexico_NorthEastYucatan",
"DominicanRepublic_North", "Belize_AtollLeeward",
"DominicanRepublic_South", "PuertoRico_LaPaguera",
"Mexico_ChinchorroBank", "USVI_StJohn", "PuertoRico_JobosBay",
"StVincentGrenadines_Grenadines",
"Belize_CentralBarrier", "Colombia_SanAndreas", "Panama_SanBlas",
"Guadaloupe_Guadalpupe", "TurksCaicos_TurksCaicos",
"StBarthelemy_StBarthelemy",
"StKittsNevis_StKittsNevis", "Cuba_North", "Belize_AtollWindward",
"Cuba_Southwest", "Honduras_BayIslands",
"Belize_InnerBarrier", "PuertoRico_Vieques_Culebra", "USVI_StThomas",
"BVI_BVI", "CaymanIslands_LittleandBrac",
"CaymanIslands_GrandCayman", "Martinique_Martinique",
"Curacao_NorthWest", "Colombia_Providencia",
"Bahamas_Other", "AandB_Antigua", "StEustatius_StEustatius",
"Bahamas_CaySalBank", "Honduras_NearShore",
"Cuba_JardinesdelaReina", "Mexico_Cozumel_Leeward",
"Bahamas_Remote", "Bahamas_Nassau", "Bahamas_Andros",
"AandB_Barbuda", "Florida_LowerKeys", "Bahamas_Exuma",
"Florida_UpperKeys", "Florida_MiddleKeys")),
Label = as.factor(c("Jamica NC", "Mexico SE Yucatan", "Jamaica MB",
"Jamaica W", "Belize Gulf Honduras",
"Puerto Rico Turrumote", "Belize S Barrier",
"Mexico NE Yucatan", "DR North", "Belize Atoll Leeward", "DR South",
"Puerto Rico La Paguera", "Mexico Chinchorro",
"USVI St John", "Puerto Rico Jobos Bay", "Grenadines",
"Belize C Barrier", "Colombia San Andres",
"Panama San Blas", "Guadeloupe", "Turks & Caicos",
"St Barthelemy", "St Kitts & Nevis", "Cuba North",
"Belize Atoll Windward", "Cuba Southeast",
"Honduras Bay Islands", "Belize Inner Barrier",
"Puerto Rico Vieques", "USVI St. Thomas", "BVI", "Little Cayman",
"Grand Cayman", "Martinique", "Curacao Northwest",
"Colombia Providencia", "Bahamas other", "Antigua",
"St. Eustatius", "Cay Sal Bank", "Honduras Nearshore",
"Cuba Jardines", "Mexico Cozumel",
"Bahamas Remote", "Bahamas Nassau", "Bahamas Andros", "Barbuda",
"Florida Lower Keys", "Bahamas Exuma",
"Florida Upper Keys", "Florida Middle Keys"))
)
# Fig 2: stacked barplot of modelled trophic-group biomass by location,
# ordered by total (summed) biomass.  Point symbols above the bars flag two
# location groupings (filled circles vs open diamonds).
png(file='outputs/Fig2.png',height = 1500,width=3000,res=300)
par(mar=c(7,4,2,1),mgp=c(1.6,.7,0))
temp <- trophic.loc.mod
temp <- left_join(temp, labels.loc)
temp$sum <- rowSums(temp[2:6])
temp <- temp[with(temp, order(sum)),]
col.vec <- c("aquamarine4","aquamarine","lightgreen","#f2a54f","deepskyblue1") #red was "#e0606e"
temp$order <- seq(1:nrow(temp))
# stacking order: SE (predators' col first in legend), BR, GD, SC, P -> columns c(6,2,3,5,4)
b <- barplot(t(temp[c(6,2,3,5,4)]),ylim=c(0,75),xaxt="n",col=col.vec,cex.axis=1.3
,ylab=expression("Biomass"~~bgroup("(","g "*m^{-2},")"))
,cex.lab=1.2)
text(x=b+.7, y=-3, temp$Label, xpd=TRUE, srt=35, pos=2,cex=0.8)
legend("topleft",legend=c("Predators","Sec. Consumers","Grazers","Browsers","Scrapers"),bty="n",pt.bg=rev(col.vec),pch=22,pt.cex=2)
# first symbol set (filled circles) above the named locations
rows <- temp[c(temp$Location=="Belize_GulfHonduras"|temp$Location=="Belize_SouthernBarrier"|temp$Location=="CaymanIslands_GrandCayman"|temp$Location=="CaymanIslands_LittleandBrac"|temp$Location== "Florida_LowerKeys"|temp$Location== "Florida_UpperKeys"|temp$Location== "Florida_MiddleKeys"|temp$Location=="Mexico_Cozumel_Leeward"),]
points(b[rows$order],temp$sum[rows$order]+2,pch=19,cex=1)
# second symbol set (open diamonds)
rows <- temp[c(temp$Location=="Bahamas_Remote"|temp$Location=="AandB_Antigua"|temp$Location=="AandB_Barbuda"|temp$Location=="Bahamas_Exuma"|temp$Location=="Bahamas_Other"|temp$Location=="Belize_AtollLeeward"|temp$Location=="Belize_InnerBarrier"|temp$Location=="Cuba_JardinesdelaReina"|temp$Location=="Cuba_Southwest"|temp$Location=="Curacao_NorthWest"|temp$Location=="Guadaloupe_Guadalpupe"|temp$Location=="Jamaic_MontegoBay"|temp$Location=="Jamaic_West"|temp$Location=="PuertoRico_Vieques_Culebra"|temp$Location=="TurksCaicos_TurksCaicos"|temp$Location=="USVI_StJohn"|temp$Location=="USVI_StThomas"),]
points(b[rows$order],temp$sum[rows$order]+2,pch=5,cex=1)
dev.off()
# SOM Table 5: predictions with CIs, plus per-group min/max locations and
# the max:min predator ratio (printed to console for the manuscript text).
temp <- trophic.loc.modCI
temp <- left_join(temp, labels.loc)
temp$sum <- rowSums(temp[c('BR.fit','GD.fit','P.fit','SC.fit','SE.fit')])
temp <- temp[with(temp, order(sum)),]
write.csv(temp, 'outputs/SOM5.csv',row.names=F)
temp[temp$P.fit == min(temp$P.fit),]
temp[temp$P.fit == max(temp$P.fit),]
temp$P.fit[temp$P.fit == max(temp$P.fit)]/temp$P.fit[temp$P.fit == min(temp$P.fit)]
temp[temp$SC.fit == min(temp$SC.fit),]
temp[temp$SC.fit == max(temp$SC.fit),]
temp[temp$BR.fit == min(temp$BR.fit),]
temp[temp$BR.fit == max(temp$BR.fit),]
temp[temp$GD.fit == min(temp$GD.fit),]
temp[temp$GD.fit == max(temp$GD.fit),]
temp[temp$SE.fit == min(temp$SE.fit),]
temp[temp$SE.fit == max(temp$SE.fit),]
# correlations among fish -------------------------------------------------
# Diagonal panel for pairs(): draw a histogram of x scaled to the panel.
panel.hist <- function(x, ...) {
  saved <- par("usr")
  on.exit(par(saved))
  # stretch the y-range so the tallest bar spans 2/3 of the panel height
  par(usr = c(saved[1:2], 0, 1.5))
  counts <- hist(x, plot = FALSE)
  n.brk <- length(counts$breaks)
  heights <- counts$counts / max(counts$counts)
  rect(counts$breaks[-n.brk], 0, counts$breaks[-1], heights, col = "cyan", ...)
}
# Empty pairs() panel: intentionally draws nothing.
panel.blank <- function(x, y) {
  NULL
}
# Upper panel for pairs(): print the absolute Pearson correlation of x and y.
# `digits` controls significant digits, `prefix` is prepended to the label and
# `cex.cor` optionally fixes the text size (default scales with label width).
# BUGFIX: the original computed a local `cex` only when `cex.cor` was missing,
# so supplying `cex.cor` crashed with "object 'cex' not found" and the value
# was ignored.  `cex.cor` is now honored; default behaviour is unchanged.
panel.cor <- function(x, y, digits=2, prefix="", cex.cor)
{
    usr <- par("usr"); on.exit(par(usr))
    par(usr = c(0, 1, 0, 1))
    r <- abs(cor(x, y))
    # format against a reference value so digits are consistent across panels
    txt <- format(c(r, 0.123456789), digits=digits)[1]
    txt <- paste(prefix, txt, sep="")
    if(missing(cex.cor)) cex.cor <- 0.6/strwidth(txt)
    #text(0.5, 0.5, txt, cex = cex.cor * r)
    text(0.5, 0.5, txt, cex = cex.cor)
}
pairs(trophic.loc.mod[c(2:6)], lower.panel=panel.smooth, upper.panel=panel.cor, diag.panel=panel.hist)
temp <- left_join(totbio.mod.CI,trophic.loc.modCI,by="Location")
head(temp)
# SOM Fig 8: lower-triangle scatterplot matrix of total and trophic-group
# biomass (columns: Total, Predators, Sec. Consumers, Grazers, Browsers;
# rows: Predators, Sec. Consumers, Grazers, Browsers, Scrapers).  Significant
# Pearson correlations (p < 0.05) are printed in the panel.  The 25 copy-
# pasted panels of the original are factored into two helpers; panels are
# drawn in the same order with identical graphical parameters.
som8.panel <- function(x, y, label.x, label.y) {
  plot(x, y, pch = 21, col = "black", bg = "grey", cex = 1.5, bty = "o",
       xlab = "", ylab = "", yaxt = "n", xaxt = "n", cex.axis = 1.4)
  if (label.x) axis(1, cex.axis = 1.4) else axis(1, labels = NA)
  if (label.y) axis(2, cex.axis = 1.4) else axis(2, labels = NA)
  if ((cor.test(x, y))$p.value < 0.05) {
    text(0.1 * max(x), 0.9 * max(y), round(cor(x, y), 2), pos = 4, cex = 2, font = 2)
  }
}
som8.blank <- function() {
  # empty placeholder panel for the upper triangle
  plot(c(1, 1), c(1, 1), type = "n", xaxt = "n", yaxt = "n", bty = "n", xlab = "", ylab = "")
}
png(file=file.path(getwd(),'outputs',"SOM8.png"),height=3000,width=3200,res=300)
par(mfrow=c(5,5),mar=c(1,1,1,1),oma=c(4,4,0,0),mgp=c(1.6,.7,0),xpd=T)
som8.xvars <- list(temp$tot.bio, temp$P.fit, temp$SC.fit, temp$GD.fit, temp$BR.fit)
som8.yvars <- list(temp$P.fit, temp$SC.fit, temp$GD.fit, temp$BR.fit, temp$SE.fit)
for (r in 1:5) {
  for (k in 1:5) {
    if (k <= r) {
      # x-axis tick labels only on the bottom row; y-axis labels only in column 1
      som8.panel(som8.xvars[[k]], som8.yvars[[r]], label.x = (r == 5), label.y = (k == 1))
    } else {
      som8.blank()
    }
  }
}
mtext("Total",outer=T,side=1,at=0.1,line=1.3,cex=1.5)
mtext("Predators",outer=T,side=1,at=0.3,line=1.3,cex=1.5)
mtext("Sec. Consumers",outer=T,side=1,at=0.5,line=1.3,cex=1.5)
mtext("Grazers",outer=T,side=1,at=0.7,line=1.3,cex=1.5)
mtext("Browsers",outer=T,side=1,at=0.9,line=1.3,cex=1.5)
# mtext(expression("Biomass"~~bgroup("(","g"*m^{-2},")")),outer=T,side=1,line=3.5,cex=1.5)
mtext("Predators",outer=T,side=2,at=0.9,line=1.3,cex=1.5)
mtext("Sec. Consumers",outer=T,side=2,at=0.7,line=1.3,cex=1.5)
mtext("Grazers",outer=T,side=2,at=0.5,line=1.3,cex=1.5)
mtext("Browsers",outer=T,side=2,at=0.3,line=1.3,cex=1.5)
mtext("Scrapers",outer=T,side=2,at=0.1,line=1.3,cex=1.5)
# mtext(expression("Biomass"~~bgroup("(","g"*m^{-2},")")),outer=T,side=2,line=3.5,cex=1.5)
dev.off()
# summarise benthic cover -------------------------------------------------
# modeled
# Coral cover: double square-root transform stabilises the percent-cover
# response; only locations with more than 5 observations are modelled.
# Back-transform of predictions is ^4.
coral.m <- benthic
coral.m <- coral.m[!is.na(coral.m$TotalCoral),]
temp <- coral.m %>% group_by(Location) %>% summarise(nrow=length(TotalCoral)) %>% ungroup(); temp <- subset(temp, nrow > 5)
coral.m <- coral.m[coral.m$Location %in% temp$Location,]
coral.m$Location <- as.character(coral.m$Location); coral.m$Location <- as.factor(coral.m$Location)
coral.m$DatasetID <- as.factor(coral.m$DatasetID); coral.m$Year <- as.factor(coral.m$Year)
str(coral.m)
coral.mod <- lmer(sqrt(sqrt(TotalCoral)) ~ Location + (1|DatasetID) , data=coral.m)
summary(coral.mod); plot(residuals(coral.mod)~fitted(coral.mod))
newdat <- data.frame(expand.grid(Location=levels(coral.m$Location),DatasetID=16))
coral.mod.pred <- predict(coral.mod,newdata=newdat); coral.mod.pred <- (coral.mod.pred)^4
coral.mod.pred <- data.frame(Location=newdat$Location,coral=coral.mod.pred)
coral.mod.pred <- coral.mod.pred[with(coral.mod.pred, order(coral)),]
coral.mod.pred
coral.mod.CI <- predictInterval(coral.mod, newdata = newdat, n.sims = 10000, level=0.05)
coral.mod.CI <- (coral.mod.CI)^4
coral.mod.CI <- data.frame(Location=newdat$Location,coral=coral.mod.CI[,1],coral.up=coral.mod.CI[,2],coral.down=coral.mod.CI[,3])
coral.mod.CI[coral.mod.CI$coral == min(coral.mod.CI$coral),]
coral.mod.CI[coral.mod.CI$coral == max(coral.mod.CI$coral),]
# Macroalgal cover: same model structure as coral (no minimum-sample filter).
macro.m <- benthic
macro.m <- macro.m[!is.na(macro.m$TotalMacro),]
macro.m$DatasetID <- as.factor(macro.m$DatasetID); macro.m$Year <- as.factor(macro.m$Year)
macro.m$Location <- as.character(macro.m$Location); macro.m$Location <- as.factor(macro.m$Location)
macro.mod <- lmer(sqrt(sqrt(TotalMacro)) ~ Location + (1|DatasetID) , data=macro.m)
summary(macro.mod); plot(residuals(macro.mod)~fitted(macro.mod))
newdat <- data.frame(expand.grid(Location=unique(macro.m$Location),DatasetID=10))
macro.mod.pred <- predict(macro.mod,newdata=newdat); macro.mod.pred <- (macro.mod.pred)^4
macro.mod.pred <- data.frame(Location=newdat$Location,macro=macro.mod.pred)
macro.mod.pred <- macro.mod.pred[with(macro.mod.pred, order(macro)),]
macro.mod.pred
macro.mod.CI <- predictInterval(macro.mod, newdata = newdat, n.sims = 10000, level=0.05)
macro.mod.CI <- (macro.mod.CI)^4
macro.mod.CI <- data.frame(Location=newdat$Location,macro=macro.mod.CI[,1],macro.up=macro.mod.CI[,2],macro.down=macro.mod.CI[,3])
macro.mod.CI[macro.mod.CI$macro == min(macro.mod.CI$macro),]
macro.mod.CI[macro.mod.CI$macro == max(macro.mod.CI$macro),]
# SOM Table 4: fish + benthic predictions side by side
temp <- left_join(totbio.mod.CI, coral.mod.CI, by="Location")
temp <- left_join(temp, macro.mod.CI, by="Location")
temp <- left_join(temp, labels.loc)
write.csv(temp, file.path(getwd(),'outputs',"SOM4.csv"),row.names=F)
# total bio v benthic -----------------------------------------------------
# Merge location-level fish biomass with benthic cover; `cm` is the
# log-ratio of coral to macroalgal cover used as the third response.
totbio.pred.ben <- left_join(totbio.mod.CI, coral.mod.CI, by="Location")
totbio.pred.ben <- left_join(totbio.pred.ben, macro.mod.CI, by="Location")
plot(totbio.pred.ben$tot.bio,totbio.pred.ben$coral)
plot(totbio.pred.ben$tot.bio,totbio.pred.ben$macro)
totbio.pred.ben$cm <- log(totbio.pred.ben$coral/totbio.pred.ben$macro)
# join trophic and benthic ------------------------------------------------
trophic.benthic.m <- left_join(trophic.loc.mod, coral.mod.CI, by="Location")
trophic.benthic.m <- left_join(trophic.benthic.m, macro.mod.CI, by="Location")
str(trophic.benthic.m)
pairs(trophic.benthic.m[c(2:7,10)], lower.panel=panel.smooth, upper.panel=panel.cor, diag.panel=panel.hist)
trophic.benthic.m$cm <- log(trophic.benthic.m$coral/trophic.benthic.m$macro)
# panel fig - trophic biomass -------------------------------------------------------------
# Draw one panel of Fig 3/4: benthic response `y` of data frame `dat` against
# fish predictor `x`.  Fits an lm and a k=3 GAM after removing points with
# Cook's distance >= 0.5, picks the AICc-best model (MuMIn::model.sel), and
# draws the fitted line with a 95% CI band only when the chosen model's
# slope/smooth term is significant; excluded points are circled in red.
# `i` is the panel index (row of mod.out) and controls axis limits and which
# axes get tick labels; `metric` ('biomass' or 'size') switches x-limits.
plot.fish <- function(dat, x, y, i, metric){ #i is what row of mod.out to fill in
temp <- dat[!is.na(dat[,y]),]
tp <- lm(temp[,y]~temp[,x])
cook <- cooks.distance(tp)
# `!` binds after `>=` in R, so this keeps rows with Cook's D < 0.5
temp.sub <- temp[!cook >= 0.50, ]
tp <- lm(temp.sub[, y] ~ temp.sub[, x])
tpg <- gam(temp.sub[, y] ~ s(temp.sub[, x], k = 3), family = gaussian)
m.sel <- model.sel(tp, tpg)
# flag heteroscedasticity (car::ncvTest) on the console, but keep going
if(ncvTest(tp)$p <= 0.05){print('Non-constant Error')}
# axis limits by panel row (1-6 coral, 7-12 macro, 13-18 log-ratio) and column
if(metric=='biomass'){
ylim.go <- c(0,60); if(c(i > 6 & i < 13)){ylim.go <- c(0,80)}; if(i > 12){ylim.go <- c(-2.2,2.2)}
xlim.go <- c(0,85); if(i==2|i==8|i==14){xlim.go <- c(0,25)}; if(i==3|i==9|i==15){xlim.go <- c(0,36)}
if(i==4|i==10|i==16){xlim.go <- c(0,8)}; if(i==5|i==11|i==17){xlim.go <- c(0,8.5)}; if(i==6|i==12|i==18){xlim.go <- c(0,20)}
}
if(metric=='size'){
ylim.go <- c(0,60); if(c(i > 6 & i < 13)){ylim.go <- c(0,80)}; if(i > 12){ylim.go <- c(-2.2,2.2)}
xlim.go <- c(5,30)
}
# tick labels: x-axis only on the bottom row (i > 12), y-axis only in column 1
xaxt.go <- rep('n',18); if(i > 12){xaxt.go <- 's'}
yaxt.go <- rep('n',18); if(i==1|i==7|i==13){yaxt.go <- 's'}
# case 1: lm preferred and slope significant -> straight fit with CI polygon
if(m.sel$class[1]=='lm' & summary(tp)$coefficients[8] <= 0.05){
tp.pred <- predict(tp,se.fit=T)
tp.pred <- data.frame(pred= tp.pred$fit,se=tp.pred$se.fit, x=temp.sub[,x])
tp.pred <- tp.pred[with(tp.pred, order(x)),]
tp.pred$CIup <- tp.pred$pred + (1.96*tp.pred$se); tp.pred$CIdown <- tp.pred$pred - (1.96*tp.pred$se)
plot(temp.sub[,y]~temp.sub[,x],pch=21,col="black",bg="grey",bty="l",xlab="",ylab=""
,cex.axis=1.2,cex.lab=1.3,ylim=ylim.go,xlim=xlim.go,xaxt=xaxt.go,yaxt=yaxt.go,type="n")
# points(tp.pred$pred~tp.pred$x,type="l",lwd=2,lty=2)
axis(1,labels=NA)
axis(2,labels=NA)
polygon(c(tp.pred$x,rev(tp.pred$x)),c(tp.pred$CIup,rev(tp.pred$CIdown))
,col=rgb(140,140,140,100,max=255),border=NA)
points(tp.pred$pred~tp.pred$x,type="l",lwd=2)
points(temp[,y]~temp[,x],pch=21,col="black",bg="grey")
points(temp[cook >= 0.50,y]~temp[cook >= 0.50,x],pch=1,col="red",cex=2)
} else {
# case 2: GAM preferred (or lm not significant) and smooth significant
if(summary(tpg)$s.table[4] <= 0.05){
tpg.pred <- predict(tpg, se.fit=T)
tpg.pred <- data.frame(x=temp.sub[,x],y=tpg.pred$fit,up=tpg.pred$fit+1.96*tpg.pred$se.fit
,down=tpg.pred$fit-1.96*tpg.pred$se.fit)
tpg.pred <- tpg.pred[with(tpg.pred,order(x)),]
plot(temp.sub[,y]~temp.sub[,x],pch=21,col="black",bg="grey",bty="l",xlab="",ylab=""
,cex.axis=1.2,cex.lab=1.3,ylim=ylim.go,xlim=xlim.go,xaxt=xaxt.go,yaxt=yaxt.go,type="n")
axis(2,labels=NA)
axis(1,labels=NA)
polygon(c(tpg.pred$x,rev(tpg.pred$x)),c(tpg.pred$up,rev(tpg.pred$down)),col=rgb(169,169,169,150,max=255),border=NA)
points(tpg.pred$x,tpg.pred$y,type="l",col="black",lwd=2)
points(temp[,y]~temp[,x],pch=21,col="black",bg="grey")
points(temp[cook >= 0.50,y]~temp[cook >= 0.50,x],pch=1,col="red",cex=2)
} else {
# case 3: no significant relationship -> points only, no fit line
plot(temp.sub[,y]~temp.sub[,x],pch=21,col="black",bg="grey",bty="l",xlab="",ylab=""
,cex.axis=1.2,cex.lab=1.3,ylim=ylim.go,xlim=xlim.go,xaxt=xaxt.go,yaxt=yaxt.go,type="n")
axis(2,labels=NA)
axis(1,labels=NA)
points(temp[,y]~temp[,x],pch=21,col="black",bg="grey")
}
}
}
# Fig 3: 3x6 panel grid -- coral (row 1), macroalgae (row 2) and
# log(coral/macroalgae) (row 3) against total and trophic-group biomass;
# individual panels drawn by plot.fish().
png(file.path(getwd(),'outputs',"Fig3.png"),height=2000,width=3600,res=300)
par(mfrow=c(3,6),mar=c(1,1,1,1),oma=c(4,3,0,0),xpd=F,mgp=c(1.6,.7,0))
plot.fish(totbio.pred.ben,'tot.bio','coral',1,'biomass')
tp <- c('P','SC','GD','BR','SE')
for(z in 1:5){
plot.fish(trophic.benthic.m,x=tp[z],y='coral',i=z+1,'biomass')
}
plot.fish(totbio.pred.ben,'tot.bio','macro',7,'biomass')
for(z in 1:5){
plot.fish(trophic.benthic.m,x=tp[z],y='macro',i=z+7,'biomass')
}
plot.fish(totbio.pred.ben,'tot.bio','cm',13,'biomass')
for(z in 1:5){
plot.fish(trophic.benthic.m,x=tp[z],y='cm',i=z+13,'biomass')
}
mtext("Coral Cover (%)", outer=T, side=2, at=0.83,line=1)
mtext("Macroalgal Cover (%)", outer=T, side=2, at=0.5,line=1)
# NOTE(review): "Macrogalae" below looks like a typo for "Macroalgae" in the
# rendered figure label -- confirm before regenerating the figure.
mtext("log(Coral/Macrogalae)", outer=T, side=2, at=0.175,line=1)
mtext("Total",outer=T,side=1,at=0.09,line=1)
mtext("Predators",outer=T,side=1,at=0.248,line=1)
mtext("Sec. Consumers",outer=T,side=1,at=0.416,line=1)
mtext("Grazers",outer=T,side=1,at=0.584,line=1)
mtext("Browsers",outer=T,side=1,at=0.752,line=1)
mtext("Scrapers",outer=T,side=1,at=0.92,line=1)
mtext(expression("Biomass"~~bgroup("(","g"*m^{-2},")")),outer=T,side=1,line=2.8)
dev.off()
# Table 1 skeleton: one row per response (coral / macro / cm) x predictor
# (total biomass + 5 trophic groups).  Intercept/slope statistics, F, df and
# r2 are NA here and filled in by the model-selection loop below.
mod.out <- data.frame(
y = as.character(c("coral", "coral", "coral", "coral", "coral",
"coral", "macro", "macro", "macro", "macro",
"macro", "macro", "cm", "cm", "cm", "cm", "cm", "cm")),
x = as.character(c("total.bio", "P", "SC", "GD", "BR", "SE",
"total.bio", "P", "SC", "GD", "BR", "SE",
"total.bio", "P", "SC", "GD", "BR", "SE")),
int = rep(NA,18),
int.se = rep(NA,18),
int.t = rep(NA,18),
int.p = rep(NA,18),
slp = rep(NA,18),
slp.se = rep(NA,18),
slp.t = rep(NA,18),
slp.p = rep(NA,18),
f = rep(NA,18),
df = rep(NA,18),
r2 = rep(NA,18), stringsAsFactors=F
)
# Fit an lm and a k=3 GAM for every response~predictor pair in mod.out, drop
# points with Cook's distance >= 0.5, keep the AICc-best model
# (MuMIn::model.sel) and record its coefficients / fit statistics.
# Rows 1, 7 and 13 are the total-biomass models (totbio.pred.ben); all other
# rows use the trophic-group predictions (trophic.benthic.m).  The two
# copy-pasted loops of the original are merged: each row is filled
# independently, so iterating 1:18 yields an identical mod.out.
for(i in seq_len(nrow(mod.out))){
  y <- mod.out[i,'y']
  if(i %in% c(1,7,13)){
    # mod.out labels this predictor "total.bio", but the data column is tot.bio
    x <- 'tot.bio'
    dat <- totbio.pred.ben
  } else {
    x <- mod.out[i,'x']
    dat <- trophic.benthic.m
  }
  temp <- dat[!is.na(dat[,y]),]
  tp <- lm(temp[,y]~temp[,x])
  cook <- cooks.distance(tp)
  temp.sub <- temp[!cook >= 0.50, ]   # drop high-influence points, then refit
  tp <- lm(temp.sub[, y] ~ temp.sub[, x])
  tpg <- gam(temp.sub[, y] ~ s(temp.sub[, x], k = 3), family = gaussian)
  m.sel <- model.sel(tp, tpg)
  if(m.sel$class[1]=='lm'){
    # lm summary()$coefficients is a 2x4 matrix read column-major:
    # [1]/[2] estimates, [3]/[4] SEs, [5]/[6] t, [7]/[8] p (intercept/slope)
    mod.out[i,3] <- summary(tp)$coefficients[1]; mod.out[i,7] <- summary(tp)$coefficients[2]
    mod.out[i,4] <- summary(tp)$coefficients[3]; mod.out[i,8] <- summary(tp)$coefficients[4]
    mod.out[i,5] <- summary(tp)$coefficients[5]; mod.out[i,9] <- summary(tp)$coefficients[6]
    mod.out[i,6] <- summary(tp)$coefficients[7]; mod.out[i,10] <- summary(tp)$coefficients[8]
    mod.out[i,11] <- summary(tp)$fstatistic[1]
    mod.out[i,12] <- summary(tp)$df[2]
    mod.out[i,13] <- summary(tp)$r.squared
  }
  if(m.sel$class[1]=='gam'){
    # parametric (intercept) terms from p.table, smooth term from s.table
    mod.out[i,3] <- summary(tpg)$p.table[1]; mod.out[i,4] <- summary(tpg)$p.table[2]
    mod.out[i,5] <- summary(tpg)$p.table[3]; mod.out[i,6] <- summary(tpg)$p.table[4]
    mod.out[i,7] <- summary(tpg)$s.table[1]; mod.out[i,8] <- summary(tpg)$s.table[2]
    mod.out[i,10] <- summary(tpg)$s.table[4]; mod.out[i,11] <- summary(tpg)$s.table[3]
    mod.out[i,12] <- summary(tpg)$n
    mod.out[i,13] <- summary(tpg)$dev.expl
  }
}
write.csv(mod.out, 'outputs/Table1.csv',row.names=F)
# mean size ------------------------------
### need to replicate the lengths based on number column
# Expand each (transect, length) record into `num` individual fish lengths,
# then model log(length+1) with Location fixed and DatasetID random.
size.bio <- fish.use %>% group_by(DatasetID,Replicate,Location,Year,Length) %>% summarise("num"=sum(Number,na.rm=T)) %>% ungroup()
size.bio <- size.bio[!is.na(size.bio$Length),]
# fractional densities below one fish still represent at least one observed fish
size.bio$num[size.bio$num < 1 & size.bio$num > 0] <- 1
size.bio <- subset(size.bio, Length > 0)
size.out <- data.frame(DatasetID=NA,Location=NA,size=NA)
for(i in seq_len(nrow(size.bio))){
t <- rep(size.bio$Length[i],size.bio$num[i])
t <- data.frame(DatasetID=size.bio$DatasetID[i],Location=size.bio$Location[i],size=t)
size.out <- rbind(size.out,t)
}
str(size.out)
size.out <- size.out[2:nrow(size.out),]  # drop the all-NA seed row
size.out$DatasetID <- as.character(size.out$DatasetID); size.out$DatasetID <- as.factor(size.out$DatasetID)
size.out$Location <- as.character(size.out$Location); size.out$Location <- as.factor(size.out$Location)
hist(size.out$size)
hist(log(size.out$size+1))
size.bio.mod <- lmer(log(size+1) ~ Location + (1|DatasetID), data=size.out)
summary(size.bio.mod); plot(residuals(size.bio.mod)~fitted(size.bio.mod))
hist(residuals(size.bio.mod))
newdat <- data.frame(expand.grid(Location=unique(size.out$Location),DatasetID=48,Country="USVI"))
# BUGFIX: back-transform of a log(size+1) model is exp(pred)-1, not
# exp(pred)+1 (matches the -1 used for the biomass models above).
size.bio.mod.pred <- predict(size.bio.mod,newdata=newdat); size.bio.mod.pred <- exp(size.bio.mod.pred)-1
size.bio.mod.pred <- data.frame(Location=newdat$Location,mean.size=size.bio.mod.pred)
size.bio.mod.pred <- size.bio.mod.pred[with(size.bio.mod.pred, order(mean.size)),]
size.bio.mod.CI <- predictInterval(size.bio.mod, newdata = newdat, n.sims = 10000, level=0.05)
size.bio.mod.CI <- exp(size.bio.mod.CI)-1
size.bio.mod.CI <- data.frame(Location=newdat$Location,mean.size=size.bio.mod.CI[,1],mean.size.up=size.bio.mod.CI[,2],mean.size.down=size.bio.mod.CI[,3])
size.bio.ben <- left_join(size.bio.mod.CI, coral.mod.pred, by="Location")
size.bio.ben <- left_join(size.bio.ben, macro.mod.pred, by="Location")
pairs(size.bio.ben[c(2,5:6)], lower.panel=panel.smooth, upper.panel=panel.cor, diag.panel=panel.hist)
size.bio.ben$cm <- log(size.bio.ben$coral/size.bio.ben$macro)
### by trophic level
# Same individual-length expansion as above, split by trophic group.
size.bioT <- fish.use %>% group_by(DatasetID,Replicate,Location,Year,Length,Trophic) %>% summarise("num"=sum(Number,na.rm=T)) %>% ungroup()
size.bioT <- size.bioT[!is.na(size.bioT$Length),]
size.bioT$num[size.bioT$num < 1 & size.bioT$num > 0] <- 1
size.bioT <- subset(size.bioT, Length > 0)
str(size.bioT)
########### PREDATORS
size.bioP <- subset(size.bioT, Trophic=="P")
size.bioP.out <- data.frame(DatasetID=NA,Location=NA,size=NA)
for(i in seq_len(nrow(size.bioP))){
t <- rep(size.bioP$Length[i],size.bioP$num[i])
t <- data.frame(DatasetID=size.bioP$DatasetID[i],Location=size.bioP$Location[i],size=t)
size.bioP.out <- rbind(size.bioP.out,t)
}
str(size.bioP.out)
size.bioP.out <- size.bioP.out[2:nrow(size.bioP.out),]  # drop the all-NA seed row
size.bioP.out$DatasetID <- as.character(size.bioP.out$DatasetID)
size.bioP.out$DatasetID <- as.factor(size.bioP.out$DatasetID)
size.bioP.out$Location <- as.character(size.bioP.out$Location)
size.bioP.out$Location <- as.factor(size.bioP.out$Location)
size.bio.mod.P <- lmer(log(size+1) ~ Location + (1|DatasetID), data=size.bioP.out)
summary(size.bio.mod.P)
plot(residuals(size.bio.mod.P)~fitted(size.bio.mod.P))
hist(residuals(size.bio.mod.P))
newdat <- data.frame(expand.grid(Location=unique(size.bioP.out$Location),DatasetID=48,Country="USVI"))
# BUGFIX: exp(pred)-1 back-transform for the log(size+1) model (was +1)
size.bio.mod.P.pred <- predict(size.bio.mod.P,newdata=newdat); size.bio.mod.P.pred <- exp(size.bio.mod.P.pred)-1
size.bio.mod.P.pred <- data.frame(Location=newdat$Location,mean.size=size.bio.mod.P.pred)
size.bio.mod.P.pred <- size.bio.mod.P.pred[with(size.bio.mod.P.pred, order(mean.size)),]
size.bio.P.mod.CI <- predictInterval(size.bio.mod.P, newdata = newdat, n.sims = 10000, level=0.05)
size.bio.P.mod.CI <- exp(size.bio.P.mod.CI)-1
size.bio.P.mod.CI <- data.frame(Location=newdat$Location,Pmean.size=size.bio.P.mod.CI[,1],Pmean.size.up=size.bio.P.mod.CI[,2],Pmean.size.down=size.bio.P.mod.CI[,3])
sizeP.bio.ben <- left_join(size.bio.P.mod.CI, coral.mod.pred, by="Location")
sizeP.bio.ben <- left_join(sizeP.bio.ben, macro.mod.pred, by="Location")
pairs(sizeP.bio.ben[c(2,5:6)], lower.panel=panel.smooth, upper.panel=panel.cor, diag.panel=panel.hist)
sizeP.bio.ben$cm <- log(sizeP.bio.ben$coral/sizeP.bio.ben$macro)
########### Sec Con
# Secondary consumers: identical pipeline to the predator block above.
size.bioSC <- subset(size.bioT, Trophic=="SC")
size.bioSC.out <- data.frame(DatasetID=NA,Location=NA,size=NA)
for(i in seq_len(nrow(size.bioSC))){
t <- rep(size.bioSC$Length[i],size.bioSC$num[i])
t <- data.frame(DatasetID=size.bioSC$DatasetID[i],Location=size.bioSC$Location[i],size=t)
size.bioSC.out <- rbind(size.bioSC.out,t)
}
str(size.bioSC.out)
size.bioSC.out <- size.bioSC.out[2:nrow(size.bioSC.out),]  # drop the all-NA seed row
size.bioSC.out$DatasetID <- as.character(size.bioSC.out$DatasetID)
size.bioSC.out$DatasetID <- as.factor(size.bioSC.out$DatasetID)
size.bioSC.out$Location <- as.character(size.bioSC.out$Location)
size.bioSC.out$Location <- as.factor(size.bioSC.out$Location)
size.bio.mod.SC <- lmer(log(size+1) ~ Location + (1|DatasetID), data=size.bioSC.out)
summary(size.bio.mod.SC)
plot(residuals(size.bio.mod.SC)~fitted(size.bio.mod.SC))
hist(residuals(size.bio.mod.SC))
newdat <- data.frame(expand.grid(Location=unique(size.bioSC.out$Location),DatasetID=48,Country="USVI"))
# BUGFIX: exp(pred)-1 back-transform for the log(size+1) model (was +1)
size.bio.mod.SC.pred <- predict(size.bio.mod.SC,newdata=newdat); size.bio.mod.SC.pred <- exp(size.bio.mod.SC.pred)-1
size.bio.mod.SC.pred <- data.frame(Location=newdat$Location,mean.size=size.bio.mod.SC.pred)
size.bio.mod.SC.pred <- size.bio.mod.SC.pred[with(size.bio.mod.SC.pred, order(mean.size)),]
size.bio.SC.mod.CI <- predictInterval(size.bio.mod.SC, newdata = newdat, n.sims = 10000, level=0.05)
size.bio.SC.mod.CI <- exp(size.bio.SC.mod.CI)-1
size.bio.SC.mod.CI <- data.frame(Location=newdat$Location,SCmean.size=size.bio.SC.mod.CI[,1],SCmean.size.up=size.bio.SC.mod.CI[,2],SCmean.size.down=size.bio.SC.mod.CI[,3])
sizeSC.bio.ben <- left_join(size.bio.SC.mod.CI, coral.mod.pred, by="Location")
sizeSC.bio.ben <- left_join(sizeSC.bio.ben, macro.mod.pred, by="Location")
pairs(sizeSC.bio.ben[c(2,5:6)], lower.panel=panel.smooth, upper.panel=panel.cor, diag.panel=panel.hist)
sizeSC.bio.ben$cm <- log(sizeSC.bio.ben$coral/sizeSC.bio.ben$macro)
########### Grazers
# Mean size of grazers/detritivores (GD) per Location; same workflow as the
# other trophic-group size sections.
size.bioGD <- subset(size.bioT, Trophic=="GD")
size.bioGD.out <- data.frame(DatasetID=NA,Location=NA,size=NA)
for(i in seq_len(nrow(size.bioGD))){
  # one row per fish: repeat each Length `num` times
  ind <- rep(size.bioGD$Length[i],size.bioGD$num[i])
  ind <- data.frame(DatasetID=size.bioGD$DatasetID[i],Location=size.bioGD$Location[i],size=ind)
  size.bioGD.out <- rbind(size.bioGD.out,ind)
}
str(size.bioGD.out)
size.bioGD.out <- size.bioGD.out[2:nrow(size.bioGD.out),]  # drop the NA seed row
size.bioGD.out$DatasetID <- as.character(size.bioGD.out$DatasetID)
size.bioGD.out$DatasetID <- as.factor(size.bioGD.out$DatasetID)
size.bioGD.out$Location <- as.character(size.bioGD.out$Location)
size.bioGD.out$Location <- as.factor(size.bioGD.out$Location)
# Location effect on log(size+1), dataset as random intercept
size.bio.mod.GD <- lmer(log(size+1) ~ Location + (1|DatasetID), data=size.bioGD.out)
summary(size.bio.mod.GD)
plot(residuals(size.bio.mod.GD)~fitted(size.bio.mod.GD))
hist(residuals(size.bio.mod.GD))
newdat <- data.frame(expand.grid(Location=unique(size.bioGD.out$Location),DatasetID=48,Country="USVI"))
# BUG FIX: back-transform of a log(size+1) response is exp(pred) - 1; the
# original added 1, inflating every estimate by exactly 2.
size.bio.mod.GD.pred <- predict(size.bio.mod.GD,newdata=newdat); size.bio.mod.GD.pred <- exp(size.bio.mod.GD.pred)-1
size.bio.mod.GD.pred <- data.frame(Location=newdat$Location,mean.size=size.bio.mod.GD.pred)
size.bio.mod.GD.pred <- size.bio.mod.GD.pred[with(size.bio.mod.GD.pred, order(mean.size)),]
# NOTE(review): level=0.05 requests a 5% interval from predictInterval --
# confirm level=0.95 was not intended.
size.bio.GD.mod.CI <- predictInterval(size.bio.mod.GD, newdata = newdat, n.sims = 10000, level=0.05)
size.bio.GD.mod.CI <- exp(size.bio.GD.mod.CI)-1  # BUG FIX: was exp()+1
size.bio.GD.mod.CI <- data.frame(Location=newdat$Location,GDmean.size=size.bio.GD.mod.CI[,1],GDmean.size.up=size.bio.GD.mod.CI[,2],GDmean.size.down=size.bio.GD.mod.CI[,3])
sizeGD.bio.ben <- left_join(size.bio.GD.mod.CI, coral.mod.pred, by="Location")
sizeGD.bio.ben <- left_join(sizeGD.bio.ben, macro.mod.pred, by="Location")
pairs(sizeGD.bio.ben[c(2,5:6)], lower.panel=panel.smooth, upper.panel=panel.cor, diag.panel=panel.hist)
sizeGD.bio.ben$cm <- log(sizeGD.bio.ben$coral/sizeGD.bio.ben$macro)
########### Browsers
# Mean size of browsers (BR) per Location; same workflow as the other
# trophic-group size sections.
size.bioBR <- subset(size.bioT, Trophic=="BR")
size.bioBR.out <- data.frame(DatasetID=NA,Location=NA,size=NA)
for(i in seq_len(nrow(size.bioBR))){
  # one row per fish: repeat each Length `num` times
  ind <- rep(size.bioBR$Length[i],size.bioBR$num[i])
  ind <- data.frame(DatasetID=size.bioBR$DatasetID[i],Location=size.bioBR$Location[i],size=ind)
  size.bioBR.out <- rbind(size.bioBR.out,ind)
}
str(size.bioBR.out)
size.bioBR.out <- size.bioBR.out[2:nrow(size.bioBR.out),]  # drop the NA seed row
size.bioBR.out$DatasetID <- as.character(size.bioBR.out$DatasetID)
size.bioBR.out$DatasetID <- as.factor(size.bioBR.out$DatasetID)
size.bioBR.out$Location <- as.character(size.bioBR.out$Location)
size.bioBR.out$Location <- as.factor(size.bioBR.out$Location)
# Location effect on log(size+1), dataset as random intercept
size.bio.mod.BR <- lmer(log(size+1) ~ Location + (1|DatasetID), data=size.bioBR.out)
summary(size.bio.mod.BR)
plot(residuals(size.bio.mod.BR)~fitted(size.bio.mod.BR))
hist(residuals(size.bio.mod.BR))
newdat <- data.frame(expand.grid(Location=unique(size.bioBR.out$Location),DatasetID=48,Country="USVI"))
# BUG FIX: back-transform of a log(size+1) response is exp(pred) - 1; the
# original added 1, inflating every estimate by exactly 2.
size.bio.mod.BR.pred <- predict(size.bio.mod.BR,newdata=newdat); size.bio.mod.BR.pred <- exp(size.bio.mod.BR.pred)-1
size.bio.mod.BR.pred <- data.frame(Location=newdat$Location,mean.size=size.bio.mod.BR.pred)
size.bio.mod.BR.pred <- size.bio.mod.BR.pred[with(size.bio.mod.BR.pred, order(mean.size)),]
# NOTE(review): level=0.05 requests a 5% interval from predictInterval --
# confirm level=0.95 was not intended.
size.bio.BR.mod.CI <- predictInterval(size.bio.mod.BR, newdata = newdat, n.sims = 10000, level=0.05)
size.bio.BR.mod.CI <- exp(size.bio.BR.mod.CI)-1  # BUG FIX: was exp()+1
size.bio.BR.mod.CI <- data.frame(Location=newdat$Location,BRmean.size=size.bio.BR.mod.CI[,1],BRmean.size.up=size.bio.BR.mod.CI[,2],BRmean.size.down=size.bio.BR.mod.CI[,3])
sizeBR.bio.ben <- left_join(size.bio.BR.mod.CI, coral.mod.pred, by="Location")
sizeBR.bio.ben <- left_join(sizeBR.bio.ben, macro.mod.pred, by="Location")
pairs(sizeBR.bio.ben[c(2,5:6)], lower.panel=panel.smooth, upper.panel=panel.cor, diag.panel=panel.hist)
sizeBR.bio.ben$cm <- log(sizeBR.bio.ben$coral/sizeBR.bio.ben$macro)
########### Scrapers
# Mean size of scrapers/excavators (SE) per Location; same workflow as the
# other trophic-group size sections.
size.bioSE <- subset(size.bioT, Trophic=="SE")
size.bioSE.out <- data.frame(DatasetID=NA,Location=NA,size=NA)
for(i in seq_len(nrow(size.bioSE))){
  # one row per fish: repeat each Length `num` times
  ind <- rep(size.bioSE$Length[i],size.bioSE$num[i])
  ind <- data.frame(DatasetID=size.bioSE$DatasetID[i],Location=size.bioSE$Location[i],size=ind)
  size.bioSE.out <- rbind(size.bioSE.out,ind)
}
str(size.bioSE.out)
size.bioSE.out <- size.bioSE.out[2:nrow(size.bioSE.out),]  # drop the NA seed row
size.bioSE.out$DatasetID <- as.character(size.bioSE.out$DatasetID)
size.bioSE.out$DatasetID <- as.factor(size.bioSE.out$DatasetID)
size.bioSE.out$Location <- as.character(size.bioSE.out$Location)
size.bioSE.out$Location <- as.factor(size.bioSE.out$Location)
# Location effect on log(size+1), dataset as random intercept
size.bio.mod.SE <- lmer(log(size+1) ~ Location + (1|DatasetID), data=size.bioSE.out)
summary(size.bio.mod.SE)
plot(residuals(size.bio.mod.SE)~fitted(size.bio.mod.SE))
hist(residuals(size.bio.mod.SE))
newdat <- data.frame(expand.grid(Location=unique(size.bioSE.out$Location),DatasetID=48,Country="USVI"))
# BUG FIX: back-transform of a log(size+1) response is exp(pred) - 1; the
# original added 1, inflating every estimate by exactly 2.
size.bio.mod.SE.pred <- predict(size.bio.mod.SE,newdata=newdat); size.bio.mod.SE.pred <- exp(size.bio.mod.SE.pred)-1
size.bio.mod.SE.pred <- data.frame(Location=newdat$Location,mean.size=size.bio.mod.SE.pred)
size.bio.mod.SE.pred <- size.bio.mod.SE.pred[with(size.bio.mod.SE.pred, order(mean.size)),]
# NOTE(review): level=0.05 requests a 5% interval from predictInterval --
# confirm level=0.95 was not intended.
size.bio.SE.mod.CI <- predictInterval(size.bio.mod.SE, newdata = newdat, n.sims = 10000, level=0.05)
size.bio.SE.mod.CI <- exp(size.bio.SE.mod.CI)-1  # BUG FIX: was exp()+1
size.bio.SE.mod.CI <- data.frame(Location=newdat$Location,SEmean.size=size.bio.SE.mod.CI[,1],SEmean.size.up=size.bio.SE.mod.CI[,2],SEmean.size.down=size.bio.SE.mod.CI[,3])
sizeSE.bio.ben <- left_join(size.bio.SE.mod.CI, coral.mod.pred, by="Location")
sizeSE.bio.ben <- left_join(sizeSE.bio.ben, macro.mod.pred, by="Location")
pairs(sizeSE.bio.ben[c(2,5:6)], lower.panel=panel.smooth, upper.panel=panel.cor, diag.panel=panel.hist)
sizeSE.bio.ben$cm <- log(sizeSE.bio.ben$coral/sizeSE.bio.ben$macro)
#output table
# Assemble one supplementary table of modeled mean-size estimates (fit plus
# interval bounds) for all trophic groups, joined by Location, and write it
# out.  Join order matches the original: total, P, SC, BR, GD, SE.
ci.tables <- list(size.bio.mod.CI, size.bio.P.mod.CI, size.bio.SC.mod.CI,
                  size.bio.BR.mod.CI, size.bio.GD.mod.CI, size.bio.SE.mod.CI)
size_est_tbl <- Reduce(function(lhs, rhs) left_join(lhs, rhs, by = "Location"),
                       ci.tables)
str(size_est_tbl)
write.csv(size_est_tbl, 'outputs/SOM6.csv', row.names = FALSE)
# panel fig - mean size ---------------------------------------------------
# Figure 4: 3 x 6 grid of modeled mean size per trophic group (columns)
# against coral cover, macroalgal cover and log(coral/macro) (rows).
png('outputs/Fig4.png',height=2000,width=3600,res=300)
par(mfrow=c(3,6),mar=c(1,1,1,1),oma=c(4,3,0,0),xpd=F,mgp=c(1.6,.7,0))
# One plot.fish() call per panel; the running panel index and the
# (dataset, size column) pairs reproduce the original call order exactly:
# benthic response varies by row, trophic group by column.
size.panels <- list(
  list(size.bio.ben,   'mean.size'),
  list(sizeP.bio.ben,  'Pmean.size'),
  list(sizeSC.bio.ben, 'SCmean.size'),
  list(sizeGD.bio.ben, 'GDmean.size'),
  list(sizeBR.bio.ben, 'BRmean.size'),
  list(sizeSE.bio.ben, 'SEmean.size')
)
panel.id <- 0
for(bench in c('coral','macro','cm')){
  for(p in size.panels){
    panel.id <- panel.id + 1
    plot.fish(p[[1]], p[[2]], bench, panel.id, 'size')
  }
}
mtext("Coral Cover (%)", outer=T, side=2, at=0.83,line=1)
mtext("Macroalgal Cover (%)", outer=T, side=2, at=0.5,line=1)
# BUG FIX: corrected label typo "Macrogalae" -> "Macroalgae"
mtext("log(Coral/Macroalgae)", outer=T, side=2, at=0.175,line=1)
mtext("Total",outer=T,side=1,at=0.09,line=1)
mtext("Predators",outer=T,side=1,at=0.248,line=1)
mtext("Sec. Consumers",outer=T,side=1,at=0.416,line=1)
mtext("Grazers",outer=T,side=1,at=0.584,line=1)
mtext("Browsers",outer=T,side=1,at=0.752,line=1)
mtext("Scrapers",outer=T,side=1,at=0.92,line=1)
# NOTE(review): this is the mean-size figure but the shared x-axis label
# reads "Biomass" -- confirm it should not be "Mean size (cm)".
mtext(expression("Biomass"~~bgroup("(","g"*m^{-2},")")),outer=T,side=1,line=2.8)
dev.off()
# Pre-allocate the regression-results table: one row per combination of
# benthic response (y: coral cover, macroalgal cover, log-ratio "cm") and
# trophic mean-size predictor (x), with NA placeholders for the intercept,
# slope, F, df and R^2 statistics filled in by the fitting loop below.
trait.x <- c('mean.size','Pmean.size','SCmean.size','GDmean.size',
             'BRmean.size','SEmean.size')
# expand.grid varies its first factor fastest, so x cycles within each y --
# the same row order as the original hand-written vectors.
combos <- expand.grid(x = trait.x, y = c("coral", "macro", "cm"),
                      stringsAsFactors = FALSE)
mod.out <- data.frame(y = combos$y, x = combos$x, stringsAsFactors = FALSE)
stat.cols <- c("int", "int.se", "int.t", "int.p",
               "slp", "slp.se", "slp.t", "slp.p",
               "f", "df", "r2")
mod.out[stat.cols] <- NA
# Fit location-level regressions of each benthic response (coral, macro, cm)
# on the modeled mean size of each trophic group, filling one row of mod.out
# per y/x combination.  The original's four near-identical loops
# (i in c(1,7,13), 2:6, 8:12, 14:18) are collapsed into one loop over all 18
# rows; within each block of six rows the dataset cycles through sizelist in
# the same order, and sizelist[[1]] is size.bio.ben, so the data selection is
# unchanged.
sizelist <- list(size.bio.ben,sizeP.bio.ben,sizeSC.bio.ben,sizeGD.bio.ben,sizeBR.bio.ben,sizeSE.bio.ben)
for(i in seq_len(nrow(mod.out))){
  y <- mod.out[i,'y']
  x <- mod.out[i,'x']
  dat <- sizelist[[(i - 1) %% 6 + 1]]  # trophic dataset for this row
  temp <- dat[!is.na(dat[,y]),]
  # First-pass fit only to flag influential points (Cook's D >= 0.5), which
  # are dropped before the final fits.
  tp <- lm(temp[,y]~temp[,x])
  cook <- cooks.distance(tp)
  temp.sub <- temp[!cook >= 0.50, ]  # parses as !(cook >= 0.5), i.e. cook < 0.5
  tp <- lm(temp.sub[, y] ~ temp.sub[, x])
  tpg <- gam(temp.sub[, y] ~ s(temp.sub[, x], k = 3), family = gaussian)
  # Keep whichever of the linear or smooth model is ranked first by model.sel
  m.sel <- model.sel(tp, tpg)
  if(m.sel$class[1]=='lm'){
    # summary()$coefficients is a 2x4 matrix read column-major:
    # [1]/[2] estimates, [3]/[4] SEs, [5]/[6] t, [7]/[8] p (intercept/slope)
    mod.out[i,3] <- summary(tp)$coefficients[1]; mod.out[i,7] <- summary(tp)$coefficients[2]
    mod.out[i,4] <- summary(tp)$coefficients[3]; mod.out[i,8] <- summary(tp)$coefficients[4]
    mod.out[i,5] <- summary(tp)$coefficients[5]; mod.out[i,9] <- summary(tp)$coefficients[6]
    mod.out[i,6] <- summary(tp)$coefficients[7]; mod.out[i,10] <- summary(tp)$coefficients[8]
    mod.out[i,11] <- summary(tp)$fstatistic[1]
    mod.out[i,12] <- summary(tp)$df[2]
    mod.out[i,13] <- summary(tp)$r.squared
  }
  if(m.sel$class[1]=='gam'){
    # p.table: parametric terms; s.table: smooth term (edf, ref df, F, p)
    mod.out[i,3] <- summary(tpg)$p.table[1]; mod.out[i,4] <- summary(tpg)$p.table[2]
    mod.out[i,5] <- summary(tpg)$p.table[3]; mod.out[i,6] <- summary(tpg)$p.table[4]
    mod.out[i,7] <- summary(tpg)$s.table[1]; mod.out[i,8] <- summary(tpg)$s.table[2]
    mod.out[i,10] <- summary(tpg)$s.table[4]; mod.out[i,11] <- summary(tpg)$s.table[3]
    mod.out[i,12] <- summary(tpg)$n
    mod.out[i,13] <- summary(tpg)$dev.expl
  }
}
write.csv(mod.out, 'outputs/Table2.csv',row.names=F)
# multivariate trophic biomass ---------------------------------------------------
# Redundancy analysis of trophic-group biomass (min-max rescaled to [0,1])
# constrained by rescaled coral and macroalgal cover, plus the reverse
# ("*.f") model with the benthic variables as the response.
temp <- trophic.benthic.m[!is.na(trophic.benthic.m$macro),]
temp <- temp[!is.na(temp$coral),]
temp$cm <- log(temp$coral/temp$macro)
temp$coral.p <- temp$coral/100
temp$macro.p <- temp$macro/100
spp.mat <- temp[c('BR','GD','P','SC','SE')]
# min-max rescale to [0,1]; no na.rm, so assumes NAs were removed above
std <- function(x){(x-min(x))/(max(x)-min(x))}
for(i in 1:5) spp.mat[i] <- std(spp.mat[i])
summary(spp.mat)
temp$coral.ps <- std(temp$coral.p)
temp$macro.ps <- std(temp$macro.p)
# forward model: fish biomass constrained by benthos
trop.rda <- rda(spp.mat, temp[c('coral.ps','macro.ps')])
summary(trop.rda)
anova(trop.rda)
permutest(trop.rda, permutations = 1000)
RsquareAdj(trop.rda)
# (removed dead assignments `c <- temp['coral.ps']` and `m <- temp['macro.ps']`
# from the original: never used, and `c` shadowed base::c)
# reverse model: benthos constrained by fish biomass
cm <- as.matrix(temp[c('coral.ps','macro.ps')])
trop.rda.f <- rda(cm~.,data=spp.mat)
anova(trop.rda.f)
permutest(trop.rda.f, permutations = 1000)
RsquareAdj(trop.rda.f)
plot(trop.rda.f,scaling=3)
summary(trop.rda.f)
anova(trop.rda.f,by="terms")
# multivariate - size ----------------------------------------------------
# Same ordination workflow as the biomass section, but using the modeled
# mean size of each trophic group per Location.
all.size <- left_join(sizeP.bio.ben[c("Location","Pmean.size","coral","macro")],sizeSC.bio.ben[c("Location","SCmean.size")],by="Location")
all.size <- left_join(all.size,sizeBR.bio.ben[c("Location","BRmean.size")],by="Location")
all.size <- left_join(all.size,sizeGD.bio.ben[c("Location","GDmean.size")],by="Location")
all.size <- left_join(all.size,sizeSE.bio.ben[c("Location","SEmean.size")],by="Location")
colnames(all.size) <- c("Location","P","coral","macro","SC","BR","GD","SE")
all.size <- all.size[c(1,3,4,2,5:8)]  # reorder: Location, coral, macro, trophic groups
str(all.size)
all.size <- all.size[!is.na(all.size$coral),]
all.size <- all.size[!is.na(all.size$macro),]
spp.mat <- all.size[4:8]
for(i in 1:5) spp.mat[i] <- std(spp.mat[i])  # min-max rescale, as above
summary(spp.mat)
all.size$coral.ps <- std(all.size$coral/100)
all.size$macro.ps <- std(all.size$macro/100)
# forward model: mean size constrained by benthos
size.rda <- rda(spp.mat, all.size[9:10])
plot(size.rda,scaling=3)
summary(size.rda)
anova(size.rda)
# BUG FIX: the original called permutest(trop.rda, ...) here -- a copy-paste
# from the biomass section; the size ordination is size.rda.
permutest(size.rda, permutations = 10000)
RsquareAdj(size.rda)
# reverse model: benthos constrained by mean size
cm <- as.matrix(all.size[c(9:10)])
size.rda.f <- rda(cm~.,data=spp.mat)
anova(size.rda.f)
permutest(size.rda.f, permutations = 1000)
RsquareAdj(size.rda.f)
plot(size.rda.f,scaling=3)
summary(size.rda.f)
anova(size.rda.f,by="terms")
anova(size.rda.f,by="axis")
anova(size.rda.f,by="margin")
# combine multivariate plots ----------------------------------------------
# Figure 5: biplots of the two reverse RDAs (benthos ~ fish community).
# Panel A uses trophic biomass, panel B mean size.  Blue arrows/labels are
# the fish predictors; red arrows/labels are coral and macroalgae.
png(file='outputs/Fig5.png',height=1800,width=3800,res=300)
par(mfrow=c(1,2),mar=c(3,3,2,1),oma=c(2,2,0,0))
plot(trop.rda.f,scaling=3,type="n",cex.axis=1.4,xlim=c(-1,1.05))
points(scores(trop.rda.f,display="sites",scaling=3),pch=21,col="black",bg="grey",cex=1.4)
reg.arrow <- scores(trop.rda.f, display="bp", scaling=3)
arrows(0,0,reg.arrow[,1],reg.arrow[,2],length=0,lty=1,cex=3,lwd=1,col="blue")
temp <- scores(trop.rda.f, display="b", scaling=3)
# temp is a 5x2 score matrix indexed column-major: temp[1..5] are RDA1 and
# temp[6..10] RDA2 scores; the numeric offsets hand-tune label placement and
# will need re-tuning if the data change.
text(temp[1]+.25,temp[6]-0.06,labels=c("Browsers"),col="blue",cex=1.3)
text(temp[2]+.25,temp[7]-0.05,labels=c("Grazers"),col="blue",cex=1.3)
text(temp[3]+0.20,temp[8]-0.03,labels=c("Predators"),col="blue",cex=1.3)
text(temp[4]+.32,temp[9]-0.03,labels=c("Sec. Cons."),col="blue",cex=1.3)
text(temp[5]+0.23,temp[10],labels=c("Scrapers"),col="blue",cex=1.3)
reg.arrow <- scores(trop.rda.f, display="sp", scaling=3)
arrows(0,0,reg.arrow[,1],reg.arrow[,2],length=0,lty=1,cex=3,lwd=2,col="red")
temp <- scores(trop.rda.f, display="sp", scaling=3)
# species (benthic) scores: 2x2 matrix, temp[1]/temp[2] = RDA1 and
# temp[3]/temp[4] = RDA2 for coral and macroalgae
text(temp[1]-0.02,temp[3]+0.07,labels=c("Coral"),col="red",cex=1.5)
text(temp[2]-0.02,temp[4]+0.05,labels=c("Macroalgae"),col="red",cex=1.5)
text(-1.15,1,"A",cex=1.5)
# Panel B: same layout for the size-based ordination
plot(size.rda.f,scaling=3,type="n",cex.axis=1.4,xlim=c(-0.9,1.4),ylim=c(-.7,1))
points(scores(size.rda.f,display="sites",scaling=3),pch=21,col="black",bg="grey",cex=1.4)
reg.arrow <- scores(size.rda.f, display="bp", scaling=3)
arrows(0,0,reg.arrow[,1],reg.arrow[,2],length=0,lty=1,cex=3,lwd=1,col="blue")
temp <- scores(size.rda.f, display="b", scaling=3)
text(temp[1],temp[6]-0.05,labels=c("Predators"),col="blue",cex=1.3)
text(temp[2]+0.2,temp[7]+.06,labels=c("Sec. Cons."),col="blue",cex=1.3)
text(temp[3],temp[8]-0.05,labels=c("Browsers"),col="blue",cex=1.3)
text(temp[4]+0.1,temp[9]-0.05,labels=c("Grazers"),col="blue",cex=1.3)
text(temp[5]+0.2,temp[10]+.03,labels=c("Scrapers"),col="blue",cex=1.3)
reg.arrow <- scores(size.rda.f, display="sp", scaling=3)
arrows(0,0,reg.arrow[,1],reg.arrow[,2],length=0,lty=1,cex=3,lwd=2,col="red")
temp <- scores(size.rda.f, display="sp", scaling=3)
text(temp[1]+0.18,temp[3],labels=c("Coral"),col="red",cex=1.5)
text(temp[2],temp[4]+0.05,labels=c("Macroalgae"),col="red",cex=1.5)
text(-.80,1.05,"B",cex=1.5)
mtext("RDA1",side=1,outer=T,cex=1.4)
mtext("RDA2",side=2,outer=T,cex=1.4)
dev.off()
# supplemental figure random effects --------------------------------------
# SOM7: residuals of each location model plotted against dataset ID, to
# check for dataset-level structure beyond the random intercept.
t.resid <- data.frame(DatasetID=trophic.bio.raw.m$DatasetID, BR=NA, GD=NA, P=NA, SC=NA, SE=NA, Year=trophic.bio.raw.m$Year, Location=trophic.bio.raw.m$Location)
for(k in c(2:6)){
  temp <- trophic.bio.raw.m
  # NOTE(review): assumes column k+4 of trophic.bio.raw.m holds the biomass
  # of the trophic group named in column k of t.resid -- confirm column order.
  colnames(temp)[k+4] <- "resp"
  tb.mod.l <- lmer(log(resp+1) ~ Location + (1|DatasetID) , data=temp)
  t.resid[,k] <- residuals(tb.mod.l)
}
plot(t.resid$DatasetID,t.resid$BR)
tot.resid <- data.frame(DatasetID=tot.bio.m$DatasetID, total = residuals(totbio.mod.l))
# One panel per model: residuals vs dataset with a zero line and a label at
# 90% of the upper y limit (replaces twelve copy-pasted plot/abline/text
# triples from the original; call sequence and arguments are unchanged).
resid_panel <- function(x, y, ylim, lab){
  plot(x, y, bty="o", cex.axis=1.4, ylim=ylim)
  abline(h=0)
  text(0.2, 0.9*ylim[2], lab, pos=4, cex=2)
}
png(file='outputs/SOM7.png',height=4000,width=4000,res=300)
par(mfcol=c(6,2),mar=c(1.5,1.5,1,1.5),oma=c(4,4,0,0),mgp=c(1.6,.7,0),xpd=F)
resid_panel(tot.resid$DatasetID, tot.resid$total, c(-4,4),   "Total Biomass")
resid_panel(t.resid$DatasetID,   t.resid$P,       c(-3,5.5), "Predator Biomass")
resid_panel(t.resid$DatasetID,   t.resid$SC,      c(-3,4.5), "Secondary Consumer Biomass")
resid_panel(t.resid$DatasetID,   t.resid$BR,      c(-2,4.5), "Browser Biomass")
resid_panel(t.resid$DatasetID,   t.resid$GD,      c(-2,4.5), "Grazer Biomass")
resid_panel(t.resid$DatasetID,   t.resid$SE,      c(-2.2,4), "Scraper Biomass")
resid_panel(size.out$DatasetID,      residuals(size.bio.mod),    c(-1.5,3.5), "Mean size overall")
resid_panel(size.bioP.out$DatasetID,  residuals(size.bio.mod.P),  c(-2,3),    "Mean Size Predators")
resid_panel(size.bioSC.out$DatasetID, residuals(size.bio.mod.SC), c(-2,3),    "Mean Size Secondary Consumers")
resid_panel(size.bioBR.out$DatasetID, residuals(size.bio.mod.BR), c(-2,2),    "Mean Size Browsers")
resid_panel(size.bioGD.out$DatasetID, residuals(size.bio.mod.GD), c(-2,2.5),  "Mean Size Grazers")
resid_panel(size.bioSE.out$DatasetID, residuals(size.bio.mod.SE), c(-1.5,3),  "Mean Size Scrapers")
mtext("Dataset",side=1,outer=T,cex=1.6,line=2)
mtext("Residuals",side=2,outer=T,cex=1.6,line=2)
dev.off()
# correlations with lat/long? ---------------------------------------------
# SOM9: check whether model residuals correlate with site latitude/longitude
# (residual spatial structure).  6x4 panel grid (mfcol fills by column):
# col 1 biomass vs Lat, col 2 biomass vs Long, col 3 size vs Lat,
# col 4 size vs Long.  Pearson r is printed only when cor.test is
# significant at 0.05.
loc.coord <- fish.use %>% group_by(Location) %>% summarise("Lat"=median(Latitude,na.rm=T),"Long"=median(Longitude,na.rm=T)) %>% ungroup()
# One scatter panel.  lab.x positions the correlation label; bottom/left
# control which axes get printed tick labels (only the outer panels of the
# grid).  Replaces 24 copy-pasted plot/axis/abline/text groups; the drawn
# output per panel is unchanged (only debug str() calls were dropped).
res_panel <- function(x, y, lab.x, bottom=FALSE, left=FALSE){
  plot(x,y,pch=21,col="black",bg="grey",cex=1.5,bty="o",xlab="",ylab="",yaxt="n",xaxt="n",cex.axis=1.4)
  if(bottom) axis(1,cex.axis=1.4) else axis(1,labels=NA)
  if(left) axis(2,cex.axis=1.4) else axis(2,labels=NA)
  abline(h=0)
  if((cor.test(x,y))$p.value < 0.05){
    text(lab.x,0.9*max(y),round(cor(x,y),2),pos=4,cex=2,font=2)
  }
}
png(file='outputs/SOM9.png',height=3000,width=2800,res=300)
par(mfcol=c(6,4),mar=c(1,1,1,1),oma=c(4,4,2,0),mgp=c(1.6,.7,0),xpd=T)
####### biomass vs latitude (column 1)
temp <- data.frame(total = residuals(totbio.mod.l),Location=tot.bio.m$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Lat, temp$total, 13, left=TRUE)
temp <- left_join(t.resid,loc.coord)
res_panel(temp$Lat, temp$P,  13, left=TRUE)
res_panel(temp$Lat, temp$SC, 13, left=TRUE)
res_panel(temp$Lat, temp$GD, 13, left=TRUE)
res_panel(temp$Lat, temp$BR, 13, left=TRUE)
res_panel(temp$Lat, temp$SE, 13, left=TRUE, bottom=TRUE)
####### biomass vs longitude (column 2)
temp <- data.frame(total = residuals(totbio.mod.l),Location=tot.bio.m$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Long, temp$total, -87)
temp <- left_join(t.resid,loc.coord)
res_panel(temp$Long, temp$P,  -87)
res_panel(temp$Long, temp$SC, -87)
res_panel(temp$Long, temp$GD, -87)
res_panel(temp$Long, temp$BR, -87)
res_panel(temp$Long, temp$SE, -89, bottom=TRUE)
######## size vs latitude (column 3)
temp <- data.frame(total = residuals(size.bio.mod),Location=size.out$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Lat, temp$total, 13, left=TRUE)
temp <- data.frame(P = residuals(size.bio.mod.P),Location=size.bioP.out$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Lat, temp$P, 13, left=TRUE)
temp <- data.frame(SC = residuals(size.bio.mod.SC),Location=size.bioSC.out$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Lat, temp$SC, 13, left=TRUE)
temp <- data.frame(GD = residuals(size.bio.mod.GD),Location=size.bioGD.out$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Lat, temp$GD, 13, left=TRUE)
temp <- data.frame(BR = residuals(size.bio.mod.BR),Location=size.bioBR.out$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Lat, temp$BR, 13, left=TRUE)
temp <- data.frame(SE = residuals(size.bio.mod.SE),Location=size.bioSE.out$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Lat, temp$SE, 13, left=TRUE, bottom=TRUE)
######## size vs longitude (column 4)
temp <- data.frame(total = residuals(size.bio.mod),Location=size.out$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Long, temp$total, -87)
temp <- data.frame(P = residuals(size.bio.mod.P),Location=size.bioP.out$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Long, temp$P, -87)
temp <- data.frame(SC = residuals(size.bio.mod.SC),Location=size.bioSC.out$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Long, temp$SC, -87)
temp <- data.frame(GD = residuals(size.bio.mod.GD),Location=size.bioGD.out$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Long, temp$GD, -87)
temp <- data.frame(BR = residuals(size.bio.mod.BR),Location=size.bioBR.out$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Long, temp$BR, -87)
temp <- data.frame(SE = residuals(size.bio.mod.SE),Location=size.bioSE.out$Location)
temp <- left_join(temp,loc.coord)
res_panel(temp$Long, temp$SE, -89, bottom=TRUE)
mtext("Latitude",outer=T,side=1,cex=1.3,at=.125,line=1)
mtext("Longitude",outer=T,side=1,cex=1.3,at=.375,line=1)
mtext("Latitude",outer=T,side=1,cex=1.3,at=0.625,line=1)
mtext("Longitude",outer=T,side=1,cex=1.3,at=0.875,line=1)
mtext("Total",outer=T,side=2,at=0.92,line=1,cex=1.3)
mtext("Predators",outer=T,side=2,at=0.752,line=1,cex=1.3)
mtext("Sec. Consumers",outer=T,side=2,at=0.584,line=1,cex=1.3)
mtext("Grazers",outer=T,side=2,at=0.416,line=1,cex=1.3)
mtext("Browsers",outer=T,side=2,at=0.248,line=1,cex=1.3)
mtext("Scrapers",outer=T,side=2,at=0.09,line=1,cex=1.3)
mtext(expression("Biomass"~~bgroup("(","g"*m^{-2},")")),outer=T,side=3,at=.25,cex=1.3,line=-1)
mtext("Mean size (cm)",outer=T,side=3,at=.75,cex=1.3)
dev.off()
# time effect? ------------------------------------------------------------
# Quick interactive checks for temporal trends in the model residuals,
# then the SOM10 stacked figure with lowess smoothers.
plot(residuals(totbio.mod.l)~tot.bio.m$Year)
plot(P~Year,data=t.resid)
plot(SC~Year,data=t.resid)
plot(GD~Year,data=t.resid)
plot(BR~Year,data=t.resid)
plot(SE~Year,data=t.resid)
png('outputs/SOM10.png',height=2800,width=1900,res=300)
par(mfrow=c(6,1),mar=c(1,1,1,1),oma=c(4,3,0,0),xpd=F,mgp=c(1.6,.7,0))
plot(residuals(totbio.mod.l)~tot.bio.m$Year,bty="l",xlab="",ylab="Total",cex.axis=1.2,cex.lab=1.1,xaxt="n")
lines(lowess(residuals(totbio.mod.l)~tot.bio.m$Year),lwd=4,col="red")
axis(1,labels=NA)
plot(P~Year,data=t.resid,bty="l",xlab="",ylab="Predators",cex.axis=1.2,cex.lab=1.1,xaxt="n")
lines(lowess(t.resid$P~t.resid$Year),lwd=4,col="red")
axis(1,labels=NA)
plot(SC~Year,data=t.resid,bty="l",xlab="",ylab="Sec. Consumers",cex.axis=1.2,cex.lab=1.1,xaxt="n")
lines(lowess(t.resid$SC~t.resid$Year),lwd=4,col="red")
axis(1,labels=NA)
# BUG FIX: the GD/BR/SE panels carried a copy-pasted ylab of
# "Sec. Consumers"; corrected to the trophic group actually plotted
# (matching the mtext labels below).
plot(GD~Year,data=t.resid,bty="l",xlab="",ylab="Grazers",cex.axis=1.2,cex.lab=1.1,xaxt="n")
lines(lowess(t.resid$GD~t.resid$Year),lwd=4,col="red")
axis(1,labels=NA)
plot(BR~Year,data=t.resid,bty="l",xlab="",ylab="Browsers",cex.axis=1.2,cex.lab=1.1,xaxt="n")
lines(lowess(t.resid$BR~t.resid$Year),lwd=4,col="red")
axis(1,labels=NA)
plot(SE~Year,data=t.resid,bty="l",ylab="Scrapers",cex.axis=1.2,cex.lab=1.1,xlab="Year")
lines(lowess(t.resid$SE~t.resid$Year),lwd=4,col="red")
mtext("Total",outer=T,side=2,at=0.92,line=1,cex=1)
mtext("Predators",outer=T,side=2,at=0.752,line=1,cex=1)
mtext("Sec. Consumers",outer=T,side=2,at=0.584,line=1,cex=1)
mtext("Grazers",outer=T,side=2,at=0.416,line=1,cex=1)
mtext("Browsers",outer=T,side=2,at=0.248,line=1,cex=1)
mtext("Scrapers",outer=T,side=2,at=0.09,line=1,cex=1)
mtext("Year",outer=T,side=1,line=1,cex=1)
dev.off()
# total herbivore biomass -------------------------------------------------
# Pool the three herbivore trophic groups (GD, BR, SE) into one "H" class
# and model total herbivore biomass per replicate.
temp <- fish.use
temp$H <- ifelse(temp$Trophic=='GD'|temp$Trophic=='BR'|temp$Trophic=='SE','H','other')
# Sum biomass per replicate, then spread so "H" and "other" become columns;
# replicates with no herbivores end up NA and are zeroed just below.
H.bio.raw <- temp %>%
  group_by(DatasetID,Replicate,Location,Year,H) %>%
  summarise("sum"=sum(bio_use,na.rm=T)) %>%
  spread(key=H,value=sum) %>%
  ungroup()
H.bio.raw$H[is.na(H.bio.raw$H)] <- 0
H.bio.raw$DatasetID <- as.factor(H.bio.raw$DatasetID)
# Round-trip through character to drop unused factor levels in Location.
H.bio.raw$Location <- as.character(H.bio.raw$Location); H.bio.raw$Location <- as.factor(H.bio.raw$Location)
# Mixed model: log(H + 1) by Location with a random intercept per dataset.
H.mod.l <- lmer(log(H+1) ~ Location + (1|DatasetID) , data=H.bio.raw)
plot(residuals(H.mod.l)~fitted(H.mod.l))   # quick residual diagnostic
# Prediction grid: one row per Location; DatasetID fixed at 11. Country is
# carried along but is not a term in H.mod.l -- presumably ignored; confirm.
newdat <- data.frame(expand.grid(Location=unique(H.bio.raw$Location),DatasetID=11,Country="USVI"))
# tb.mod.pred <- predict(tb.mod.l,newdata=newdat)
# tb.mod.pred <- exp(tb.mod.pred) - 1
# trophic.loc.mod[k] <- tb.mod.pred
# NOTE(review): level = 0.05 asks merTools::predictInterval for a 5%
# interval (its default is 0.95) -- confirm this very narrow interval is
# intended rather than a 95% interval.
H.CI <- predictInterval(H.mod.l, newdata = newdat, n.sims = 10000, level=0.05)
H.CI <- exp(H.CI)-1   # back-transform from the log(H + 1) scale
H.loc.mod <- H.CI
H.loc.mod$Location <- newdat$Location
# Attach modelled coral and macroalgae cover CIs by Location.
H.loc.mod <- left_join(H.loc.mod, coral.mod.CI, by="Location")
H.loc.mod <- left_join(H.loc.mod, macro.mod.CI, by="Location")
str(H.loc.mod)
#### plot
# Supplementary figure SOM_herb: three panels relating modelled total
# herbivore biomass (H.loc.mod$fit) to (1) coral cover, (2) macroalgal
# cover, and (3) log(coral/macroalgae). Each panel fits a linear model and
# a 3-knot GAM, compares them with model.sel(), and draws the GAM fit with
# a +/- 1.96 SE ribbon behind the raw points.
png(file='outputs/SOM_herb.png',height=1200,width=3200,res=300)
par(mfrow=c(1,3),mar=c(3,4.5,2,1),oma=c(2,0,0,0),mgp=c(2,.7,0))
# --- Panel 1: coral cover ~ herbivore biomass ---
temp <- H.loc.mod[!is.na(H.loc.mod$coral),]
tp <-lm(temp$coral~temp$fit); summary(tp)
# ncvTest(tp); op <- par(mfrow=c(2,2),mar=c(4,4,2,1)); plot(tp); par(op) # non-constant error variance test
tpg <- gam(coral~s(fit,k=3),data=temp,family=gaussian); summary(tpg)
model.sel(tp,tpg)
tpg.pred <- predict(tpg, se.fit=T)
# Confidence band (fit +/- 1.96 SE), sorted by x so polygon() draws cleanly.
tpg.pred <- data.frame(x=temp$fit,y=tpg.pred$fit,up=tpg.pred$fit+1.96*tpg.pred$se.fit
                       ,down=tpg.pred$fit-1.96*tpg.pred$se.fit)
tpg.pred <- tpg.pred[with(tpg.pred,order(x)),]
# points(tpg.pred$x,tpg.pred$y,type="l",col="black",lwd=2,lty=2)
# type = "n" draws the axes only; the ribbon and fit line are layered first,
# then the observed points on top.
plot(temp$fit,temp$coral,pch=21,col="black",bg="grey",bty="l",xlab="",ylab="Coral Cover (%)",cex.axis=1.2,cex.lab=1.5,ylim=c(0,60),xlim=c(0,30),type="n")
# axis(1,labels=NA)
polygon(c(tpg.pred$x,rev(tpg.pred$x)),c(tpg.pred$up,rev(tpg.pred$down)),col=rgb(169,169,169,150,max=255),border=NA)
points(tpg.pred$x,tpg.pred$y,type="l",col="black",lwd=2)
# }
points(temp$fit,temp$coral,pch=21,col="black",bg="grey")
# --- Panel 2: macroalgal cover ~ herbivore biomass (same recipe) ---
temp <- H.loc.mod[!is.na(H.loc.mod$macro),]
tp <-lm(temp$macro~temp$fit); summary(tp)
# ncvTest(tp); op <- par(mfrow=c(2,2),mar=c(4,4,2,1)); plot(tp); par(op) # non-constant error variance test
tpg <- gam(macro~s(fit,k=3),data=temp,family=gaussian); summary(tpg)
model.sel(tp,tpg)
tpg.pred <- predict(tpg, se.fit=T)
tpg.pred <- data.frame(x=temp$fit,y=tpg.pred$fit,up=tpg.pred$fit+1.96*tpg.pred$se.fit
                       ,down=tpg.pred$fit-1.96*tpg.pred$se.fit)
tpg.pred <- tpg.pred[with(tpg.pred,order(x)),]
# points(tpg.pred$x,tpg.pred$y,type="l",col="black",lwd=2,lty=2)
plot(temp$fit,temp$macro,pch=21,col="black",bg="grey",bty="l",xlab="",ylab="Macroalgal Cover (%)",cex.axis=1.2,cex.lab=1.5,ylim=c(0,60),xlim=c(0,30),type="n")
# axis(1,labels=NA)
polygon(c(tpg.pred$x,rev(tpg.pred$x)),c(tpg.pred$up,rev(tpg.pred$down)),col=rgb(169,169,169,150,max=255),border=NA)
points(tpg.pred$x,tpg.pred$y,type="l",col="black",lwd=2)
# }
points(temp$fit,temp$macro,pch=21,col="black",bg="grey")
# --- Panel 3: log coral-to-macroalgae ratio ~ herbivore biomass ---
# Requires both covers to be non-missing (and implicitly non-zero macro,
# since the ratio is logged).
temp <- H.loc.mod[!is.na(H.loc.mod$macro),]
temp <- temp[!is.na(temp$coral),]
temp$cm <- log(temp$coral/temp$macro)
tp <-lm(temp$cm~temp$fit); summary(tp)
# ncvTest(tp); op <- par(mfrow=c(2,2),mar=c(4,4,2,1)); plot(tp); par(op) # non-constant error variance test
tpg <- gam(cm~s(fit,k=3),data=temp,family=gaussian); summary(tpg)
model.sel(tp,tpg)
tpg.pred <- predict(tpg, se.fit=T)
tpg.pred <- data.frame(x=temp$fit,y=tpg.pred$fit,up=tpg.pred$fit+1.96*tpg.pred$se.fit
                       ,down=tpg.pred$fit-1.96*tpg.pred$se.fit)
tpg.pred <- tpg.pred[with(tpg.pred,order(x)),]
# points(tpg.pred$x,tpg.pred$y,type="l",col="black",lwd=2,lty=2)
plot(temp$fit,temp$cm,pch=21,col="black",bg="grey",bty="l",xlab="",ylab="log(Coral/Macroalgae)",cex.axis=1.2,cex.lab=1.5,xlim=c(0,30),type="n")
# axis(1,labels=NA)
polygon(c(tpg.pred$x,rev(tpg.pred$x)),c(tpg.pred$up,rev(tpg.pred$down)),col=rgb(169,169,169,150,max=255),border=NA)
points(tpg.pred$x,tpg.pred$y,type="l",col="black",lwd=2)
# }
points(temp$fit,temp$cm,pch=21,col="black",bg="grey")
# Shared x-axis title across all three panels.
mtext(expression("Total Herbivore Biomass"~~bgroup("(","g "*m^{-2},")")),side=1,outer=T,cex=1)
dev.off()
# SOM 1 & 3 -------------------------------------------------------------------
# Build the supplementary tables: SOM1 summarises sample counts and year
# ranges per data-provider group; SOM3 summarises replicates and dataset
# counts per Location.
str(tot.bio.m)
temp <- tot.bio.m
# Factor -> integer via character so IDs match the lookup table below.
temp$DatasetID <- as.character(temp$DatasetID); temp$DatasetID <- as.integer(temp$DatasetID)
# Manual lookup mapping each DatasetID to its provider group label.
meta.grp <- data.frame(
  DatasetID = c(11L, 33L, 40L, 47L, 48L, 56L, 59L, 60L, 200L, 201L, 202L,
                203L, 205L, 206L, 207L, 209L, 210L, 211L, 212L, 216L, 218L,
                223L, 262L, 581L, 699L, 700L, 700L),
  grp = as.character(c("1 Alan & Jim", "2 Alan CO", "3 NOAA LP", "5 UVI ",
                       "4 NOAA USVI", "6 Pete", "7 Marah", "7 Marah",
                       "8 AGRRA", "8 AGRRA", "8 AGRRA", "8 AGRRA", "8 AGRRA",
                       "8 AGRRA", "8 AGRRA", "8 AGRRA", "8 AGRRA", "8 AGRRA",
                       "8 AGRRA", "8 AGRRA", "8 AGRRA", "8 AGRRA",
                       "8 AGRRA", "7 Marah", "9 FL", "8 Waitt", "8 Waitt"))
)
# NOTE(review): DatasetID 700 appears twice in meta.grp with two different
# labels ("8 Waitt" twice here, but the duplicate row will duplicate joined
# rows) -- confirm the duplicate entry is intentional.
temp <- left_join(temp,meta.grp,by='DatasetID')
str(temp)
temp %>% distinct(DatasetID,grp)
# SOM1: per provider group, number of records and the year range covered.
SOM1 <- temp %>% group_by(grp) %>% summarise('n'=length(tot.bio),'minYr'=min(Year),'maxYr'=max(Year))
sum(SOM1$n)   # sanity check: total record count
write.csv(SOM1, 'outputs/SOM1.csv',row.names=F)
# SOM3: per Location, number of replicates and number of distinct datasets.
som3 <- temp %>% group_by(Location) %>% summarise("n"=length((Replicate))) %>% ungroup()
temp <- temp %>% group_by(Location) %>% summarise("nDat"=length(unique(DatasetID))) %>% ungroup()
som3 <- full_join(som3,temp,by='Location')
write.csv(som3,'outputs/SOM3.csv',row.names = FALSE)
sum(som3$n)   # sanity check: should match the SOM1 total
# end ---------------------------------------------------------------------
# Report the total runtime (Start is set at the top of the script).
Sys.time()-Start
|
c58c8911e87fdd11f8d0bd6cc20b7c91459ad4c8 | 86d782689ef2c4f1c5d943ef37dd95f2a124589f | /R/vignette_filter.R | 0215c266e8174ad2ad31316091d05a1f6bd6b735 | [
"MIT"
] | permissive | trevorkwan/Rmagine | 3c43ce2e485195c9419e06e9e5d9e51a7a04fb8c | 8d82b8a6777af46287776920743ef71e2f1867fa | refs/heads/master | 2022-12-11T06:05:17.189007 | 2020-03-26T22:30:49 | 2020-03-26T22:30:49 | 243,434,114 | 0 | 0 | NOASSERTION | 2020-02-27T04:55:00 | 2020-02-27T04:54:59 | null | UTF-8 | R | false | false | 2,877 | r | vignette_filter.R | # Created on February 28, 2020
# author: Brendon Campbell
# Implementation of vignette_filter function in the Rmagine package.
#' Apply a vignetting filter to a given image
#'
#' Darkens the image towards its edges by multiplying each pixel with a
#' 2D Gaussian weighting mask (value 1 at the image centre), then saves the
#' transformed image.
#'
#' @param image_path string: The local file path for the image to which the
#'   filter will be applied. Must end in .png, .jpg or .jpeg; URLs are rejected.
#' @param strength double: positive strength of the dimming effect; larger
#'   values narrow the Gaussian and darken the edges more. Default: 1.0.
#' @param return_file_name string: File name for the transformed image to be saved.
#' @param dest_folder string: Destination folder name for storing transformed
#'   images; concatenated directly with `return_file_name`, so it should end
#'   with "/".
#'
#' @return A `cimg` image object (imager package) with the vignette applied;
#'   the image is also written to disk as a side effect.
#' @export
#'
#' @examples
#' vignette_filter("imgs/pic.jpg", strength=2.5)
vignette_filter <- function(image_path, strength=1.0, return_file_name = "vignette.jpeg", dest_folder = "transformed_imgs/") {
  # --- input validation (scalar conditions, so && / || are appropriate) ---
  if(!is.character(image_path)){
    stop("Image file path must be a string.")
  }
  if (!endsWith(image_path, ".png") && !endsWith(image_path, ".jpeg") && !endsWith(image_path, ".jpg")){
    stop("Image format must be png, jpg, or jpeg.")
  }
  if (startsWith(image_path, "http") || startsWith(image_path, "www")){
    stop("Image file path can't be a URL, provide a local file path.")
  }
  if (strength <= 0){
    stop("Vignette strength must be a positive value.")
  }
  if(!is.character(return_file_name)){
    stop("Error: Output file name must be a string")
  }
  if(!endsWith(tolower(return_file_name), "jpg") && !endsWith(tolower(return_file_name), "png") && !endsWith(tolower(return_file_name), "jpeg")){
    stop("Error: Path given must end with .png, .jpg, or .jpeg")
  }
  if(!is.character(dest_folder)){
    stop("Error: Destination folder must be a string")
  }
  # --- load the image; imager's cimg arrays are indexed [x, y, z, colour] ---
  image <- imager::load.image(image_path)
  cols <- imager::width(image)
  rows <- imager::height(image)
  original_array <- as.array(image)
  vignette_array <- as.array(image)
  # Stronger vignettes use a narrower Gaussian (smaller sigma).
  sigma <- ((rows + cols)/(1 + strength))/2
  # Separable 1D Gaussian profiles along each axis ...
  filt_cols <- stats::dnorm(seq_len(cols), mean = cols/2, sd = sigma)
  filt_rows <- stats::dnorm(seq_len(rows), mean = rows/2, sd = sigma)
  # ... combined via outer product into a rows x cols mask, rescaled so the
  # centre weight is exactly 1 (no dimming at the centre).
  filt_2d <- filt_rows %*% t(filt_cols)
  filt_2d <- filt_2d/max(filt_2d)
  # --- apply the mask to each RGB channel ---
  # The mask is rows x cols while a cimg slice is cols x rows, hence the
  # transpose. This vectorised multiply replaces the original scalar
  # triple loop over (channel, x, y), which is extremely slow in R.
  mask_t <- t(filt_2d)
  for (channel in 1:3) {
    vignette_array[, , 1, channel] <- original_array[, , 1, channel] * mask_t
  }
  vignette_image <- imager::as.cimg(vignette_array)
  imager::save.image(vignette_image, file = paste0(dest_folder, return_file_name))
  print("The filtered image has been saved to the specified directory.")
  return(vignette_image)
}
|
8b82ec25d19a70dcb6b71a2cd264b0ad8c5906da | 237568b413ec0336a3715d8ca01b3b7ff3260a0f | /analysis/shiny-apps/players_moves_rgl/app.R | 3e81d292d3ca7116afbb98b96ebcbd94c34a4d40 | [] | no_license | tuubes/GameAnalysis-IUT | 2c02bd739dbc06dc49e69a273b1424521b102d90 | 1b550d17a3444c476209cea46cee34fa423fdf4c | refs/heads/master | 2018-09-30T19:02:50.733080 | 2018-06-15T10:54:14 | 2018-06-15T10:54:14 | 119,714,920 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,613 | r | app.R | #
# Shiny Web App - player movements, 3D visualisation
# (c) Guillaume Raffin 2018
#
# Required packages: shiny, shinyjs, RJDBC, data.table, rgl
# Working directory: the application folder, containing voxel_analysis.R and an
# adjacent "db_data" folder holding the databases and h2.jar
# WARNING: a working display server with the 3D development libraries is
# required for the application to work on the Shiny server
library(shiny)
library(shinyjs)
library(RJDBC)
library(data.table)
library(rgl)
# Provides rgl3D() and getColor() used by the server function below.
source("../voxel_analysis.R")
# -- Fetch the data from the H2 databases ---
print("Récupération des données depuis la BDD H2")
# Paths to the H2 JDBC driver and the two game-server databases.
driverH2 <- sprintf("%s/../db_data/h2.jar", getwd())
dbBofas <- sprintf("%s/../db_data/database_bofas", getwd())
dbJulien <- sprintf("%s/../db_data/database_julien", getwd())
drv <- JDBC(driverClass = "org.h2.Driver", classPath = driverH2, identifier.quote="`")
connBofas <- dbConnect(drv, paste("jdbc:h2:", dbBofas, sep=""), "", "")
connJulien <- dbConnect(drv, paste("jdbc:h2:", dbJulien, sep=""), "", "")
dbGetQuery(connBofas, "SHOW TABLES")
dbGetQuery(connJulien, "SHOW TABLES")
# X,Y,Z for the whole server, one XYZ per row:
sqlAllMoves <- "SELECT ChunkX X, ChunkY Y, ChunkZ Z FROM PlayerMoves"
# X,Y,Z for the whole server, each XYZ appearing once with its occurrence count N:
sqlAllMovesFreq <- "SELECT ChunkX X, ChunkY Y, ChunkZ Z, count(*) N FROM PlayerMoves GROUP BY X,Y,Z ORDER BY N DESC"
dataBofas <- data.table(dbGetQuery(connBofas, sqlAllMovesFreq))
dataJulien <- data.table(dbGetQuery(connJulien, sqlAllMovesFreq))
# FIX: data.tables are modified by reference, so `dataBofasLog <- dataBofas`
# would alias the same table and the `:=` below would ALSO overwrite N in
# dataBofas/dataJulien (the linear-scale option would silently show log
# data). copy() gives each log table its own storage.
dataBofasLog <- copy(dataBofas)
dataJulienLog <- copy(dataJulien)
dataBofasLog[, N := log10(N)+1]
dataJulienLog[, N := log10(N)+1]
dbDisconnect(connBofas)
dbDisconnect(connJulien)
# -- Shiny user interface --
# Sidebar: choose the game server and two display options; main panel hosts
# the interactive rgl 3D widget rendered by the server. (User-facing labels
# are intentionally in French.)
ui <- fluidPage(
  # shinyjs lets us improve the Shiny experience, notably by enabling and
  # disabling inputs while the 3D model is being generated.
  useShinyjs(),
  titlePanel("Déplacements des joueurs : Visualisation 3D"),
  sidebarLayout(
    sidebarPanel(
      # Which game server's database to visualise.
      selectInput("serverChoice",
                  label="Choisissez le serveur de jeu",
                  choices = list("BOFAS", "Julien"),
                  selected = "Julien"),
      br(),
      # Colour scale: log10(N) vs raw counts (see server's data selection).
      checkboxInput("logScale",
                    "Échelle logarithmique",
                    value = TRUE),
      # Whether to triple the vertical spacing between Y layers (zFactor).
      checkboxInput("spaceY",
                    "Espacer les couches Y",
                    value = TRUE),
      helpText(
        "La génération du modèle 3D peut prendre jusqu'à 30 secondes, merci de bien vouloir patienter."
      ),
      helpText(
        "La vue initiale est une vue de dessus. Utilisez la souris pour vous déplacer."
      )
    ),
    # The rgl 3D scene produced by output$amazing3D in the server.
    mainPanel(rglwidgetOutput("amazing3D", width="100%"))
  )
)
# -- Shiny server --
# Cache of the rendered 3D scenes, one slot per (server, logScale, spaceY)
# combination so each expensive rgl render happens at most once:
# BOFAS,  logScale=FALSE, spaceY=FALSE -> 1
# BOFAS,  logScale=FALSE, spaceY=TRUE  -> 2
# BOFAS,  logScale=TRUE,  spaceY=FALSE -> 3
# BOFAS,  logScale=TRUE,  spaceY=TRUE  -> 4
# Julien, logScale=FALSE, spaceY=FALSE -> 5
# Julien, logScale=FALSE, spaceY=TRUE  -> 6
# Julien, logScale=TRUE,  spaceY=FALSE -> 7
# Julien, logScale=TRUE,  spaceY=TRUE  -> 8
glCache <- vector(mode="list", length=8)
# Shiny server: computes (or retrieves from glCache) the 3D scene matching
# the current inputs and renders it as an rgl widget.
server <- function(input, output) {
  # Tell rgl we are running inside Shiny; restore the option when the
  # server function exits.
  save <- options(rgl.inShiny = TRUE)
  on.exit(options(save))
  cachedScene <- reactive({
    # Map the (logScale, spaceY) pair to a base cache index 1..4
    # (see the glCache mapping comment above).
    if(!input$logScale && !input$spaceY) {
      cacheIdx <- 1
    } else if(!input$logScale && input$spaceY) {
      cacheIdx <- 2
    } else if(input$logScale && !input$spaceY) {
      cacheIdx <- 3
    } else {
      cacheIdx <- 4
    }
    # Pick the data table for the chosen server/scale; Julien's scenes use
    # cache slots 5..8 (offset of 4).
    if(input$serverChoice == "BOFAS") {
      if(input$logScale) {
        data <- dataBofasLog
      } else {
        data <- dataBofas
      }
    } else {
      if(input$logScale) {
        data <- dataJulienLog
      } else {
        data <- dataJulien
      }
      cacheIdx <- cacheIdx + 4
    }
    # Vertical spacing factor between Y layers.
    if(input$spaceY) {
      zFactor <- 3
    } else {
      zFactor <- 1
    }
    # Subtitle describes the colour scale (user-facing, kept in French).
    if(input$logScale) {
      subtitle <- "Couleurs en fonction de log10(N)"
    } else {
      subtitle <- "Couleurs en fonction de N"
    }
    print(paste("cacheIdx:", cacheIdx))   # debug logging
    print(glCache)
    scene <- glCache[[cacheIdx]]
    # Cache miss: render the scene once, store it, and re-enable the inputs.
    if(is.null(scene)) {
      # Progress bar (one step per data row, plus one for saving).
      progress <- shiny::Progress$new()
      on.exit(progress$close())
      nbSteps <- nrow(data) + 1
      progress$set(message="Génération des cubes colorés...", value=0)
      # Disable the inputs during rendering to avoid inconsistent state.
      shinyjs::disable("serverChoice")
      shinyjs::disable("logScale")
      shinyjs::disable("spaceY")
      # 3D generation (rgl3D comes from voxel_analysis.R), advancing the
      # progress bar from the callback as each chunk is drawn.
      rgl3D(data, xi=1, zi=2, yi=3, zf=zFactor, colorFunction=getColor,
            xlab="X", ylab="Z", zlab="Y", subtitle=subtitle,
            title="Déplacements des joueurs", notifyProgressFunction = function(x,y,z) {
              progress$inc(1/nbSteps, detail=sprintf("Tronçon (%i, %i, %i)", x, y, z))
            })
      progress$inc(0, message="Enregistrement des données...", detail="")
      # Snapshot the current rgl device into a serialisable scene object,
      # then close the device.
      scene <- scene3d()
      rgl.close()
      glCache[[cacheIdx]] <<- scene # <<- updates glCache in the enclosing environment instead of a local copy
      shinyjs::enable("serverChoice")
      shinyjs::enable("logScale")
      shinyjs::enable("spaceY")
      progress$inc(1/nbSteps)
    }
    return(scene)
  })
  # Render the cached scene as the interactive widget shown in the UI.
  output$amazing3D <- renderRglwidget({
    rglwidget(x=cachedScene())
  })
}
# Run the application (blocks until the Shiny session ends)
shinyApp(ui = ui, server = server)
|
8bad5e2334d49919237cbbf44f22dc86cc25c601 | e13aa8574d9ae6d08fc8a4cc75607fccaa38b71a | /HW10/hw10_importance_sampling.R | 317fc3d4d30d6dd1092dd776de5d60dedb4a365c | [] | no_license | Dongzhikang/GR5206-Intro-to-ds-hw | 7c62e0542231b4d17b942e174338c2d0369bddda | 4bf3a1656ac3c206019adaf3e3f6b9b224c6e22c | refs/heads/master | 2020-09-16T11:33:35.060026 | 2019-12-02T16:51:13 | 2019-12-02T16:51:13 | 223,756,992 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,701 | r | hw10_importance_sampling.R | # HW10: Importance sampling in Monte Carlo integration
#
# In this exercise, we will go over a simple application of monte carlo
# estimation of an integral. We will estimate the standard normal CDF
# between 1.5 and 2.5, namely `P(x > 1.5 & x < 2.5)` where `x ~ norm(0, 1)`.
# We can't derive the integral manually, thus we will use monte carlo estimation.
# The idea of Importance Sampling is to sample from a second distribution that
# will generate most of the samples in the desired interval, to have a fast
# convergence and find an appropriate acceptance rate to get the correct
# distribution approximation.
#
# 1. Create a function `in_set <- function(x, minx, maxx)` that takes as input
# a numeric vector `x` and the limit `minx` and `maxx` and returns a vector
# of `1` or `0` depending on whether the corresponding element of `x` falls
# into the interval `[minx, maxx]` os not.
# (It returns numeric `0` if not in this interval and `1` otherwise)
# Example : `in_set(c(1, 2), 2, 2.5)` is `c(0, 1)`.
# Create a function `norm_density <- function(x)` that takes as input
# one scalar `x` and returns the standard normal density of that value.
# Example : `norm_density(2) = 0.05399097`.
# Create a function `g <- function(x)` that takes as input
# one scalar `x` and returns `in_set(x, 1.5, 2.5) * norm_density(x)`.
# Example : `g(2) = 0.05399097`.
## Do not modify this line!
# Indicator for membership in the closed interval [minx, maxx]: returns a
# numeric vector of 1s and 0s the same length as x (NA inputs stay NA).
# Uses a base-R comparison instead of dplyr::between(), which was called
# here before the tidyverse is loaded in step 2 and would therefore error.
in_set <- function(x, minx, maxx) as.numeric(x >= minx & x <= maxx)
# Standard normal density evaluated elementwise at x.
norm_density <- function(x) dnorm(x)
# Integrand for estimating P(1.5 < X < 2.5), X ~ N(0, 1): the standard
# normal density restricted to [1.5, 2.5], and 0 elsewhere.
g <- function(x) in_set(x, 1.5, 2.5)*norm_density(x)
# 2. Load the `tidyverse`, `tibble` and `ggplot2` packages.
# Use `set.seed()` to set seed to `0` and save it into `seed1` using
# `.Random.seed`. Use samples from standard normal to
# estimate the cumulative distribution function between 1.5 and 2.5.
# Create a tibble called `data` with three columns:
# - `x` are 1000 samples generated randomly from standard normal using `rnorm`
# - `y`, that is equal to 1 if `x` is in `[1.5, 2.5]` and 0 otherwise.
# (you can use `in_set()`)
# - `z` the density evaluated at point `x`. (you can use `norm_density`)
# - inside `labs`, set `x` to "X", `y` to `"Density Function"` and `title`
# to `"Standard Normal density"`, `subtitle` to
# `"Integral interval shown in red"`.
# `data` should print to:
# # A tibble: 1,000 x 3
# x y z
# <dbl> <int> <dbl>
# 1 1.26 0 0.180
# 2 -0.326 0 0.378
# 3 1.33 0 0.165
# 4 1.27 0 0.178
# 5 0.415 0 0.366
# 6 -1.54 0 0.122
# 7 -0.929 0 0.259
# 8 -0.295 0 0.382
# 9 -0.00577 0 0.399
# 10 2.40 1 0.0221
# # … with 990 more rows
# Estimate the integral value by calculating the mean of `data$y` and
# save the result to `cdf_estimate`. Save the standard deviation of
# `data$y` to `sd_estimate`.
# (As we are using the same distribution as the function,
# the integral is an approximation of `P(1.5 < x < 2.5))`
# Calculate the true value by using difference of `pnorm()` at 2.5 and 1.5
# and save it into `cdf_gold`. Compare our estimated `cdf_estimate` and
# `cdf_gold`. We can notice that the variance of the estimate `sd_estimate`
# is pretty high.
## Do not modify this line!
# Step 2: naive Monte Carlo estimate of P(1.5 < X < 2.5) for X ~ N(0, 1),
# sampling directly from the target (standard normal) distribution.
set.seed(0)
seed1 <- .Random.seed   # snapshot of the RNG state right after seeding
data <- tibble(x=rnorm(1000), y = in_set(x, 1.5, 2.5), z = norm_density(x))
# The estimate is simply the fraction of samples landing in [1.5, 2.5].
cdf_estimate <- mean(data$y)
sd_estimate <- sqrt(var(data$y))   # equivalent to sd(data$y)
# Exact value for comparison.
cdf_gold <- pnorm(2.5)-pnorm(1.5)
# 3. Plot the density function of standard normal to explore why this would happen.
# To plot the figure :
# - use `ggplot()` to initialize the ggplot object on `data`.
# - use `geom_line()` with `mapping` set to `aes(x = x, y = z)` to draw
# the full normal plot.
# - use `geom_line()` with `data` `filter()` by `y == 1`, with `mapping`
# set to `aes(x = x, y = z)` and `color` set to `red` to highlight the
# area we want to take integral of.
# - inside `labs()`, set `x` to `"X"`, `y` to `"Density Function"`,
# `title` to `"Standard Normal density"` and `subtitle` to
# `"Integral interval shown in red"`.
# - use `theme_light()`.
# Save the plot to `norm_plot`.
# Then we can see the reason why the variance is high when sampling from
# `norm(0, 1)` : we have relatively low probability to get samples
# within `(1.5, 2.5)` range.
# Thus, the value tend to vary a lot (and it has high standard deviation).
# We should use another distribution which has a higher concentration over
# the range `(1.5, 2.5)`.
## Do not modify this line!
# Step 3: visualise why the direct estimator is noisy -- the standard
# normal puts little mass on [1.5, 2.5] (red segment), so few samples land
# there and the indicator has high variance.
norm_plot <- ggplot(data)+
  geom_line(aes(x,z))+
  geom_line(data = data%>%filter(y==1),mapping = aes(x, z),color="red")+
  labs(x="X",
       y="Density Function",
       title = "Standard Normal density",
       subtitle = "Integral interval shown in red")+
  theme_light()
norm_plot
# 4. Now, we will explore this effect by using three different distributions
# and use Importance Sampling to estimate the integral.
# - set `n` to 1e4.
# - Use `set.seed()` to set seed to `0` and save seed to `seed2` using
# `.Random.seed`.
# - Generate `n` samples from `uniform(1.5, 2.5)` and save it to `uniform`.
# - Use `set.seed()` to set seed to `0` and save seed to `seed3` using
# `.Random.seed`.
# - Generate `n` samples from `normal(0, 1)` and save them into `original`.
# - Generate tibble `fit` using `tribble()`, inside which:
# - set three formula `~x` to represent our samples, `~name` to
# record the distribution, `~g` to caculate the corresponding density
# value.
# - then add these rows by specifying the values:
# `uniform`, `"uniform"`, `dunif(uniform, 1.5, 2.5)`,
# `original`, `"original"`, `dnorm(original, 0, 1)`
# - Use `mutate` to create column `g_over_f` using `map2()` and `g()` on
# columns `x` and `f`.
# Save the result into `fit`. It should print to :
# # A tibble: 2 x 4
# x name f g_over_f
# <list> <chr> <list> <list>
# 1 <dbl [10,000]> uniform <dbl [1]> <dbl [10,000]>
# 2 <dbl [10,000]> original <dbl [10,000]> <dbl [10,000]>
## Do not modify this line!
# Step 4: importance sampling -- compare two proposal distributions:
# Uniform(1.5, 2.5) (density constant 1 on its support) and the original
# N(0, 1). Both are drawn from the same seed for comparability.
n <- 1e4
set.seed(0)
seed2 <- .Random.seed
uniform <- runif(n, 1.5, 2.5)
set.seed(0)
seed3 <- .Random.seed
original <- rnorm(n)
# One row per proposal: samples x, proposal name, proposal density f at x,
# and the importance-weighted integrand g(x)/f(x).
fit <- tribble(~x,~name,~f,
               uniform, "uniform", 1,
               original, "original", dnorm(original, 0, 1))%>%
  mutate(g_over_f = map2(x,f,function(x,y) g(x)/y))
fit
# 5. Calculate the expectation of `f(x)/g(x)` over distribution `g` by calculating
# the mean of column `z` for all of our samples.
# To do that, use `transmute()` to create new columns:
# - use `mean()` and `map_dbl()` to generate
# column `mean` recording the estimated integral under each distribution,
# and `sd() / sqrt(n)` and `map_dbl()` to calculate column `se` which
# is the variance of `f(x)/g(x)` under each set of samples.
# - create column `upper` by setting it to `mean + 1.96 * se` and `lower`
# by setting to `mean - 1.96 * se`.
# Save the result tibble into `result` and it should print to:
# # A tibble: 2 x 4
# mean se lower upper
# <dbl> <dbl> <dbl> <dbl>
# 1 0.0607 0.000326 0.0600 0.0613
# 2 0.0613 0.00240 0.0566 0.0660#'
## Do not modify this line!
# Step 5: the importance-sampling estimate is mean(g/f); its Monte Carlo
# standard error is sd(g/f)/sqrt(n), with a 95% normal confidence interval.
result <- fit%>%
  transmute(mean = map_dbl(g_over_f,mean),
            se = map_dbl(g_over_f,~sd(.x)/sqrt(n)))%>%
  mutate(lower = mean-se*1.96,
         upper = mean+se*1.96)
result
# 6. We will notice that for sample distribution from `uniform(1.5, 2.5)`,
# we have smaller variance and closer estimate.
# To explore the effect of different uniform intervals on estimation variance,
# we will calculate the estimation for different uniform intervals centered
# around 2.
# - generate a sequence of possible uniform interval `width` using `seq()`
# ranging from 0.1 to 3 with interval 1e-2.
# - create a function `generate_sample <- function(w, seed = 0, n = 1e4)`,
# in which `w` represents the uniform interval width, `seed` represents the
# seed number, and `n` represents the sample size.
# Inside the function:
# - first set the seed to `seed` by `set.seed()`.
# - return `n` samples using `runif()` and set `min` to `2 - w/2`, `max` to
# `2 + w/2`. (The function will return the corresponding samples from the
# uniform distribution specified by width `w`).
# - create tibble `subsamples`, in which each row represents a different sample
# size:
# - use `tribble()`, set three formula `~width` to represent our sample size,
# `~uniform` to record the uniform samples.
# - then specify each row by `width` and its corresponding samples by `map`
# and `generate_sample`.
# - `unnest()` the tibble by `c(width, samples)`.
# - use `mutate()` and `map2()` to calculate weighted sample values `g_over_f`
# for each interval `width` and corresponding `samples` by customising
# `function(width, sample) g(sample) / dunif(sample, 2 - width/2, 2 + with/2)`
# - calculate the estimation result by `transmute()` to create new columns:
# - use `mean()` and `map_dbl()` to generate column `mean` recording the
# estimated integral under each distribution, and `sd() / sqrt(n)` and
# `map_dbl()` to calculate column `se` which
# is the variance of `f(x)/g(x)` under each set of samples.
# - create column `upper` by setting it to `mean + 1.96 * se` and `lower`
# by setting to `mean - 1.96 * se`.
# Save the tibble to `result2`. it should print to:
# # A tibble: 201 x 4
# mean se lower upper
# <dbl> <dbl> <dbl> <dbl>
# 1 0.0607 0.000326 0.0600 0.0613
# 2 0.0606 0.000333 0.0599 0.0612
# 3 0.0606 0.000341 0.0599 0.0613
# 4 0.0605 0.000348 0.0598 0.0612
# 5 0.0604 0.000354 0.0597 0.0611
# 6 0.0604 0.000361 0.0597 0.0611
# 7 0.0604 0.000368 0.0597 0.0611
# 8 0.0604 0.000374 0.0597 0.0611
# 9 0.0603 0.000380 0.0596 0.0611
# 10 0.0604 0.000386 0.0596 0.0612
# # … with 191 more rows
## Do not modify this line!
# Grid of uniform-interval widths to compare (1.00, 1.01, ..., 3.00).
width <- seq(from = 1, to = 3, by = 0.01)
# Draw `n` samples from the uniform distribution of width `w` centred at 2,
# i.e. Uniform(2 - w/2, 2 + w/2), fixing the RNG seed first so every call
# with the same arguments is reproducible.
generate_sample <- function(w, seed = 0, n = 1e4) {
  set.seed(seed)
  half_width <- w / 2
  runif(n, min = 2 - half_width, max = 2 + half_width)
}
# Step 6: repeat the estimate for every uniform proposal width (centred at
# 2). One row per width after unnesting; `g_over_f` holds the weighted
# integrand g(x)/dunif(x; 2 - w/2, 2 + w/2) for that width's samples.
subsamples <- tribble(~width,~samples,
                      width, map(width,generate_sample))%>%
  unnest(c(width,samples))%>%
  mutate(g_over_f = map2(width,samples,function(width, sample) g(sample) / dunif(sample, 2 - width/2, 2 + width/2)))
# Mean, Monte Carlo standard error and 95% CI of the estimate per width.
result2 <- subsamples%>%
  transmute(mean = map_dbl(g_over_f,mean),
            se = map_dbl(g_over_f,~sd(.x)/sqrt(n)))%>%
  mutate(lower = mean-1.96*se,
         upper= mean+1.96*se)
# 7. Next, we can visualize the variance trend as the interval width changes.
# - use `ggplot` to initialize the plot object over `result2`. Set `mapping`
# to `aes(y = mean, x = width)`.
# - use `geom_line` to plot variance curves.
# - use `geom_ribbon()` to draw standard deviation shade to the plot,
# set `mapping` to `aes(ymin = lower, ymax = upper)`, set `alpha`
# to 0.2 and `fill` to `"orange"`.
# - add gold line by `geom_line` with `mapping` set to
# `aes(y = cdf_gold, x = width)` and `color` set to "red".
# - use `scale_x_reverse()` to reverse the axis.
# - inside `labs`, set `title` to `"MC estimate for different uniform interval"`,
# set `subtitle` to `"Variance decreases with the interval width"`,
# `x` to `"Interval Width"` and `y` to `"MC Estimate"`.
# - use `theme_light()`.
# Save the plot into `variance_plot`.
## Do not modify this line!
# Step 7: estimate vs interval width with the 1.96*se ribbon; the exact
# value (cdf_gold) is drawn as a red reference line. The x-axis is reversed
# so the narrowing of the ribbon with smaller widths reads left to right.
variance_plot <- ggplot(result2)+
  geom_line(aes(y=mean,x=width))+
  geom_ribbon(aes(x=width,ymin=lower,ymax=upper),alpha=.2,fill='orange')+
  geom_line(aes(y=cdf_gold,x=width),color="red")+
  scale_x_reverse()+
  labs(title = "MC estimate for different uniform interval",
       subtitle = "Variance decreases with the interval width",
       x="Interval Width",
       y = "MC Estimate")+
  theme_light()
variance_plot
|
9db5c6989fb0cd0b0d1df9984296d6cd56d19608 | 7801aeb2ddf556153ade35ac2bbed507cefc869f | /wetlands/assess_wetlands_and_ponds.r | 38789b2c45168b60231af71e6a1dc112624790e2 | [] | no_license | dnrwaterqualitymodeling/wisconsinRiverTMDL | af2b66c09be2078efb8c128624afb418170c0992 | 6c4eccf875f154bb69a95291940d1e16234e1030 | refs/heads/master | 2021-01-21T18:05:29.104788 | 2016-06-29T17:50:46 | 2016-06-29T17:50:46 | 23,920,607 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,006 | r | assess_wetlands_and_ponds.r | wd = "H:/WRB/Scenarios/Default/TxtInOut"
files_pnds = list.files(wd, "*.pnd")
dat = data.frame()
for (fl in files_pnds){
lnes = readLines(paste(wd, fl, sep="/"))
sb = gsub("0", "", substr(fl, 1, 5))
print(paste("working on Subabsin", sb))
PND_FR = substr(lnes[3], 9, 16)
PND_PSA = substr(lnes[4], 9, 16)
PND_PVOL = substr(lnes[5], 9, 16)
PND_ESA = substr(lnes[6], 9, 16)
PND_EVOL = substr(lnes[7], 9, 16)
WET_FR = substr(lnes[29], 9, 16)
WET_NSA = substr(lnes[30], 9, 16)
WET_NVOL = substr(lnes[31], 9, 16)
WET_MXSA = substr(lnes[32], 9, 16)
WET_MXVOL = substr(lnes[33], 9, 16)
rw = c(sb,
PND_FR,
PND_PSA,
PND_PVOL,
PND_ESA,
PND_EVOL,
WET_FR,
WET_NSA,
WET_NVOL,
WET_MXSA,
WET_MXVOL)
dat = rbind(dat, as.numeric(rw))
}
names(dat) = c("Subbasin",
"PND_FR",
"PND_PSA",
"PND_PVOL",
"PND_ESA",
"PND_EVOL",
"WET_FR",
"WET_NSA",
"WET_NVOL",
"WET_MXSA",
"WET_MXVOL")
# looks good...
plot(PND_FR ~ WET_FR, data=dat)
#####
hist(dat$PND_PVOL/dat$PND_PSA)
dat[which((dat$PND_PVOL/dat$PND_PSA)>50),]
hist(dat$PND_EVOL/dat$PND_ESA)
dat[which((dat$PND_EVOL/dat$PND_ESA)>75),]
hist(dat$WET_NVOL/dat$WET_NSA)
dat[which((dat$WET_NVOL/dat$WET_NSA)>10),]
### There are some anomalus, very high values of max volume.
### Got it: ponds were not masking sinks in wet script.
hist(dat$WET_MXVOL/dat$WET_MXSA)
dat[which((dat$WET_MXVOL/dat$WET_MXSA)>10),]
### how many have a larger normal than max? a few
dat[which(dat$PND_PVOL > dat$PND_EVOL),c('Subbasin', 'PND_PVOL', 'PND_EVOL')]
#### Issue here: why do a couple of these subs have greater emergency than principle surface areas?
dat[which(dat$PND_PSA > dat$PND_ESA),c('Subbasin', 'PND_PSA', 'PND_ESA')]
dat[which(dat$WET_NVOL > dat$WET_MXVOL),c('Subbasin', 'WET_NSA', 'WET_MXSA')]
dat[which(dat$WET_NSA > dat$WET_MXSA),c('Subbasin', 'WET_NVOL', 'WET_MXVOL')]
### How many have a calc'd volume even though fraction = 0
dat[which(dat$PND_PVOL > 0 & dat$PND_FR == 0),]
dat[which(dat$WET_NVOL > 0 & dat$WET_FR == 0),]
|
a9a7c2f85b00c888ccd38190b136eed6194da656 | ada38fccd871bbc52589c2c97b86a6d5c9dfd4ff | /scripts/6_figures7-8.R | fdbf0daf574d153ee3b60afdcf932ce378003c1a | [] | no_license | wrahool/twitter-landscape-old | b2412433c168108c4536440bc142a3540b58f2e3 | ca0228d9e32462b4cca034cf458be289dc7a29b4 | refs/heads/master | 2023-05-06T18:45:58.292479 | 2021-05-21T09:10:57 | 2021-05-21T09:10:57 | 237,348,870 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,072 | r | 6_figures7-8.R | #wt anlaysis
# Walktrap community / ideology analysis: load elite ideology scores,
# activity counts, follower counts and community memberships, and join them
# into one master table.
library(ggplot2)
library(plyr)
library(dplyr)
library(gridExtra)
library(moments)
# NOTE(review): hard-coded setwd() makes the script machine-specific; all
# paths below are relative to this directory.
setwd("C:/Users/Subhayan/Documents/Work/twitter-landscape/")
elite_df = read.csv("data/weak_elite_ideologies.csv", as.is = T)
# Missing scores become neutral (0); +/-Inf scores are clipped to +/-3.
elite_df[is.na(elite_df$ideology),]$ideology = 0
elite_df[elite_df$ideology == -Inf,]$ideology = -3
elite_df[elite_df$ideology == Inf,]$ideology = 3
# Rename the second column to "ideology" -- per the lines above that column
# appears to already carry this name; possibly redundant, confirm.
names(elite_df)[2] = "ideology"
elite_freq = read.csv("data/elites_activity.csv", as.is = T)
names(elite_freq)[1] = "username"
# Min-max scale tweet counts to [0, 1] for use as activity weights.
elite_freq$numberoftweets_scaled = (elite_freq$numberoftweets - min(elite_freq$numberoftweets))/(max(elite_freq$numberoftweets) - min(elite_freq$numberoftweets))
# Lowercase usernames so the merge keys match across sources.
elite_df$username = tolower(elite_df$username)
elite_freq_df = merge(elite_df, elite_freq)
# Activity-weighted ("corrected") ideology score.
elite_freq_df$corrected_ideology = elite_freq_df$ideology * elite_freq_df$numberoftweets_scaled
elite_followers_count = read.csv("data/elite_followers_count.csv")
# Loads `wt`, the walktrap community-detection result used below.
load("data/walktrap_results.Rdata")
# cbind() coerces both columns to character here; community is therefore a
# factor/character, not numeric.
comm_membership <- data.frame(cbind(wt$names, wt$membership))
names(comm_membership) <- c("username", "community")
comm_membership$username <- tolower(as.character(comm_membership$username))
library(tidyverse)
# Master table: one row per elite account with ideology, activity,
# community and follower count.
elite_master_tbl <- elite_freq_df %>%
  inner_join(comm_membership) %>%
  inner_join(elite_followers_count)
# Per-community medians of the raw and activity-weighted ideology scores,
# joined back onto the master table for use as facet reference lines.
# (plyr::summarise is used inside ddply even though dplyr is also attached.)
median1 <- ddply(elite_master_tbl, "community", summarise, grp.median=median(ideology, na.rm = T))
median2 <- ddply(elite_master_tbl, "community", summarise, grp.median.w=median(corrected_ideology, na.rm = T))
elite_master_tbl %>% inner_join(median1) -> elite_master_tbl
elite_master_tbl %>% inner_join(median2) -> elite_master_tbl
# NOTE(review): median.tbl is not referenced later in this file --
# possibly dead code; confirm before removing.
median.tbl <- elite_master_tbl %>%
  group_by(community) %>%
  mutate(median.id = median(ideology))
# Figure 7: per-community density of raw (p1) and activity-weighted (p2)
# ideology, with the community median as a red vertical line and a dashed
# reference at 0.
# NOTE(review): p1 maps colour = "red" INSIDE aes() (a constant mapping),
# while p2 sets color = "red" outside aes(); the two panels may therefore
# render the median line differently -- confirm which form is intended.
# Also, theme_bw() placed after a theme() call resets those strip settings.
p1 <- ggplot(elite_master_tbl, aes(ideology)) +
  geom_density(fill = "snow3", alpha = 0.6, colour="snow3") +
  facet_wrap(~community, nrow = 5, scales = "free") +
  xlim(-3, 3) +
  geom_vline(aes(xintercept=grp.median, colour = "red")) +
  geom_vline(xintercept=0, linetype = "dashed") +
  theme(
    strip.background = element_blank(),
    strip.text.x = element_blank()
  ) +
  theme_bw() +
  theme(legend.position = "none")
p2 = ggplot(elite_master_tbl, aes(corrected_ideology)) +
  geom_density(fill = "snow3", alpha = 0.6, colour="snow3") +
  facet_wrap(~community, nrow = 5, scales = "free") +
  xlim(-3, 3) +
  geom_vline(aes(xintercept=grp.median.w), color = "red") +
  geom_vline(xintercept=0, linetype = "dashed") +
  theme(
    strip.background = element_blank(),
    strip.text.x = element_blank()
  ) +
  theme_bw() +
  theme(legend.position = "none")
# figure 7
grid.arrange(p1, p2, nrow = 1)
# Per-community summary statistics: moments of the raw and corrected
# ideology distributions, plus total tweets, total followers and the
# number of elite accounts in each community.
# NOTE(review): growing comm_stats with rbind() in a loop is O(n^2); a
# list-of-rows + bind at the end would be faster. The loop variable `c`
# also shadows base::c() inside the loop body.
comm_stats <- NULL
for(c in 1:max(wt$membership)) {
  # Pull each per-member column for community c.
  c_ideologies <- elite_master_tbl %>%
    filter(community %in% c) %>%
    pull(ideology)
  c_corrected_ideologies <- elite_master_tbl %>%
    filter(community %in% c) %>%
    pull(corrected_ideology)
  c_tweetcount <- elite_master_tbl %>%
    filter(community %in% c) %>%
    pull(numberoftweets)
  c_followers <- elite_master_tbl %>%
    filter(community %in% c) %>%
    pull(followers)
  c_elite_count <- elite_master_tbl %>%
    filter(community %in% c) %>%
    pull(username)
  # Distribution summaries of the raw ideology scores
  # (skewness/kurtosis come from the moments package).
  c_i_mean = mean(c_ideologies)
  c_i_sd = sd(c_ideologies)
  c_i_median = median(c_ideologies)
  c_i_skewness = skewness(c_ideologies)
  c_i_kurtosis = kurtosis(c_ideologies)
  # Same summaries for the activity-weighted scores.
  c_ci_mean = mean(c_corrected_ideologies)
  c_ci_sd = sd(c_corrected_ideologies)
  c_ci_median = median(c_corrected_ideologies)
  c_ci_skewness = skewness(c_corrected_ideologies)
  c_ci_kurtosis = kurtosis(c_corrected_ideologies)
  # Community-level totals.
  total_tweets = sum(c_tweetcount)
  total_followers = sum(c_followers)
  elite_count = length(c_elite_count)
  # One all-numeric row per community.
  c_row = c(c,
            c_i_mean, c_i_sd, c_i_median, c_i_skewness, c_i_kurtosis,
            c_ci_mean, c_ci_sd, c_ci_median, c_ci_skewness, c_ci_kurtosis,
            total_tweets, total_followers, elite_count)
  comm_stats <- rbind(comm_stats, c_row)
}
# Convert the accumulated matrix to a data frame and name the columns.
comm_stats <- data.frame(comm_stats, row.names = NULL)
names(comm_stats) = c("community",
                      "mean_id", "sd_id", "median_id", "skewness_id", "kurtosis_id",
                      "mean_cid", "sd_cid", "median_cid", "skewness_cid", "kurtosis_cid",
                      "total_tweets", "total_followers", "elite_count")
library(ggrepel)
# Figure 8: communities as bubbles -- x = total followers, y = total
# tweets, size = number of elites, fill = median ideology. The first plot
# labels each bubble with its community id (ggrepel); the second is the
# same plot without labels.
# NOTE(review): referencing comm_stats$... inside aes() bypasses the data
# argument and is fragile (breaks under faceting/filtering); prefer bare
# column names.
#figure 8
ggplot(comm_stats, aes(x=total_followers, y=total_tweets)) +
  geom_point(color = "black", shape = 21, aes(fill = comm_stats$median_id,size=comm_stats$elite_count)) +
  geom_text_repel(aes(label = comm_stats$community),
                  size = 4) +
  scale_size_continuous(range = c(4, 11.4)) +
  scale_fill_gradient(low = "white", high = "salmon") +
  theme_bw() +
  theme(legend.position="none")
ggplot(comm_stats, aes(x=total_followers, y=total_tweets)) +
  geom_point(color = "black", shape = 21, aes(fill = comm_stats$median_id,size=comm_stats$elite_count)) +
  scale_size_continuous(range = c(4, 11.4)) +
  scale_fill_gradient(low = "white", high = "salmon") +
  theme_bw() +
  theme(legend.position="none")
comm_stats
|
dc35ca27198344bb5f7f18379415013437f453b1 | dbd38ce158841d9d94984629a70651d813cbdef8 | /inst/doc/vignette.R | 700fa32eaeafee2bae1d73657f430708dbb1a63f | [] | no_license | gaospecial/RVenn | ebf49636f11aa8ab804a62966f66f622f6cf8cd2 | 13841159034d84a58a8eecfbb12c9778ce0de3ef | refs/heads/master | 2022-01-16T18:54:46.219924 | 2019-07-18T20:40:02 | 2019-07-18T20:40:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,827 | r | vignette.R | ## ----setup, include = FALSE----------------------------------------------
## Auto-generated by knitr::purl() from the RVenn package vignette.  The
## "## ----...----" comments delimit the original Rmd chunks; the code is
## kept byte-identical so it stays in sync with the vignette source.
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## ---- message=FALSE------------------------------------------------------
library(purrr)
library(RVenn)
library(ggplot2)
## ------------------------------------------------------------------------
# Build 10 random sets of lowercase letters (set sizes drawn from 5:25).
set.seed(42)
toy = map(sample(5:25, replace = TRUE, size = 10),
          function(x) sample(letters, size = x))
toy[1:3] # First 3 of the sets.
## ------------------------------------------------------------------------
# Wrap the list of sets into an RVenn 'Venn' object.
toy = Venn(toy)
## ------------------------------------------------------------------------
# Intersection: of all sets, of a named/indexed subset, and pairwise.
overlap(toy)
## ------------------------------------------------------------------------
overlap(toy, c("Set_1", "Set_2", "Set_5", "Set_8"))
## ------------------------------------------------------------------------
overlap(toy, c(1, 2, 5, 8))
## ------------------------------------------------------------------------
overlap_pairs(toy, slice = 1:4)
## ------------------------------------------------------------------------
# Union: of all sets, of a subset, and pairwise.
unite(toy)
## ------------------------------------------------------------------------
unite(toy, c("Set_3", "Set_8"))
## ------------------------------------------------------------------------
unite(toy, c(3, 8))
## ------------------------------------------------------------------------
unite_pairs(toy, slice = 1:4)
## ------------------------------------------------------------------------
# Set difference (elements of the first set(s) absent from the second).
discern(toy, 1, 8)
## ------------------------------------------------------------------------
discern(toy, "Set_1", "Set_8")
## ------------------------------------------------------------------------
discern(toy, c(3, 4), c(7, 8))
## ------------------------------------------------------------------------
discern_pairs(toy, slice = 1:4)
## ---- fig.height=5, fig.width=8, fig.retina=3----------------------------
# Venn diagrams for two- and three-set slices.
ggvenn(toy, slice = c(1, 5))
## ---- fig.height=8, fig.width=8, fig.retina=3----------------------------
ggvenn(toy, slice = c(3, 6, 8))
## ---- fig.height=8, fig.width=8, fig.retina=3----------------------------
# Element-by-set membership heatmap, with and without clustering.
setmap(toy)
## ---- fig.height=8, fig.width=8, fig.retina=3----------------------------
setmap(toy, element_clustering = FALSE, set_clustering = FALSE)
## ---- fig.width=8, fig.height=5, fig.retina=3----------------------------
# Permutation-based enrichment test for the overlap of sets 6 and 7,
# plotted against its null distribution (red dashed line = observed overlap).
er = enrichment_test(toy, 6, 7)
er$Significance
qplot(er$Overlap_Counts, geom = "blank") +
  geom_histogram(fill = "lemonchiffon4", bins = 8, color = "black") +
  geom_vline(xintercept = length(overlap(toy, c(6, 7))), color = "firebrick2",
             size = 2, linetype = "dashed", alpha = 0.7) +
  ggtitle("Null Distribution") +
  theme(plot.title = element_text(hjust = 0.5)) +
  scale_x_continuous(name = "Overlap Counts") +
  scale_y_continuous(name = "Frequency")
|
77733c09ca09b265c12bc18e5351e3217318fa41 | 0ddf106e8771bf738e16cd544b133ca4aa8f9800 | /run_analysis.R | 9ae824d73d44efb6dfd13441678af9924a37059c | [] | no_license | dcudel/GettingAndCleaningData | 9e011d40372c066a93db00c0fcfabaa441ecb8c0 | 7d1c84e603a55c08175c83ae0df5521bd2f4a2ee | refs/heads/master | 2020-12-25T18:16:58.450200 | 2014-07-27T15:43:03 | 2014-07-27T15:43:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,114 | r | run_analysis.R | featurescount = 561
##------------------ Part 1 : Merge training & test dataset -------------------
## Getting & Cleaning Data course project.  Requires `featurescount` (561),
## defined at the top of this file, and the unzipped "UCI HAR Dataset" folder
## in the working directory.
## Full update: `=` assignments replaced with `<-` for consistency and
## comment typos fixed; all logic and outputs are unchanged.

## Load the activity label lookup and give meaningful names to its columns.
activities <- read.csv(".//UCI HAR Dataset//activity_labels.txt",
                       sep = " ",
                       header = FALSE,
                       col.names = c("activityid", "activitylabel"))
## Load the subject id datasets (one id per observation row).
subjecttrain <- read.csv(".//UCI HAR Dataset/train/subject_train.txt",
                         header = FALSE,
                         col.names ="subjectid",
                         colClasses = "factor")
subjecttest <- read.csv(".//UCI HAR Dataset//test//subject_test.txt",
                        header = FALSE,
                        col.names ="subjectid",
                        colClasses = "factor")
subjecttotal <- rbind(subjecttrain, subjecttest)
## Load the features' descriptions (second column holds the names).
featuresnames <- read.csv(".//UCI HAR Dataset//features.txt",
                          sep =" ",
                          header = FALSE,
                          stringsAsFactors = FALSE)[, 2]
## Keep only the mean/std features.
## NOTE(review): the pattern also matches meanFreq() columns; confirm whether
## those are intended to be part of the "mean" selection.
meanstandardfeatures <- featuresnames[grepl(".*(mean|std).*", featuresnames)]
idxSelectedFeatures <- featuresnames %in% meanstandardfeatures
## Build the widths vector for read.fwf.
## Each feature is 16 characters wide, split into 1 whitespace character that
## is always dropped + 15 characters that are kept only for mean/std columns.
## So we generate 561 occurrences of the pattern (-1, 15) for a column to be
## retained, and (-1, -15) for a column to be excluded.
widthsColumns <- rep(c(-1, 15), featurescount)
dim(widthsColumns) <- c(2, featurescount)
widthsColumns[2, ] <- widthsColumns[2, ]*ifelse(idxSelectedFeatures, 1, -1)
dim(widthsColumns) <- 2*featurescount
## Load the measurement datasets using the widths vector so only the selected
## columns are read.  `n` is an upper bound on the number of rows to read.
xtrain <- read.fwf(".//UCI HAR Dataset//train//X_train.txt",
                   widths = widthsColumns,
                   col.names = meanstandardfeatures,
                   header = FALSE,
                   comment.char = "",
                   n = 11500)
xtest <- read.fwf(".//UCI HAR Dataset//test//X_test.txt",
                  widths = widthsColumns,
                  col.names = meanstandardfeatures,
                  header = FALSE,
                  comment.char = "",
                  n = 3000)
xtotal <- rbind(xtrain, xtest)
## Read the activity id datasets and give meaningful names to the column.
ytrain <- read.csv(".//UCI HAR Dataset//train//y_train.txt",
                   header = FALSE,
                   col.names = "activityid")
ytest <- read.csv(".//UCI HAR Dataset//test//y_test.txt",
                  header = FALSE,
                  col.names = "activityid")
ytotal <- rbind(ytrain, ytest)
## Replace the numeric activity ids with their descriptive labels.
ytotal <- data.frame(activitylabel = merge(ytotal,
                                           activities,
                                           by = "activityid")[,2])
## Produce the final tidy dataset: subject, activity, then the features.
data <- cbind(subjecttotal, ytotal, xtotal)
## Clean up intermediate objects to free memory.
rm(list=c("subjecttest", "subjecttrain", "subjecttotal",
          "xtest", "xtrain", "xtotal",
          "ytest", "ytrain", "ytotal",
          "activities", "featuresnames", "idxSelectedFeatures",
          "widthsColumns", "meanstandardfeatures", "featurescount"))
##-- Part 2 : For each subjectid/activitylabel tuple calculate mean of all features --
## Average every feature column (all columns after the first two id columns)
## per subject/activity combination.
datacumul <- aggregate(data[,3:dim(data)[2]],
                       by = list(subjectid = data$subjectid, activitylabel = data$activitylabel),
                       mean)
write.csv(data, file = ".//TidyDataset.csv", row.names = FALSE)
write.csv(datacumul, file = ".//TidyAggregatedDataset.csv", row.names = FALSE)
|
010ea64e3b014849e9237aec1d5d7917f3b5e688 | c3f2f0b46e9b237533bd4c56b08f3c9e6a711def | /plot1-R.R | 4daffb3920aea93db3738f0c06fa648f9e2b5a87 | [] | no_license | kuhsibiris/ExData_Plotting1 | efeb244dfdb1ab0fc4cb00037137e5e54c354968 | cc0d6cd817a03a67bb72892504aa2ef8a5fc82e9 | refs/heads/master | 2021-01-11T09:47:22.636094 | 2016-12-29T03:59:59 | 2016-12-29T03:59:59 | 77,489,307 | 0 | 0 | null | 2016-12-27T23:48:21 | 2016-12-27T23:48:20 | null | UTF-8 | R | false | false | 740 | r | plot1-R.R | # This script makes the first graph plot1.png
# Exploratory Data Analysis, plot 1: histogram of Global Active Power.
# NOTE(review): despite the header comment, nothing here writes plot1.png --
# there is no png()/ggsave() call, so the plot only appears on screen.
# Confirm whether the file is saved elsewhere or add a graphics device.
# NOTE(review): rm(list = ls()) and the hard-coded setwd() are kept to
# preserve the script's behaviour, but both are discouraged: the path only
# exists on the original author's machine and ls-wipe clobbers the caller's
# workspace.
rm(list=ls())
setwd("/home/andres/exploratoryDataAnalHW1/ExData_Plotting1")
library(ggplot2)
library(dplyr)
# The raw file is ';'-separated (hence read.csv2); "?" marks missing values.
energyData <- read.csv2("household_power_consumption.txt")
energyData[energyData == "?"] <- NA
# The column arrives as character/factor; coerce to numeric for plotting.
energyData$Global_active_power <- energyData$Global_active_power %>%
  as.character() %>% as.numeric()
# Red histogram with 0.5 kW bins centred on 0.25.
# Fix: TRUE spelled out instead of the unsafe, reassignable T shorthand.
ggplot(data = energyData, aes(x = Global_active_power)) +
  geom_histogram(fill = "red", binwidth = 0.5,
                 na.rm = TRUE, color = "black", center = 0.25) +
  xlim(c(0, 7)) +
  theme(plot.title = element_text(hjust = 0.5),
        axis.line = element_line(color = "black")) +
  ylim(c(0, 1200)) +
  labs(x = "Global Active Power (kilowatts)", y = "Frequency", title = "Global Active Power")
9ad09b0b157de8592fbd6b74ed36ba2308d79be9 | 03ff62bcc1e282660716a999f8cdce25d655875b | /packrat/lib/x86_64-pc-linux-gnu/4.0.2/withr/tests/testthat/test-defer.R | 52e10cdae7a73b6beabb2b037c10e23da6336890 | [
"MIT"
] | permissive | rafaelortegar/R-sistema-de-recomendacion | edc881617860eeebcf6c026f61ddf1708b89a4de | 7e306ef74020d59f8707a178818e7abb3370e80f | refs/heads/main | 2022-12-22T15:27:23.160662 | 2020-10-07T04:40:52 | 2020-10-07T04:40:52 | 301,925,373 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,058 | r | test-defer.R | context("defer")
test_that("defer_parent works", {
  # local_file(): creates `path` and registers its cleanup in the *caller's*
  # frame via defer_parent(), so the file lives exactly as long as the
  # caller's scope.
  local_file <- function(path) {
    file.create(path)
    defer_parent(unlink(path))
  }
  # create tempfile path
  path <- tempfile()
  # use 'local_file' in a function; the file must exist while that scope runs
  local({
    local_file(path)
    stopifnot(file.exists(path))
  })
  # file is deleted as we leave 'local' scope
  expect_false(file.exists(path))
})
test_that("defer()'s global env facilities work", {
  # The global environment starts with no deferred handlers registered.
  expect_null(get_handlers(globalenv()))
  Sys.setenv(abcdefg = "abcdefg")
  # The first defer() on the global env announces itself with a message...
  expect_message(
    defer(print("howdy"), envir = globalenv()),
    "Setting deferred event"
  )
  # ...subsequent registrations stay silent (expect_message(NA) = no message).
  expect_message(
    local_envvar(c(abcdefg = "tuvwxyz"), .local_envir = globalenv()),
    NA
  )
  h <- get_handlers(globalenv())
  expect_length(h, 2)
  expect_equal(Sys.getenv("abcdefg"), "tuvwxyz")
  # Explicitly running the deferred handlers prints "howdy" and restores
  # the environment variable to its original value.
  expect_output(deferred_run(globalenv()), "howdy")
  expect_equal(Sys.getenv("abcdefg"), "abcdefg")
  # Cleared handlers must never run, and the handler list is emptied.
  defer(print("never going to happen"), envir = globalenv())
  deferred_clear(globalenv())
  h <- get_handlers(globalenv())
  expect_null(h)
})
|
f2f764fda7f1e17fbc1c930bd6d8187019f3f086 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/covTestR/examples/structureStatistics.Rd.R | 8f0385c813f132add6373641a392784c05ff1be8 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 276 | r | structureStatistics.Rd.R | library(covTestR)
### Name: Ahmad2015
### Title: Tests for Structure of Covariance Matrices
### Aliases: Ahmad2015 Chen2010 structureStatistics Fisher2012
###   LedoitWolf2002 Nagao1973 Srivastava2005 Srivastava2011
### ** Examples
# Chen (2010) covariance-structure test statistic on the first three
# measurement columns of the 50 Iris setosa rows (covTestR loaded above).
Chen2010(as.matrix(iris[1:50, 1:3]))
|
c50533d5061bedb7893d2add4fff353e0dd7480b | d6c74bac655e53cf6d417ab635bff1d2d3fa8576 | /Sesion-03/Reto-02/Solucion_Reto02.R | 6b9199e404bcc7d7a7c26a833f6e950d439f257a | [] | no_license | abrownrb/DA_R_BEDU_2 | 58f75ecdcead16c966c0807419b74999b1dd8879 | 6fe9ba47da5c759656459872a47a76b93c1eae9c | refs/heads/master | 2023-01-01T06:19:27.239662 | 2020-10-27T19:50:16 | 2020-10-27T19:50:16 | 277,850,824 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 862 | r | Solucion_Reto02.R | ######################################
########## Estadística con R #########
########## Sesión 02 ##########
########## Reto 02 ##########
######################################
# evaluate(): applies `func` to `dat` and returns the result.
# Backward-compatible generalization: match.fun() lets the function also be
# supplied by name, e.g. evaluate("sum", 1:3); every previously valid call
# (a function object) behaves exactly as before.
evaluate <- function(func, dat) {
  f <- match.fun(func)
  f(dat)
}
# Opens the help page for eval(); NOTE(review): this launches the help viewer
# when the script is sourced interactively -- harmless but usually unwanted
# in a script.
?eval
# Exercise evaluate() with several summary functions.
evaluate(sd,c(1.4, 3.6, 7.9, 8.8))
evaluate(sum, c(2, 4, 6))
evaluate(median, c(7, 40, 9))
evaluate(floor, 11.8)
# resta(): subtraction of the first two elements of a numeric vector,
# i.e. x[1] - x[2] for x = (x1, x2, ...); any further elements are ignored.
# Robustness fix: the original silently returned NA for vectors with fewer
# than two elements (because vector[2] is NA); that is now an explicit error.
resta <- function(vector) {
  if (!is.numeric(vector) || length(vector) < 2) {
    stop("'vector' must be numeric with at least two elements", call. = FALSE)
  }
  vector[1] - vector[2]
}
resta(c(4,1))
evaluate(resta,c(4,5))
# NOTE(review): this re-definition shadows the resta() defined above with an
# equivalent one-liner; keeping both is redundant.
resta <- function (x){x[1]-x[2]}
evaluate(resta,c(8,4,0))
# Return the element in the next-to-last position of the vector.
evaluate(function (x) {x[length(x)-1]},c(8,4,0))
|
927cc800513957c61cfddc3758bd92aaab9ed907 | 61a19c8062d70b540199a8adcad96128594ce069 | /R/src-test.r | e50e3e796f36ac268a168ac495377eb9c610f1fe | [
"MIT"
] | permissive | lionel-/dbplyr | 414a428022e812bf923710006b911bdb85953a8e | 90514df7e6721b8e2113dc05efb78074982ebfb6 | refs/heads/master | 2020-12-31T06:47:39.825750 | 2017-03-29T15:30:11 | 2017-03-29T15:30:11 | 86,605,490 | 0 | 0 | null | 2017-03-29T16:33:32 | 2017-03-29T16:33:32 | null | UTF-8 | R | false | false | 878 | r | src-test.r | #' A set of DBI methods to ease unit testing dplyr with DBI
#' @name src-test
#' @export
#' @param con A database connection.
#' @param x Object to transform
#' @param sql A string containing an sql query.
#' @param ... Other arguments passed on to the individual methods
DBITest <- function() {
  # Build a stub connection: an empty list tagged with the class that the
  # DBITestConnection S3 methods below dispatch on.
  con <- list()
  class(con) <- "DBITestConnection"
  con
}
#' @export
#' @rdname src-test
db_query_fields.DBITestConnection <- function(con, sql, ...) {
  # Stub: every query against the test connection reports a single column
  # named "field1"; `con`, `sql` and `...` are ignored.
  "field1"
}
#' @export
#' @rdname src-test
sql_escape_ident.DBITestConnection <- function(con, x) {
  # Quote identifiers with backticks (MySQL-style) via dplyr's sql_quote().
  sql_quote(x, "`")
}
#' @export
#' @rdname src-test
sql_translate_env.DBITestConnection <- function(con) {
  # Use dplyr's stock translators unchanged for scalar, aggregate and window
  # functions -- the test connection adds no custom SQL translation.
  dplyr::sql_variant(
    scalar = dplyr::sql_translator(.parent = dplyr::base_scalar),
    aggregate = dplyr::sql_translator(.parent = dplyr::base_agg),
    window = dplyr::sql_translator(.parent = dplyr::base_win)
  )
}
|
f1dc254f832f29e2460bea0939dc2f62013e01c7 | 8b7a9ebf84b4d1da3476f45f5b911d4cc575dd8a | /1. importing data.R | 09e6314289fdf3168839e05435666a91af491d2b | [] | no_license | benholding/global_mobility | a262a3b5b813ee2efcd00d023b6e8f4234881aa0 | 7ebf2beb27561c0e813517cffd0cb50b0de790e4 | refs/heads/master | 2023-08-28T18:20:45.306612 | 2021-10-19T11:34:33 | 2021-10-19T11:34:33 | 412,454,033 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,290 | r | 1. importing data.R | #Importing data
# Importing data ---------------------------------------------------------
# Builds the dataset of all globally eligible researchers for the mobility
# analysis.  Full update: the only behavioural-surface change is replacing
# the unsafe T shorthand with TRUE throughout (T is an ordinary, reassignable
# binding); all filtering logic and outputs are unchanged.
load("wos_data.RData")
pacman::p_load(dplyr, countrycode)
countries_to_regions <- read.csv("countries_to_regions.csv")
# Small correction: universities in North Macedonia were missing the country
# name (EURATOM rows are genuinely country-less and are left untouched).
publication_list_all <- publication_list_all %>%
  mutate(pub_country = if_else(is.na(pub_country) & pub_org_name != "EURATOM", "North Macedonia", pub_country))
#########################################################################
#########################################################################
########## GETTING A DATASET OF ALL ELIGIBLE AUTHORS GLOBALLY  ##########
#########################################################################
#########################################################################
# Keep publications only for authors that fulfil two criteria:
#   1. affiliated with a single country on their earliest publication(s);
#   2. working in disciplines that are well covered in WoS and do not have
#      abnormally high co-authorship rates.
####################################################################################
# STEP ONE: Find authors who are from a single country at order_of_publishing == 1 #
####################################################################################
length(unique(publication_list_all$cluster_id)) # at this point we have 525,892 researchers
step1 <- publication_list_all %>%
  # keep only each author's earliest article(s)
  filter(order_of_publishing == 1) %>%
  # authors with multiple affiliations have several rows here, so check that
  # all first-article affiliations share one country
  group_by(cluster_id) %>%
  mutate(number_of_distinct_countries = n_distinct(pub_country)) %>%
  filter(number_of_distinct_countries == 1,
         # the first affiliation must be known, otherwise we cannot locate
         # where the researcher started
         !is.na(pub_org)) %>%
  # join back to the full publication list so eligible authors keep ALL of
  # their articles, not just the earliest ones
  select(cluster_id) %>%
  distinct(cluster_id) %>%
  left_join(publication_list_all, by = "cluster_id") %>%
  ungroup()
length(unique(step1$cluster_id)) # we now have 501,491 researchers
length(unique(publication_list_all$cluster_id)) - length(unique(step1$cluster_id)) # change = -24401
#####################################################
#  STEP TWO: Keeping only well-covered disciplines  #
#####################################################
# Disciplines are retained when, on average, at least 60% of a publication's
# post-1980 references are themselves covered in WoS.
wos_covered_disciplines <- publication_info %>%
  distinct(ut, .keep_all = TRUE) %>%
  group_by(discipline) %>%
  summarise(proportion_of_refs_covered = mean(n_refs_1980_covered/n_refs, na.rm = TRUE)) %>%
  # Biology, Biomedical Research, Chemistry, Clinical Medicine, Earth and
  # Space, Engineering & tech, Health, Maths, Physics, & Psychology are all
  # >60% covered
  filter(proportion_of_refs_covered >= .6,
         !is.na(discipline))
step2 <- step1 %>%
  # attach publication metadata to obtain each article's discipline
  left_join(publication_info, by = c("ut")) %>%
  group_by(cluster_id) %>%
  # per author, count articles per discipline and keep the modal discipline
  count(discipline) %>%
  slice_max(n) %>%
  # keep authors whose main discipline is >= 60% covered
  filter(discipline %in% wos_covered_disciplines$discipline) %>%
  select(cluster_id, discipline) %>%
  left_join(step1, by = "cluster_id") %>%
  ungroup()
length(unique(step2$cluster_id)) # at this point we have 480,964 researchers
length(unique(step1$cluster_id)) - length(unique(step2$cluster_id)) # change = -20527
################################################################################
# STEP THREE: removing any subdisciplines that have too high coauthorship rates #
################################################################################
# Mean/median numbers of co-authors per specialty, used to spot outliers.
average_number_of_coauthors_per_specialty <- publication_info %>%
  distinct(ut, .keep_all = TRUE) %>%
  group_by(specialty) %>%
  summarise(mean_n_authors = mean(n_authors, na.rm = TRUE),
            median_n_authors = median(n_authors, na.rm = TRUE)) %>%
  arrange(desc(mean_n_authors)) # Nuclear & Particle Physics has 139 mean authors compared to the next nearest 18.8, so anyone working primarily in this specialty will be removed
# Determine each researcher's main specialty and exclude researchers whose
# main specialty is "Nuclear & Particle Physics".
step3 <- step2 %>%
  select(-discipline) %>% # dropped because the join below re-adds it
  left_join(publication_info, by = c("ut")) %>%
  group_by(cluster_id) %>%
  add_count(specialty, name = "n_specialty_articles") %>% # articles per specialty, per author
  add_count(discipline, name = "n_discipline_articles") %>% # articles per discipline, per author
  distinct(cluster_id, discipline, specialty, .keep_all = TRUE) %>%
  select(cluster_id, discipline, n_discipline_articles, specialty, n_specialty_articles) %>%
  filter(!is.na(discipline),
         !is.na(specialty)) %>% # keep only individuals with a main discipline and specialty
  # order so each researcher's top discipline (and, within it, top specialty)
  # comes first, then take that top row
  arrange(cluster_id, desc(n_discipline_articles), desc(n_specialty_articles)) %>%
  slice(1) %>%
  select(cluster_id, specialty) %>%
  distinct(cluster_id, .keep_all = TRUE) %>%
  filter(specialty != "Nuclear & Particle Physics") %>%
  left_join(step2, by = "cluster_id")
length(unique(step3$cluster_id)) # at this point we have 474,930 researchers
length(unique(step2$cluster_id)) - length(unique(step3$cluster_id)) # change = -6034
###################################################################
# STEP FOUR: MAKING THE FINAL DATASET OF ALL ELIGIBLE RESEARCHERS #
###################################################################
# Choose one institute to represent the "origin" of each researcher.
step4 <-
  step3 %>%
  filter(order_of_publishing == 1) %>% # everyone at order_of_publishing == 1...
  select(cluster_id, pub_org_name) %>%
  left_join(publication_list_all, by = c("cluster_id", "pub_org_name")) %>% # ...and all their publications
  group_by(cluster_id, pub_org_name) %>%
  # career-long publication count per affiliation
  mutate(number_of_publications_with_this_affilation = n()) %>%
  distinct(cluster_id, pub_org_name, number_of_publications_with_this_affilation, .keep_all = TRUE) %>%
  select(cluster_id, pub_org_name, number_of_publications_with_this_affilation, lr_univ_id, pub_country) %>%
  group_by(cluster_id) %>%
  # this ordering matters: ties on publication count are broken by Leiden
  # ranking, then alphabetically
  arrange(cluster_id, desc(number_of_publications_with_this_affilation), lr_univ_id) %>%
  mutate(origin_institution = first(pub_org_name), # institution with the most career publications among first-article affiliations
         origin_country = first(pub_country), # origin country of the researcher
         origin_leiden_ranked = first(if_else(is.na(lr_univ_id), 0, 1))) %>% # is the origin institute Leiden ranked?
  select(cluster_id, origin_institution, origin_country, origin_leiden_ranked) %>%
  distinct(cluster_id, .keep_all = TRUE) %>%
  left_join(step3, by = "cluster_id") # author info incl. origin + each UT (no further metadata yet)
# Final dataset with descriptive information about all potential matches.
global_mobility_eligible_researchers <- step4 %>%
  filter(origin_institution == pub_org_name) %>%
  arrange(cluster_id, order_of_publishing) %>%
  group_by(cluster_id) %>%
  mutate(final_article_at_origininstitution_year = last(career_year)) %>%
  distinct(cluster_id, .keep_all = TRUE) %>%
  select(cluster_id, final_article_at_origininstitution_year) %>%
  ungroup() %>%
  left_join(step4, by = "cluster_id") %>%
  left_join(publication_info %>% select(ut, n_authors, n_countries), by = "ut") %>% # number of authors and countries per paper
  mutate(n_coauthors = n_authors - 1) %>%
  left_join(countries_to_regions, by = "origin_country") %>% # adding in region
  rename(origin_region = region)
length(unique(global_mobility_eligible_researchers$cluster_id)) # at this point we have 474,930 researchers
length(unique(publication_list_all$cluster_id)) - length(unique(global_mobility_eligible_researchers$cluster_id)) # total exclusion to "enrollment" = 50,962
save(global_mobility_eligible_researchers, file = "global_mobility_eligible_researchers.RData")
|
f5cc7b0db8f70dc34df62b9ee18e7513655688ee | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/medfate/examples/spwb.Rd.R | 59d3a6ca31b4fa3f6b73706335033338fbe9e734 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,143 | r | spwb.Rd.R | library(medfate)
### Name: spwb
### Title: Soil-plant water balance
### Aliases: spwb spwb.resetInputs
### ** Examples
## Example code extracted from the medfate spwb() help page: runs a daily
## soil-plant water-balance simulation in both transpiration modes.
#Load example daily meteorological data
data(examplemeteo)
#Load example plot plant data
data(exampleforest)
#Default species parameterization
data(SpParamsMED)
#Initialize soil with default soil params (2 layers)
examplesoil = soil(defaultSoilParams(2))
#Initialize control parameters
control = defaultControl()
#Initialize input
x1 = forest2spwbInput(exampleforest,examplesoil, SpParamsMED, control)
#Call simulation function
S1<-spwb(x1, examplesoil, examplemeteo, elevation = 100)
#Plot results
plot(S1)
#Monthly summary (averages) of soil water balance
summary(S1, freq="months",FUN=mean, output="Soil")
#Initialize a second soil object with default soil params (2 layers)
examplesoil2 = soil(defaultSoilParams(2))
#Switch to 'Complex' transpiration mode
control$transpirationMode="Complex"
#Initialize input
x2 = forest2spwbInput(exampleforest,examplesoil2, SpParamsMED, control)
#Call simulation function (rows 100:105 only, to keep the example fast)
S2<-spwb(x2, examplesoil2, examplemeteo[100:105,], latitude = 41.82592, elevation = 100)
|
ea693cd62cf653bde002c72213b2b19e6e6a135d | 1e37665693349a7170a7169aecaa5ffee10f9ddc | /man/metaLocalized.Rd | d71b714e53235f9bb7bf3b9f3ceae38123201784 | [] | no_license | mbannert/gateveys | 25e515431b3829f7a087040da4d4994190e21ab1 | fd7478a4b1619d8e6d7ff54a68eac1c45c96157c | refs/heads/master | 2016-09-08T05:04:22.091404 | 2013-06-28T09:43:22 | 2013-06-28T09:43:22 | 7,775,562 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,856 | rd | metaLocalized.Rd | \name{metaLocalized}
\alias{metaLocalized}
\alias{metaLocalized-class}
\title{Class for Localized Meta Information}
\description{
  This is a template for a class that organizes meta
  attributes that are language-variant. You can attach
  multiple attributes of this class to a time series to
  provide meta information in multiple languages.
}
\details{
\section{Slots}{ \describe{ \item{\code{title}:}{Object
of class \code{"character"}, character or logical
descriptive title } \item{\code{selectedItem}:}{Object of
class \code{"character"}, character or logical
categorical item of interest.}
\item{\code{description}:}{Object of class
\code{"character"}, containing character or logical
elaborate description} \item{\code{aLevel}:}{Object of
class \code{"character"}, containing character or logical
aggregation Level} \item{\code{selectedGroup}:}{Object of
class \code{"character"}, containing character or logical
group that was actually selected in this time series on a
particular aggregation Level.}
\item{\code{survey}:}{Object of class \code{"character"},
containing character or logical constant indicating the
survey the questions stem from.}
\item{\code{questionWording}:}{Object of class
\code{"character"}, containing character or logical
wording of the question. Can be taken from a dictionary
assignments file if available.}
\item{\code{itemLevels}:}{Object of class
\code{"character"}, containing character or logical all
possible levels of answer.}
\item{\code{weightingInformation}:}{Object of class
\code{"character"}, containing character or logical
information on the weighting that was used in the
aggregation process.} } }
}
\note{
This is an experimental class to handle localized
metadata.
}
\author{
Matthias Bannert
}
\seealso{
\code{\link{metaFixed}}
}
|
16c5270c54ccc5ecfd9834253136a9abfc6690e7 | 445050b2d62fcb0b1b796c33c58f3d51f34d51e8 | /man/factor_to_character.Rd | 616f2d9df5d52cd9f8287f822b284fff5a645f16 | [] | no_license | cran/diffdf | 9c1c5129ebf95064a7c41ab105252801b935c188 | 02d02358fb63059c465a270757586f0680c141a8 | refs/heads/master | 2021-06-03T21:04:15.623148 | 2020-03-17T22:10:03 | 2020-03-17T22:10:03 | 132,450,100 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 465 | rd | factor_to_character.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc_functions.R
\name{factor_to_character}
\alias{factor_to_character}
\title{factor_to_character}
\usage{
factor_to_character(dsin, vars = NULL)
}
\arguments{
\item{dsin}{input dataframe}
\item{vars}{variables to consider for conversion. Default NULL will consider
every variable within the dataset}
}
\description{
Takes a dataframe and converts any factor variables to character
}
|
1886d9932cacf8a77a04d8f4a77e4f2cc28e3f02 | ef572bd2b0515892d1f59a073b8bf99f81d6a734 | /data-raw/COP23/update_cop23_de_coc_co_map.R | 06c29a9f628c3b7b6b07db2f06c6c75314f0f422 | [
"CC0-1.0"
] | permissive | pepfar-datim/datapackr | 5bc604caa1ae001b6c04e1d934c0c613c59df1e6 | 9275632673e45948db6846513a53c1436cfc0e47 | refs/heads/master | 2023-08-30T23:26:48.454382 | 2023-08-11T13:01:57 | 2023-08-11T13:01:57 | 170,350,211 | 9 | 7 | CC0-1.0 | 2023-09-11T21:53:24 | 2019-02-12T16:19:47 | R | UTF-8 | R | false | false | 1,507 | r | update_cop23_de_coc_co_map.R | # Point to DATIM login secrets ----
# Point to DATIM login secrets ----
# NOTE(review): this assumes SECRETS_FOLDER ends with a path separator
# (e.g. "~/.secrets/"); otherwise the file name is glued onto the folder
# name.  Confirm the convention before "fixing" with file.path().
secrets <- Sys.getenv("SECRETS_FOLDER") %>% paste0(., "datim.json")
# datimutils::loginToDATIM("~/.secrets/datim.json")
datimutils::loginToDATIM(secrets)
# COP year for which the DataElement/COC/CategoryOption map is rebuilt.
cop_year <- 2023
# FIX: pass the cop_year variable instead of repeating the literal 2023 --
# previously the variable was defined but never used, so changing it had no
# effect on the call below.
dp_map <- datapackr::update_de_coc_co_map(cop_year = cop_year,
                                          d2_session = dynGet("d2_default_session",
                                                              inherits = TRUE))
#dp_map <- update_de_coc_co_map(cop_year, d2_session)
# Compare old and new maps for accuracy ####
new <- dp_map %>%
  dplyr::select(-categoryoption_specified)
# Rows present in only one of the two maps: full join on the key columns,
# then keep rows where the join introduced NAs on either side.
compare_diffs <- datapackr::cop23_map_DataPack_DATIM_DEs_COCs %>%
  dplyr::select(-categoryoption_specified) %>%
  dplyr::full_join(new, by = c("indicator_code",
                               "dataelementuid",
                               "categoryoptioncombouid",
                               "FY",
                               "valid_ages.name", "valid_ages.id", "valid_sexes.name",
                               "valid_sexes.id", "valid_kps.name", "valid_kps.id",
                               "categoryOptions.ids", "support_type", "resultstatus", "resultstatus_inclusive")) %>%
  dplyr::filter(is.na(indicator_code) | is.na(dataelementname.x) | is.na(dataelementname.y))
waldo::compare(datapackr::cop23_map_DataPack_DATIM_DEs_COCs, dp_map)
# Overwrite the packaged dataset with the freshly built map and save it.
cop23_map_DataPack_DATIM_DEs_COCs <- dp_map
save(cop23_map_DataPack_DATIM_DEs_COCs, file = "./data/cop23_map_DataPack_DATIM_DEs_COCs.rda", compress = "xz")
|
de185178836b2757260e68e195e4037772686f4b | b3b1b011ab46f024467282baeff0f160e2e91e31 | /man/registerParallelBackend.Rd | a01442a42355ea870949ce9dd3eb7492661580e9 | [
"Apache-2.0"
] | permissive | schuemie/PatientLevelPrediction | 5265629020a2406f9f96a4975aa3ab35c9663b92 | 0b59c97a53ab4c6aaf6236048d5bcc9363c2716e | refs/heads/master | 2020-09-05T00:50:10.021513 | 2019-11-06T07:46:44 | 2019-11-06T07:46:44 | 88,721,641 | 0 | 1 | null | 2019-05-01T04:30:23 | 2017-04-19T08:40:26 | R | UTF-8 | R | false | true | 724 | rd | registerParallelBackend.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Parallel.R
\name{registerParallelBackend}
\alias{registerParallelBackend}
\title{registerParallelBackend}
\usage{
registerParallelBackend(cores = NULL, logical = TRUE)
}
\arguments{
\item{cores}{the number of cores to use for multi core processing}
\item{logical}{whether to consider logical or physical cores}
}
\description{
Registers a parallel backend for multi core processing. The
number of cores will be detected automatically, unless specified otherwise.
}
\examples{
\dontrun{
# detect logical cores automatically
registerParallelBackend()
# use four physical cores
numCores <- 4
registerParallelBackend(numCores, logical = FALSE)
}
}
|
4bd8511ed991ec4b1b27ea231d300d3bbb8d8823 | 224807bcc64ee023d59db89da1ff436a2aa44ba8 | /man/createArgusInput.Rd | 56998b0a0fc8698e5f255d2e2c176a752476c18f | [] | no_license | sdcTools/sdcTable | cf963624c44510e8c77c6b4ba83fe064a84c168c | ade7328a1c73b3fa2d7f17b725ad193389d0bde6 | refs/heads/master | 2023-09-03T14:59:24.853963 | 2023-08-16T06:27:56 | 2023-08-16T06:27:56 | 61,604,088 | 7 | 5 | null | 2019-10-04T10:14:39 | 2016-06-21T05:14:13 | R | UTF-8 | R | false | true | 6,001 | rd | createArgusInput.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createArgusInput.R
\name{createArgusInput}
\alias{createArgusInput}
\title{Create input files for tauArgus}
\usage{
createArgusInput(
obj,
typ = "microdata",
verbose = FALSE,
path = getwd(),
solver = "FREE",
method,
primSuppRules = NULL,
responsevar = NULL,
shadowvar = NULL,
costvar = NULL,
requestvar = NULL,
holdingvar = NULL,
...
)
}
\arguments{
\item{obj}{an object of class \linkS4class{sdcProblem} from \code{sdcTable}}
\item{typ}{(character) either \code{"microdata"} or \code{"tabular"}}
\item{verbose}{(logical) if TRUE, the contents of the batch-file are written to the prompt}
\item{path}{path into which (temporary) files, among them the batch-files, will be written.
Each file written to this folder belonging to the same problem contains a random id in its filename.}
\item{solver}{which solver should be used. allowed choices are
\itemize{
\item \code{"FREE"}
\item \code{"CPLEX"}
\item \code{"XPRESS"}
}
In case \code{"CPLEX"} is used, it is also mandatory to specify argument \code{licensefile} which needs to be
the absolute path to the cplex license file}
\item{method}{secondary cell suppression algorithm, possible choices include:
\itemize{
\item \code{"MOD"}: modular approach. If specified, the following arguments in \code{...} can additionally be set:
\itemize{
\item \code{MaxTimePerSubtable}: number specifiying max. time (in minutes) spent for each subtable
\item \code{SingleSingle}: 0/1 (default=1)
\item \code{SingleMultiple}: 0/1 (default=1)
\item \code{MinFreq}: 0/1 (default=1)
}
\item \code{"GH"}: hypercube. If specified, the following arguments in \code{...} can additionally be set:
\itemize{
\item \code{BoundPercentage}: Default percentage to proctect primary suppressed cells, default 75
\item \code{ModelSize}: are we dealing with a small (0) or large (1) model? (default=1)
\item \code{ApplySingleton}: should singletons be additionally protected? 0/1 (default=1)
}
\item \code{"OPT"}: optimal cell suppression. If specified, the following arguments in \code{...} can additionally be set:
\itemize{
\item \code{MaxComputingTime}: number specifiying max. allowed computing time (in minutes)
}
}}
\item{primSuppRules}{rules for primary suppression, provided as a
\code{list}. For details, please have a look at the examples below.}
\item{responsevar}{which variable should be tabulated (defaults to frequencies). For details see tau-argus manual section 4.4.4.}
\item{shadowvar}{if specified, this variable is used to apply the safety rules, defaults to \code{responsevar}. For details
see tau-argus manual section 4.4.4.}
\item{costvar}{if specified, this variable describes the costs of suppressing each individual cell. For details see tau-argus
manual section 4.4.4.}
\item{requestvar}{if specified, this variable (0/1-coded) contains information about records that request protection.
Records with 1 will be protected in case a corresponding request rule matches. It is ignored, if tabular input is used.}
\item{holdingvar}{if specified, this variable contains information about records that should be grouped together.
It is ignored, if tabular input is used.}
\item{...}{allows to specify additional parameters for selected suppression-method as described above
as well as \code{licensefile} in case \code{"CPLEX"} was specified in argument \code{solver}.}
}
\value{
the filepath to the batch-file
}
\description{
create required input-files and batch-file for tau-argus given an \linkS4class{sdcProblem} object
}
\examples{
\dontrun{
# loading micro data from sdcTable
utils::data("microdata1", package="sdcTable")
microdata1$num1 <- rnorm(mean = 100, sd = 25, nrow(microdata1))
microdata1$num2 <- round(rnorm(mean = 500, sd=125, nrow(microdata1)),2)
microdata1$weight <- sample(10:100, nrow(microdata1), replace = TRUE)
dim_region <- hier_create(root = "Total", nodes = LETTERS[1:4])
dim_region_dupl <- hier_create(root = "Total", nodes = LETTERS[1:4])
dim_region_dupl <- hier_add(dim_region_dupl, root = "B", nodes = c("b1"))
dim_region_dupl <- hier_add(dim_region_dupl, root = "D", nodes = c("d1"))
dim_gender <- hier_create(root = "Total", nodes = c("male", "female"))
dimList <- list(region = dim_region, gender = dim_gender)
dimList_dupl <- list(region = dim_region_dupl, gender = dim_gender)
dimVarInd <- 1:2
numVarInd <- 3:5
sampWeightInd <- 6
# creating an object of class \code{\link{sdcProblem-class}}
obj <- makeProblem(
data = microdata1,
dimList = dimList,
dimVarInd = dimVarInd,
numVarInd = numVarInd,
sampWeightInd = sampWeightInd)
# creating an object of class \code{\link{sdcProblem-class}} containing "duplicated" codes
obj_dupl <- makeProblem(
data = microdata1,
dimList = dimList_dupl,
dimVarInd = dimVarInd,
numVarInd = numVarInd,
sampWeightInd = sampWeightInd)
## create primary suppression rules
primSuppRules <- list()
primSuppRules[[1]] <- list(type = "freq", n = 5, rg = 20)
primSuppRules[[2]] <- list(type = "p", n = 5, p = 20)
# other supported formats are:
# list(type = "nk", n=5, k=20)
# list(type = "zero", rg = 5)
# list(type = "mis", val = 1)
# list(type = "wgt", val = 1)
# list(type = "man", val = 20)
## create batchInput object
bO_md1 <- createArgusInput(
obj = obj,
typ = "microdata",
path = tempdir(),
solver = "FREE",
method = "OPT",
primSuppRules = primSuppRules,
responsevar = "num1")
bO_td1 <- createArgusInput(
obj = obj,
typ = "tabular",
path = tempdir(),
solver = "FREE",
method = "OPT")
bO_td2 <- createArgusInput(
obj = obj_dupl,
typ = "tabular",
path = tempdir(),
solver = "FREE",
method = "OPT")
## in case CPLEX should be used, it is required to specify argument licensefile
bO_md2 <- createArgusInput(
obj = obj,
typ = "microdata",
path = tempdir(),
solver = "CPLEX",
method = "OPT",
primSuppRules = primSuppRules,
responsevar = "num1",
licensefile = "/path/to/my/cplexlicense")
}
}
|
db8fae5771df6f2f403eec3d95ada62c789141c6 | 8212e31e22ba5de8be1910343c94cd4e23b792a5 | /Script/SNPs/LDplot.R | 310677f32741d8b29435ae509eb75f6b3f2a12a4 | [
"MIT"
] | permissive | HY29/Tea_GPGWAS | bcac6832b865e93bdaf5ef28fa4d68cfb418e999 | ee65370df960caf0845a9816f48ff3f2f3a6c146 | refs/heads/main | 2023-03-07T13:29:04.146486 | 2021-02-22T09:13:19 | 2021-02-22T09:13:19 | 332,383,569 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 856 | r | LDplot.R | ############################################################
### LD decay estimation
### Draft script 200811 HY
############################################################
# Load packages: ggplot2 for plotting, cowplot for its publication theme
library(ggplot2)
library(cowplot)
# Pairwise LD table. NOTE(review): presumably vcftools --geno-r2 style
# output (columns POS1, POS2, R.2) -- confirm.
d <- read.table("list_gwas_maf0.05_miss0.7_ld_window_50kb.geno.ld", header=TRUE)
# One row per SNP pair: physical distance (bp) and its r^2 value.
# Built directly with named columns; the original wrapped the vector in
# as.data.frame() and renamed columns afterwards, which is equivalent.
LD <- data.frame(distance = d$POS2 - d$POS1, R2 = d$R.2)
plot(LD)
write.csv(LD, "LD_miss0.7_w50kb.csv")
# Mean r^2 over all pairs
mean(LD$R2)
# Visualization by ggplot2. print() is required so the plot is actually
# rendered onto the PDF device when this script is run via source():
# ggplot objects only auto-print at an interactive top level, so the
# original produced an empty PDF in non-interactive runs.
pdf("LDplot_miss0.7.pdf",width=3, height=3)
print(
  ggplot(LD, aes(y=R2, x=distance))+
    #guides(color=FALSE)+
    geom_point(size=1)+
    geom_smooth(method="loess")+
    theme_cowplot(font_size = 12, line_size = 1.0)+
    theme(axis.text.x = element_text(angle=0))+
    ylab("Linkage disequilibrium (r2)")+
    xlab("Distance (bp)")
)
dev.off()
|
a77e34e5af73b5c888b21345b30b0c81d182c1b7 | 05c8302fb62bdfb080aa863df95d36aa46f3f35e | /man/dgirt_fit-class.Rd | f72e2ffdbc07679f6bfd0cfa7f65559213a3b9c7 | [] | no_license | fsolt/dgo | ad7a84ca863cb60f4bd4a1778b14d9d7f5d7abf6 | 2961b5b99c27c59f1889ee8cb90c5b30ad223d9c | refs/heads/master | 2021-06-01T23:51:14.349211 | 2018-07-17T11:34:20 | 2018-07-17T11:34:20 | 95,905,521 | 0 | 0 | null | 2017-06-30T16:22:24 | 2017-06-30T16:22:24 | null | UTF-8 | R | false | true | 1,168 | rd | dgirt_fit-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class-dgirt_fit.r
\docType{class}
\name{dgirt_fit-class}
\alias{dgirt_fit-class}
\alias{dgirt_fit}
\title{A class for fitted dynamic group IRT models}
\description{
\code{\link{dgirt}} returns a fitted model object of class \code{dgirt_fit},
which inherits from \code{\link{dgo_fit}}.
}
\details{
dgo 0.2.8 deprecated the \code{dgirtfit} class and replaced it with the
\code{\link{dgirt_fit}} class.
}
\section{Slots}{
\describe{
\item{\code{dgirt_in}}{\code{\link{dgirtin-class}} data used to fit the model.}
}}
\examples{
data(toy_dgirtfit)
# summarize the fitted results
summary(toy_dgirtfit, pars = 'xi')
# get posterior means with a convenience function
get_posterior_mean(toy_dgirtfit, pars = 'theta_bar')
# generally apply functions to posterior samples after warmup; n.b.
# `as.array` is iterations x chains x parameters so `MARGIN = 3` applies
# `FUN` over iterations and chains
apply(as.array(toy_dgirtfit, pars = 'xi'), 3, mean)
# access the posterior samples
head(as.data.frame(toy_dgirtfit, pars = 'theta_bar'))
}
\seealso{
\code{\link{dgmrp_fit}} \code{\link{dgo_fit}}
}
|
65b01ad72dfcdcdead959cda0db42aed313eb8e4 | f9549eed2e2917d38a76a5da14bc7d05deaf38d3 | /Text Mining and Emotion Mining - Amazon - Review.R | f56b22382dc5faef332ef6626e14890877bf770f | [] | no_license | umesh-123-rp/Data-Science-Text-Mining | 0d3689fce0418c4d5e93dd2327245473f4127672 | fdf826eac96599b62627d06efacb59fcb2240831 | refs/heads/main | 2023-01-27T19:49:05.100304 | 2020-12-12T07:55:18 | 2020-12-12T07:55:18 | 320,777,308 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,138 | r | Text Mining and Emotion Mining - Amazon - Review.R | # Extract reviews of any product from ecommerce website like amazon
# Scrape Amazon product reviews (here: the book "The Tower of Nero") and
# save them to disk; emotion mining on the saved text happens further below.
# Load the required packages (rvest scrapes HTML, magrittr supplies %>%).
install.packages(c("rvest","XML","magrittr"))
library(rvest)
library(XML)
library(magrittr)
# Base URL of the product's review pages on amazon.in
aurl <- "https://www.amazon.in/Tower-Nero-Trials-Apollo-Book/product-reviews/0141364084/ref=cm_cr_getr_d_paging_btm_prev_2?ie=UTF8&reviewerType=all_reviews"
amazon_reviews <- NULL
# Fetch 10 review pages and append each page's review texts to one vector.
# NOTE(review): paste(aurl, i, sep = "=") merely appends "=i" to the URL;
# Amazon pagination normally uses "&pageNumber=i" -- confirm the ten
# requests actually return different pages.
for (i in 1:10){
  murl <- read_html(as.character(paste(aurl,i,sep="=")))
  rev <- murl %>% html_nodes(".review-text") %>% html_text()
  # Grows the vector each iteration; acceptable for only 10 pages.
  amazon_reviews <- c(amazon_reviews,rev)
}
length(amazon_reviews)
amazon_reviews
# Save the scraped reviews as a one-column text file in the current working
# directory (read back later via read.csv, column "x").
write.table(amazon_reviews,"towerofnero.txt",row.names = F)
getwd()
# Install the text-mining packages used by the preprocessing pipeline
install.packages("tm") # for text mining
install.packages(c("SnowballC","textstem")) # for stemming / lemmatization
install.packages("wordcloud") # word-cloud generator
install.packages("RColorBrewer") # color palettes
library('tm')
library("SnowballC")
library("wordcloud")
library("RColorBrewer")
library('textstem')
# Import the scraped reviews saved by the scraping step (one column "x")
tower<-read.csv("D:\\Data\\IN102385\\My Documents\\towerofnero.txt")
x <- as.character(tower$x)
x <- iconv(x, "UTF-8") # normalize the text to UTF-8 (8-bit Unicode
# Transformation Format) before building the corpus
# Wrap the review texts in a tm corpus so tm_map() transformations apply
x <- Corpus(VectorSource(x))
inspect(x[1:3])
# Clean-up pipeline: first replace separator symbols with spaces so that
# adjacent words don't fuse when punctuation is stripped later.
toSpace <- content_transformer(function(y,pattern) { return (gsub(pattern, " ",y))})
x1 <- tm_map(x, toSpace, "-")
inspect(x1[1])
x1 <- tm_map(x1, toSpace, "!")
inspect(x1[1])
x1 <- tm_map(x1, toSpace, "'")
inspect(x1[1])
# Convert the text to lower case
x1 <- tm_map(x1, tolower)
inspect(x1[1])
# Remove numbers
x1 <- tm_map(x1, removeNumbers)
# Remove punctuation
x1 <- tm_map(x1, removePunctuation)
inspect(x1[1])
# Remove common English stopwords
x1 <- tm_map(x1, removeWords, stopwords('english'))
# Remove custom stopwords still present after the standard list
# (text was lowercased above, so these are matched in lower case)
inspect(x1[1])
x1 <- tm_map(x1, removeWords, c("the","will","im"))
inspect(x1[1])
# Collapse the runs of whitespace created by the removals above
x1 <- tm_map(x1, stripWhitespace)
inspect(x1[1])
# Lemmatize. NOTE(review): textstem::lemmatize_words() is documented for
# character vectors; applied here to a tm corpus it may be a no-op --
# confirm (lemmatize_strings(), or mapping over content, may be intended).
x1<-lemmatize_words(x1)
inspect(x1[1])
# Term-document matrix (TDM): rows = terms, columns = documents; converts
# the unstructured corpus into a structured count matrix.
tdm <- TermDocumentMatrix(x1)
tdm <- as.matrix(tdm)
tdm
# Term frequencies, most frequent first
v <- sort(rowSums(tdm),decreasing=TRUE)
v
d <- data.frame(word = names(v),freq=v)
head(d, 10)
# Bar plot of terms occurring at least 20 times
w <- rowSums(tdm)
w_sub <- subset(w, w >= 20)
barplot(w_sub, las=3, col = rainbow(20))
# Drop filler terms that appear in almost every document, then rebuild TDM
x1 <- tm_map(x1, removeWords, c('besides','just','also'))
x1 <- tm_map(x1, stripWhitespace)
tdm <- TermDocumentMatrix(x1)
tdm <- as.matrix(tdm)
tdm
w1 <- rowSums(tdm)
# Word cloud over all remaining terms and their frequencies
wordcloud(words = names(w1), freq = w1, random.order = F, colors = rainbow(20), scale=c(2,.2), rot.per = 0.3)
# Load positive/negative opinion lexicons. Interactive: file.choose() asks
# for positive-words.txt first, then negative-words.txt; lines starting
# with ";" are treated as comments.
pos.words = scan(file.choose(), what="character", comment.char=";") # read-in positive-words.txt
neg.words = scan(file.choose(), what="character", comment.char=";") # read-in negative-words.txt
pos.words = c(pos.words,"wow", "kudos", "hurray") # extend with custom positive words
# Positive word cloud: terms whose names appear in the positive lexicon.
# NOTE(review): this uses `w`, the frequency vector computed BEFORE
# 'besides'/'just'/'also' were removed; `w1` is the refreshed vector --
# confirm which one is intended.
pos.matches = match(names(w), c(pos.words))
pos.matches = !is.na(pos.matches)
freq_pos <- w[pos.matches]
p_names <- names(freq_pos)
wordcloud(p_names,freq_pos,scale=c(3.5,.2),random.order = F,colors = rainbow(20))
# Negative word cloud: terms whose names appear in the negative lexicon
neg.matches = match(names(w), c(neg.words))
neg.matches = !is.na(neg.matches)
freq_neg <- w[neg.matches]
n_names <- names(freq_neg)
wordcloud(n_names,freq_neg,scale=c(3.5,.2),random.order=F,colors = brewer.pal(8,"Dark2"))
# Terms correlated (correlation >= 0.6 across documents) with "tyrant"
tdm<-TermDocumentMatrix(x1)
findAssocs(tdm,c("tyrant"),corlimit=0.6)
# Emotion mining / sentiment analysis of the saved reviews (syuzhet package)
install.packages("syuzhet")
library("syuzhet")
# Fixed: the original `library(lubridate,ggplot2)` passed ggplot2 to
# library()'s `help` argument instead of loading it; ggplot2 is loaded below.
library(lubridate)
library(ggplot2)
library(scales)
library(dplyr)
library(reshape2)
# Re-read the raw review text saved by the scraping step
tower <- readLines("D:\\Data\\IN102385\\My Documents\\towerofnero.txt")
tower<-iconv(tower,"UTF-8")
# Split the reviews into individual sentences for sentence-level scoring
emotion<-get_sentences(tower)
# Score each sentence on the 8 NRC emotions plus positive/negative valence
nrc_data<-get_nrc_sentiment(emotion)
head(nrc_data)
# Bar plot of total counts per emotion (windows() opens a graphics device;
# Windows-only)
windows()
barplot(colSums(nrc_data),las=1,col=rainbow(10),ylab='count', main='Emotion Plot')
# Sentence-level sentiment under four different lexicons/methods
sentiment_syuzhet<- get_sentiment(emotion,method="syuzhet")
sentiment_vector <- get_sentiment(emotion,method="bing")
sentiment_afinn <- get_sentiment(emotion, method="afinn")
sentiment_nrc <- get_sentiment(emotion, method="nrc")
sentiments <- data.frame(sentiment_syuzhet,sentiment_vector,sentiment_afinn,sentiment_nrc)
sum(sentiment_afinn)
mean(sentiment_afinn)
summary(sentiment_afinn)
# Sentiment trajectory across the narrative (bing scores, sentence order)
windows()
plot(sentiment_vector,type='l',main='plot trajectory',xlab="Narrative time",ylab="Emotional Variance")
# Sentence with the most negative emotional valence
negative <- emotion[which.min(sentiment_syuzhet)]
negative
# Sentence with the most positive emotional valence
positive <- emotion[which.max(sentiment_syuzhet)]
positive
## CONCLUSION
# All the main attribute sets (overall, positive, negative) were plotted as word clouds.
# Sentiment analysis classified negatives and positives w.r.t. the 8 NRC emotion parameters.
# The book "The Tower of Nero" was found to have more positive emotions than negative.
# The trajectory plot suggests the book is worth reading, as it balances
# positive and negative emotions across the narrative.
|
ac48599694ca9d046c5dd66798cfa0a248c99887 | fc9c903bc15654ba32c45d1118f36d3aa9073fce | /R/split.asc.r | 0c05a559cf195beb73edc240a07d7dc83189c340 | [] | no_license | lib314a/popeye | 9dbffd006047e33e46a6e2c7a94dac9119b1d119 | 988df16b1aa81fb2ae98f876ee07bf130c85ce0b | refs/heads/master | 2021-07-04T20:58:08.952537 | 2020-09-10T13:48:26 | 2020-09-10T13:48:26 | 167,812,629 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 395 | r | split.asc.r | split.asc <- function (lines, start.flag, end.flag = NULL) {
starts <- grep(start.flag, lines)
if (is.null(end.flag))
ends <- c(starts[-1], length(lines))
else
ends <- grep(end.flag, lines)
x <- correct.err.flags(starts, ends)
starts <- x$starts
ends <- x$ends
r <- lapply(lisp::zip.c(starts, ends), function (x) lines[x[1]:x[2]])
names(r) <- lines[starts]
return(r)
}
|
53c282483a16f9de2039510651b90b34bac0b6b2 | bfaf11c8a5329aa84ee2a826157d664925188ccf | /man/detection.Rd | 4b17288c4908d89be16a1a28ac17ad73c76f4268 | [
"CC-BY-4.0"
] | permissive | cran/klexdatr | e73b7fd914d89f5c323742a52dabac1f38a68136 | 992228b296c650d061566e383dbbba8d59f7e82d | refs/heads/master | 2023-04-20T10:30:32.848771 | 2021-05-29T20:00:02 | 2021-05-29T20:00:02 | 261,254,008 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 533 | rd | detection.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{detection}
\alias{detection}
\title{Acoustic Detection Data}
\format{
A tbl data frame:
\describe{
\item{DateTimeDetection}{The detection date and hour (time).}
\item{Capture}{The fish code (fctr).}
\item{Receiver}{The receiver code (fctr).}
\item{Detections}{The number of detections in the hour (int).}
}
}
\usage{
detection
}
\description{
Hourly acoustic detection data by fish (capture) and receiver.
}
\keyword{datasets}
|
d0fd694349b7b45eeba7d63bc5f84e5b0419a7b1 | 36f59fc4ac547b8a124c60560e753be0c2177637 | /lib/feature/feature.mean.R | 7c9bcea353aece2c933ff66e403b0517d515cf6f | [] | no_license | zehaowang/cycle3cvd-team8 | ba505550046b52f1ee49072b134afd30fb4717b1 | 880c5fbb11fdcc58c8f55f17cd335e21a2ce9133 | refs/heads/master | 2020-12-25T22:08:23.719838 | 2016-04-04T16:59:02 | 2016-04-04T16:59:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 691 | r | feature.mean.R | library(EBImage)
# Mean grayscale intensity of a single image file (first frame only).
# NOTE(review): image files are read relative to the working directory set
# by setwd() below, while the file *names* come from img_train_dir /
# img_test_dir (defined elsewhere) -- both must point at the same images;
# confirm.
mean_intensity <- function(file) {
  img <- readImage(file)
  # Use <- with the replacement function (original used `=`).
  colorMode(img) <- Grayscale
  mean(getFrame(img, 1))
}

# Hoist the directory listings: the original re-ran list.files() on every
# loop iteration (and once more for the loop bound), which is wasteful and
# fragile if the directory changes mid-run. seq_along() also avoids the
# 1:length(...) trap when a directory is empty.
train_files <- list.files(img_train_dir)
test_files <- list.files(img_test_dir)

# Preallocate one mean-intensity slot per image.
trainvec <- rep(NA_real_, length(train_files))
testvec <- rep(NA_real_, length(test_files))

# NOTE(review): setwd() in a script is fragile (and never restored); kept
# to preserve the original behavior of resolving file names in this folder.
setwd("~/Columbia University/Spring 2016/Data Science/Project 3/train/train")
for (i in seq_along(train_files)) {
  trainvec[i] <- mean_intensity(train_files[i])
}

setwd("~/Columbia University/Spring 2016/Data Science/Project 3/test/test")
for (i in seq_along(test_files)) {
  testvec[i] <- mean_intensity(test_files[i])
}
|
c24676dd361c5c0d9f8a20654a67fb74d8cd50a3 | 9d7f9350bc17fd00e590ddd5053addb4090b1993 | /man/shiny_dimsmets.Rd | 12c7ceeabfaa566338d21656410213dffff71cf9 | [] | no_license | selesnow/RGA | 14359e1d90ec7fbe7b91d9f4420926b820ba1536 | c0f91a102ef50c57289ac5fb9bda1ef5bc95d920 | refs/heads/master | 2019-07-12T01:28:37.211872 | 2016-08-23T05:21:36 | 2016-08-23T05:21:36 | 105,628,870 | 2 | 2 | null | 2017-10-03T08:19:09 | 2017-10-03T08:19:09 | null | UTF-8 | R | false | true | 495 | rd | shiny_dimsmets.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metadata.R
\name{shiny_dimsmets}
\alias{shiny_dimsmets}
\title{The Shiny Dimensions & Metrics Explorer}
\usage{
shiny_dimsmets()
}
\description{
The dimensions and metrics explorer lists and describes all the dimensions and metrics available through the Core Reporting API. This app is deployed at \url{https://artemklevtsov.shinyapps.io/ga-dimsmets}.
}
\seealso{
\code{\link{list_dimsmets}} \code{\link{get_ga}}
}
|
c70c9ad07dfa4ef294f84ead09943f9f002910d1 | 51b93b6394163abaaa5144524745e240a8a37a66 | /man/fitVolDist.Rd | f00236e1522d53fd818ba8cbe01894f76d44d51c | [] | no_license | cran/cellVolumeDist | cf00020600bf7b12e05e1d36e6512bb06cde768c | 23786af9c8566f558703f7f9f1423c71b6da1ae6 | refs/heads/master | 2022-05-06T12:39:58.763310 | 2022-04-24T22:50:02 | 2022-04-24T22:50:02 | 17,695,017 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,828 | rd | fitVolDist.Rd | \name{fitVolDist}
\alias{fitVolDist}
\title{Fit a model for cell volume distribution under least squares criteria.}
\description{This function fits a model for cell volume distribution
under least squares criteria; free model parameters are the cell
growth rate \code{r} (\eqn{\mu m^3/h}), the variability in cell growth rate
\code{sigma_r} (\eqn{\mu m^3/h}) and a linear scaling factor \code{A}.
}
\usage{
fitVolDist(vol, freq, r = 100, sigma_r = 44, t = 40,
sigma_t = 0.3 * t, maxiter = 100, nprint = 1,
alg="leastsq")
}
\arguments{
\item{vol}{a vector of numeric values representing volumes (\eqn{\mu
m^3})}
\item{freq}{a vector of numeric values with the same length as
\code{vol}, representing the frequency of cells of the volumes given
in \code{vol}}
\item{r}{a numeric value that represents the starting value for the
rate (\eqn{\mu m^3/h}) of cell growth parameter}
\item{sigma_r}{a numeric value that represents the starting value for
the variability in the rate of cell growth parameter \code{r}
(\eqn{\mu m^3/h}) }
\item{t}{a numeric value representing the average cell cycle time (\eqn{h})}
\item{sigma_t}{a numeric value representing the variability in the
average cell cycle time \code{t} (\eqn{h})}
\item{maxiter}{numeric value representing the maximum number of
iterations used by \link[minpack.lm]{nls.lm} in model fitting under least
squares criteria}
\item{nprint}{optimization output is printed every \code{nprint}
iterations}
\item{alg}{character string indicating the algorithm to use; the
choices are now "leastsq", so that the sum squared error
\code{sum((data - model)^2)} is minimized, or "chisq", so that the
multinomial likelihood chi-square
\code{2*sum(data*log(data/model))} is minimized.}
}
\value{
\code{fitVolDist} returns
an object of class \code{"fitVolDist"}.
The generic accessor functions \code{coefficients},
\code{vcov}, \code{deviance}, \code{fitted} and \code{residuals} extract
various useful features of the value returned by \code{fitVolDist}.
An object of class \code{"fitVolDist"} is a list containing the
following components:
\item{t}{the value for \code{t} (\eqn{h}) used}
\item{sigma_t}{the value for \code{sigma_t} (\eqn{h}) used}
\item{fitted}{the model fit.}
\item{fit.res}{the output object returned from \link[minpack.lm]{nls.lm}.}
\item{summary.fit.res}{the summary of the output object returned from
\link[minpack.lm]{nls.lm}.}
}
\references{
Halter M, Elliott JT, Hubbard JB, Tona A, Plant AL (2009),
"Cell Volume Distributions Reveal Cell Growth Rates and Division Times",
Journal of Theoretical Biology, Vol 257, pp 124 - 130,
DOI: 10.1016/j.jtbi.2008.10.031.
}
\seealso{\code{\link{volEq7}}}
\examples{
\dontrun{
#############################################################
# Fit volume distribution data for A10 vSMC cell cultures
# as described in the above referenced paper
#############################################################
## load the volume distributions in the "A10_vSMC_volume_data" dataset
data("A10_vSMC_volume_data")
labs <- c("a","b","c","d")
## the volume distributions representing 0 nM aphidicolin concentration
Aph0 <- list(Aph0_a, Aph0_b, Aph0_c, Aph0_d)
## the associated cell cycle times
tAph0 <- c(tAph0_a, tAph0_b, tAph0_c, tAph0_d)
## fit each dataset
Aph0res <- list()
Aph0tab <- matrix(ncol=2,nrow=4)
for(i in 1:length(Aph0)) {
Aph0res[[i]] <- fitVolDist(vol=volumes_A10_vSMC, freq=Aph0[[i]],
r=100,sigma_r=44, t=tAph0[i])
Aph0tab[i,] <- coef(Aph0res[[i]])
}
Aph0tab <- rbind(Aph0tab, colMeans(Aph0tab))
colnames(Aph0tab) <- c("r", "sigma_r")
rownames(Aph0tab) <- c(labs, "mean values")
## plot results
par(mfrow=c(3,2))
for(i in 1:length(Aph0)) {
pe <- signif(coef(Aph0res[[i]]),3)
plot(volumes_A10_vSMC, Aph0[[i]], type="l", main= substitute(paste(
"r: ", p1, ", ", sigma[r],": ",p2),
list(p1=pe[1], p2=pe[2])),
xlab = expression(paste("volume (",mu, m^3,")", sep="")),
sub=paste("vol. dist. Aphidicolin 0 nM", labs[i]), ylab="frequency")
lines(volumes_A10_vSMC, fitted(Aph0res[[i]]), col=2)
}
textplot("(Above) Volume distribution data
representing A10 vSMC cells
cultured with 0 nM aphidicolin
concentration (black)
and model fit (red).
(Right) Parameter estimates and
mean estimates over the four fits",fixed.width=FALSE)
textplot(signif(Aph0tab,3))
## the volume distributions representing 50 nM aphidicolin concentration
Aph50 <- list(Aph50_a, Aph50_b, Aph50_c, Aph50_d)
## the associated cell cycle times
tAph50 <- c(tAph50_a, tAph50_b, tAph50_c, tAph50_d)
## fit each dataset
Aph50res <- list()
Aph50tab <- matrix(ncol=2,nrow=4)
for(i in 1:length(Aph50)) {
Aph50res[[i]] <- fitVolDist(vol=volumes_A10_vSMC, freq=Aph50[[i]],
r=100,sigma_r=44, t=tAph50[i])
Aph50tab[i,] <- coef(Aph50res[[i]])
}
Aph50tab <- rbind(Aph50tab, colMeans(Aph50tab))
colnames(Aph50tab) <- c("r", "sigma_r")
rownames(Aph50tab) <- c(labs, "mean values")
## plot results
par(mfrow=c(3,2))
for(i in 1:length(Aph50)) {
pe <- signif(coef(Aph50res[[i]]),3)
plot(volumes_A10_vSMC, Aph50[[i]], type="l", main= substitute(paste(
"r: ", p1, ", ", sigma[r],": ",p2),
list(p1=pe[1], p2=pe[2])),
xlab = expression(paste("volume (", mu, m^3,")", sep="")),
sub=paste("vol. dist. Aphidicolin 50 nM", labs[i]), ylab="frequency")
lines(volumes_A10_vSMC, fitted(Aph50res[[i]]), col=2)
}
textplot("(Above) Volume distribution data
representing A10 vSMC cells
cultured with 50 nM aphidicolin
concentration (black)
and model fit (red).
(Right) Parameter estimates and
mean estimates over the four fits",fixed.width=FALSE)
textplot(signif(Aph50tab,3))
## the volume distributions representing 100 nM aphidicolin concentration
Aph100 <- list(Aph100_a, Aph100_b, Aph100_c, Aph100_d)
## the associated cell cycle times
tAph100 <- c(tAph100_a, tAph100_b, tAph100_c, tAph100_d)
## fit each dataset
Aph100res <- list()
Aph100tab <- matrix(ncol=2,nrow=4)
for(i in 1:length(Aph100)) {
Aph100res[[i]] <- fitVolDist(vol=volumes_A10_vSMC, freq=Aph100[[i]],
r=100,sigma_r=44, t=tAph100[i])
Aph100tab[i,] <- coef(Aph100res[[i]])
}
Aph100tab <- rbind(Aph100tab, colMeans(Aph100tab))
colnames(Aph100tab) <- c("r", "sigma_r")
rownames(Aph100tab) <- c(labs, "mean values")
## plot results
par(mfrow=c(3,2))
for(i in 1:length(Aph100)) {
pe <- signif(coef(Aph100res[[i]]),3)
plot(volumes_A10_vSMC, Aph100[[i]], type="l", main= substitute(paste(
"r: ", p1, ", ", sigma[r],": ",p2),
list(p1=pe[1], p2=pe[2])),
xlab = expression(paste("volume (",mu, m^3,")", sep="")),
sub=paste("vol. dist. Aphidicolin 100 nM", labs[i]), ylab="frequency")
lines(volumes_A10_vSMC, fitted(Aph100res[[i]]), col=2)
}
textplot("(Above) Volume distribution data
representing A10 vSMC cells
cultured with 100 nM aphidicolin
concentration (black)
and model fit (red).
(Right) Parameter estimates and
mean estimates over the four fits",fixed.width=FALSE)
textplot(signif(Aph100tab,3))
}
#############################################################
# Fit volume distribution data for NIH3T3 cell cultures
# as described in the above referenced paper
#############################################################
## load the volume distributions in the "NIH3T3_volume_data" dataset
data("NIH3T3_volume_data")
labs <- c("a","b","c","d")
## the volume distributions representing NIH3T3 cells
NIH3T3 <- list(NIH3T3_a, NIH3T3_b, NIH3T3_c, NIH3T3_d)
## the associated cell cycle times
tNIH3T3 <- c(tNIH3T3_a, tNIH3T3_b, tNIH3T3_c, tNIH3T3_d)
## fit each dataset
NIH3T3res <- list()
NIH3T3tab <- matrix(ncol=2,nrow=4)
for(i in 1:length(NIH3T3)) {
NIH3T3res[[i]] <- fitVolDist(vol=volumes_nih3t3, freq=NIH3T3[[i]],
r=100,sigma_r=44, t=tNIH3T3[i])
NIH3T3tab[i,] <- coef(NIH3T3res[[i]])
}
NIH3T3tab <- rbind(NIH3T3tab, colMeans(NIH3T3tab))
colnames(NIH3T3tab) <- c("r", "sigma_r")
rownames(NIH3T3tab) <- c(labs, "mean values")
## plot results
par(mfrow=c(3,2))
for(i in 1:length(NIH3T3)) {
pe <- signif(coef(NIH3T3res[[i]]),3)
plot(volumes_nih3t3, NIH3T3[[i]], type="l", main= substitute(paste(
"r: ", p1, ", ", sigma[r],": ",p2),
list(p1=pe[1], p2=pe[2])),
xlab = expression(paste("volume (",mu, m^3,")", sep="")),
sub=paste("vol. dist. NIH3T3", labs[i]), ylab="frequency")
lines(volumes_nih3t3, fitted(NIH3T3res[[i]]), col=2)
}
textplot("(Above) Volume distribution data
representing NIH3T3 cells
cultured under normal
conditions (black)
and model fit (red).
(Right) Parameter estimates and
mean estimates over the four fits",fixed.width=FALSE)
textplot(signif(NIH3T3tab,3))
}
\keyword{optimize}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.