Dataset schema (column, dtype, summary stats):

| column | dtype | stats |
|---|---|---|
| content | large_string | lengths 0 to 6.46M |
| path | large_string | lengths 3 to 331 |
| license_type | large_string | 2 classes |
| repo_name | large_string | lengths 5 to 125 |
| language | large_string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4 to 6.46M |
| extension | large_string | 75 classes |
| text | string | lengths 0 to 6.46M |

Sample rows follow; each shows the file content, then its metadata on one line.
if (!require(shiny)) {install.packages("shiny")}; library(shiny)
if (!require(shinythemes)) {install.packages("shinythemes")}; library(shinythemes)
if (!require(dplyr)) {install.packages("dplyr")}; library(dplyr)
if (!require(fastDummies)) {install.packages("fastDummies")}; library(fastDummies)
if (!require(Hmisc)) {install.packages("Hmisc")}; library(Hmisc)
if (!require(VIM)) {install.packages("VIM")}; library(VIM)
if (!require(shinyWidgets)) {install.packages("shinyWidgets")}; library(shinyWidgets)
if (!require(tidyr)) {install.packages("tidyr")}; library(tidyr)
if (!require(datasets)) {install.packages("datasets")}; library(datasets)
if (!require(stringr)) {install.packages("stringr")}; library(stringr)
if (!require(shinyhelper)) {install.packages("shinyhelper")}; library(shinyhelper)
if (!require(summarytools)) {install.packages("summarytools")}; library(summarytools)
if (!require(descriptr)) {install.packages("descriptr")}; library(descriptr)
if (!require(DataExplorer)){install.packages("DataExplorer")}; library(DataExplorer)
if (!require(shinyBS)) {install.packages("shinyBS")}; library(shinyBS)
| path: /dependencies.R | license_type: no_license | repo_name: yogesh1612/data_pre-process_shinyapp | language: R | is_vendor: false | is_generated: false | length_bytes: 1,133 | extension: r |
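The file above repeats the same install-if-missing idiom for fifteen packages; a compact equivalent over the same list (a sketch, not part of the original app):

```r
# Illustrative rewrite: install any missing packages once, then attach them all.
pkgs <- c("shiny", "shinythemes", "dplyr", "fastDummies", "Hmisc", "VIM",
          "shinyWidgets", "tidyr", "datasets", "stringr", "shinyhelper",
          "summarytools", "descriptr", "DataExplorer", "shinyBS")
for (p in pkgs) {
  if (!requireNamespace(p, quietly = TRUE)) install.packages(p)
  library(p, character.only = TRUE)
}
```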
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/library.R
\name{md.mvnorm}
\alias{md.mvnorm}
\title{md.mvnorm}
\usage{
md.mvnorm(names, means = rep(0, length(names)), cov = diag(ncol(names)))
}
\arguments{
\item{names}{vector of covariate names}
\item{means}{vector of means, default is \code{rep(0, length(names))}}
\item{cov}{covariance matrix, default is \code{diag(ncol(names))}}
}
\description{
Creates information of a vector of multi-normal covariates with the specified array of means and covariance matrix.
This function call must be added to the \code{\link{md.simparams}} object.
}
\examples{
\dontrun{
library(missDeaths)
sim = md.simparams() +
md.mvnorm(c("X1", "X2"), c(100, 0), matrix(c(225, 3, 2, 1), 2, 2))
}
}
| path: /man/md.mvnorm.Rd | license_type: no_license | repo_name: cran/missDeaths | language: R | is_vendor: false | is_generated: true | length_bytes: 793 | extension: rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xml.R
\name{set_xml_file_helper}
\alias{set_xml_file_helper}
\title{set_xml_file_helper}
\usage{
set_xml_file_helper(xml, fq_name)
}
\arguments{
\item{xml}{The xml pipeline object}
\item{fq_name}{The full path to the XML file}
}
\value{
The updated XML object.
}
\description{
set_xml_file_helper
}
| path: /input/gcamdata/man/set_xml_file_helper.Rd | license_type: permissive | repo_name: JGCRI/gcam-core | language: R | is_vendor: false | is_generated: true | length_bytes: 378 | extension: rd |
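The man page above documents only the signature; a heavily hedged sketch of a call, where the pipeline object and file path are hypothetical and only `set_xml_file_helper(xml, fq_name)` itself is taken from the documentation:

```r
# Hypothetical usage: point an existing gcamdata XML pipeline object at a file on disk.
# `xml_pipeline` is assumed to have been created elsewhere by gcamdata's XML machinery.
xml_pipeline <- set_xml_file_helper(xml_pipeline, "xml/aglu_example.xml")
```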
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/windows.R
\name{coverageWindowsCenteredStranded}
\alias{coverageWindowsCenteredStranded}
\title{Get windowed strand-oriented coverages around center points}
\usage{
coverageWindowsCenteredStranded(centers, window.size = 1000, coverage)
}
\arguments{
\item{centers}{a data frame of center points with columns 'chr', 'center', 'strand'}
\item{window.size}{the size of the window surrounding the center position, default 1000}
\item{coverage}{a coverage object (\code{\link[IRanges]{RleList}} as returned by \code{\link[IRanges]{coverage}})}
}
\value{
a matrix
}
\description{
Out-of-bound windows will be removed!
}
| path: /man/coverageWindowsCenteredStranded.Rd | license_type: permissive | repo_name: musikutiv/tsTools | language: R | is_vendor: false | is_generated: true | length_bytes: 694 | extension: rd |
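The man page above ships no example; a minimal sketch with toy data, assuming the function is loaded from tsTools (the reads, centers, and coordinates are made up):

```r
library(GenomicRanges)  # provides GRanges(), IRanges(), and coverage()

# Toy coverage: a few 200-bp reads on chr1 turned into an RleList.
reads <- GRanges("chr1", IRanges(start = c(1000, 1500, 2000, 2600), width = 200))
cov   <- coverage(reads)

# Center points with the required 'chr', 'center', 'strand' columns.
centers <- data.frame(chr    = c("chr1", "chr1"),
                      center = c(1600, 2100),
                      strand = c("+", "-"),
                      stringsAsFactors = FALSE)

# One row per in-bound center, one column per position in the 1000-bp window;
# out-of-bound windows are dropped, as the description warns.
mat <- coverageWindowsCenteredStranded(centers, window.size = 1000, coverage = cov)
dim(mat)
```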
#Formacao Cientista de Dados - Fernando Amaral
cluster = kmeans(iris[1:4],centers=3)
table(iris$Species,cluster$cluster)
plot(iris[,1:4],col=cluster$cluster)
set.seed(2014)
cluster = kmeans(iris[1:4],centers=3)
table(iris$Species,cluster$cluster)
plot(iris[,1:4],col=cluster$cluster)
| path: /Udemy/Formacao Cientista de Dados/Resources/GERAL/5.1.Kmeans.R | license_type: permissive | repo_name: tarsoqueiroz/Rlang | language: R | is_vendor: false | is_generated: false | length_bytes: 296 | extension: r |
#' Extract information from GEO for a given sample
#'
#' This function uses GEOquery to extract information for a given sample. The
#' GEO accession ids for the sample can be found in the study phenotype table.
#'
#' @return Returns a [DataFrame-class][S4Vectors::DataFrame-class] with the information
#' from GEO available for the given sample.
#'
#' @param geoid A character vector of length 1 with the GEO accession id for
#' a given sample.
#' @param verbose If `TRUE` the `geoid` will be shown.
#' @param sleep The number of seconds (or fraction) to wait before downloading
#' data using [getGEO][GEOquery::getGEO]. This is important if you are looping over
#' `geo_info()` given the constraints published at
#' <https://www.ncbi.nlm.nih.gov/books/NBK25497/>.
#' @param getGPL This argument is passed to [getGEO][GEOquery::getGEO] and is set
#' to `FALSE` by default to speed up the process.
#' @param destdir This argument is passed to [getGEO][GEOquery::getGEO].
#' @param ... Additional arguments passed to [getGEO][GEOquery::getGEO].
#'
#' @author Leonardo Collado-Torres, Andrew Jaffe
#' @export
#'
#' @import GEOquery IRanges S4Vectors
#'
#' @examples
#' geo_info("GSM836270")
geo_info <- function(geoid, verbose = FALSE, sleep = 1 / 2, getGPL = FALSE,
destdir = tempdir(), ...) {
if (is.na(geoid)) {
return(NULL)
}
## Check inputs
stopifnot(is.character(geoid) & length(geoid) == 1)
if (verbose) {
message(paste(
Sys.time(),
"finding GEO information for GEO accession id", geoid
))
}
if (!file.exists(file.path(destdir, paste0(geoid, ".soft")))) {
Sys.sleep(sleep)
}
## Get data from GEO, with 3 retries, waiting between 2 and 5 seconds in
## between retries
N.TRIES <- 3L
while (N.TRIES > 0L) {
geo <- tryCatch(
GEOquery::getGEO(geoid, getGPL = getGPL, destdir = destdir, ...),
error = function(e) {
soft <- paste0(geoid, ".soft")
soft_file <- file.path(destdir, soft)
if (any(grepl("private", readLines(soft_file)))) {
message(paste(geoid, "is currently private"))
return(NA)
} else if (any(grepl("blocked", readLines(soft_file)))) {
warning(paste("It seems like your IP access is blocked. Please check the file", soft_file, "for more details."))
return(NA)
} else {
return(e)
}
}
)
if (!inherits(geo, "error")) {
break
}
Sys.sleep(runif(n = 1, min = 2, max = 5))
N.TRIES <- N.TRIES - 1L
}
## Return an empty DataFrame if there was an issue with getGEO()
if (!is(geo, "GSM")) {
return(S4Vectors::DataFrame())
}
## Extract the header information
result <- geo@header
## Function for cleaning
clean_geo <- function(pattern, varname, res) {
charIndex <- grep(pattern, names(res))
if (length(charIndex) > 0) {
res <- c(
res,
IRanges::CharacterList(unlist(unname(result[charIndex])))
)
names(res)[length(res)] <- varname
res <- res[-charIndex]
}
return(res)
}
## Clean up the header information
df <- data.frame(
pattern = c(
"characteristics_ch1", "data_processing", "contact_",
"extract_", "library_", "relation", "series_",
"supplementary_file_"
),
varname = c(
"characteristics", "data_processing", "contact", "extract",
"library", "relation", "series", "supplementary_file"
),
stringsAsFactors = FALSE
)
for (i in seq_len(nrow(df))) {
result <- clean_geo(
df$pattern[i],
df$varname[i], result
)
}
## Make sure they are all length 1
if (any(S4Vectors::elementNROWS(result) > 1)) {
for (i in which(S4Vectors::elementNROWS(result) > 1)) result[i] <- IRanges::CharacterList(unlist(unname(result[i])))
}
## Finish
return(S4Vectors::DataFrame(result))
}
| path: /R/geo_info.R | license_type: no_license | repo_name: qtguan/recount | language: R | is_vendor: false | is_generated: false | length_bytes: 4,221 | extension: r |
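The documentation above points out that `sleep` matters when looping over `geo_info()` for many samples; a small illustrative loop (the accession ids after the first are made up):

```r
# Query several GEO sample accessions in sequence; the sleep argument spaces out
# the requests to respect the NCBI usage limits referenced above.
ids <- c("GSM836270", "GSM836271", "GSM836272")
info_list <- lapply(ids, geo_info, verbose = TRUE, sleep = 1)
```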
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Perceived mental health
if(year == 1996)
FYC <- FYC %>% mutate(MNHLTH53 = MNTHLTH2, MNHLTH42 = MNTHLTH2, MNHLTH31 = MNTHLTH1)
FYC <- FYC %>%
mutate_at(vars(starts_with("MNHLTH")), funs(replace(., .< 0, NA))) %>%
mutate(mnhlth = coalesce(MNHLTH53, MNHLTH42, MNHLTH31)) %>%
mutate(mnhlth = recode_factor(mnhlth, .default = "Missing", .missing = "Missing",
"1" = "Excellent",
"2" = "Very good",
"3" = "Good",
"4" = "Fair",
"5" = "Poor"))
# Sex
FYC <- FYC %>%
mutate(sex = recode_factor(SEX, .default = "Missing", .missing = "Missing",
"1" = "Male",
"2" = "Female"))
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
results <- svyby(~TOTEXP.yy., FUN=svymean, by = ~sex + mnhlth, design = FYCdsgn)
print(results)
| path: /mepstrends/hc_use/json/code/r/meanEXP0__sex__mnhlth__.r | license_type: permissive | repo_name: HHS-AHRQ/MEPS-summary-tables | language: R | is_vendor: false | is_generated: false | length_bytes: 1,643 | extension: r |
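The `.yy.` and `.year.` tokens above appear to be placeholders that the repository's table generator fills in with a survey year; a hedged sketch of what the design and estimation calls might expand to for 2015, assuming MEPS's usual year-suffixed variable names (PERWT15F, TOTEXP15), which are not taken from this template:

```r
# Hypothetical expansion of the placeholders for year = 2015.
year <- 2015
FYCdsgn <- svydesign(
  id      = ~VARPSU,
  strata  = ~VARSTR,
  weights = ~PERWT15F,   # assumed year-suffixed person weight variable
  data    = FYC,
  nest    = TRUE)
results <- svyby(~TOTEXP15, FUN = svymean, by = ~sex + mnhlth, design = FYCdsgn)
print(results)
```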
# Clean up
rm(list = ls(all = TRUE))
#Setting the working directory
setwd("~/Desktop/Personal/DataScience/Coursera/8Course- Practical Machine Learning/final project")
# Loading the caret library
library(caret)
# Taken from the assignment to write the files
pml_write_files = function(x){
n = length(x)
for(i in 1:n){
filename = paste0("problem_id_",i,".txt")
write.table(x[i],file=filename,quote=FALSE,row.names=FALSE,col.names=FALSE)
}
}
# Reading training and testing sets
trainingRaw <- read.csv(file="pml-training.csv", header=TRUE, as.is = TRUE, stringsAsFactors = FALSE, sep=',', na.strings=c('NA','','#DIV/0!'))
testingRaw <- read.csv(file="pml-testing.csv", header=TRUE, as.is = TRUE, stringsAsFactors = FALSE, sep=',', na.strings=c('NA','','#DIV/0!'))
trainingRaw$classe <- as.factor(trainingRaw$classe)
#Removing NAs
NAindex <- apply(trainingRaw,2,function(x) {sum(is.na(x))})
trainingRaw <- trainingRaw[,which(NAindex == 0)]
NAindex <- apply(testingRaw,2,function(x) {sum(is.na(x))})
testingRaw <- testingRaw[,which(NAindex == 0)]
#Preprocess
v <- which(lapply(trainingRaw, class) %in% "numeric")
preObj <-preProcess(trainingRaw[,v],method=c('knnImpute', 'center', 'scale'))
trainLess1 <- predict(preObj, trainingRaw[,v])
trainLess1$classe <- trainingRaw$classe
testLess1 <-predict(preObj,testingRaw[,v])
# remove near zero values, if any
nzv <- nearZeroVar(trainLess1,saveMetrics=TRUE)
trainLess1 <- trainLess1[,nzv$nzv==FALSE]
nzv <- nearZeroVar(testLess1,saveMetrics=TRUE)
testLess1 <- testLess1[,nzv$nzv==FALSE]
# Create cross validation set
set.seed(12031987)
inTrain = createDataPartition(trainLess1$classe, p = 3/4, list=FALSE)
training = trainLess1[inTrain,]
crossValidation = trainLess1[-inTrain,]
# Train model with random forest
startTime <- Sys.time();
modFit <- train(classe ~., method="rf", data=training, trControl=trainControl(method='cv', number=5, allowParallel=TRUE))
endTime <- Sys.time()
endTime - startTime
# Training set accuracy
trainingPred <- predict(modFit, training)
confusionMatrix(trainingPred, training$classe)
# Cross validation set accuracy
cvPred <- predict(modFit, crossValidation)
confusionMatrix(cvPred, crossValidation$classe)
#Predictions on the real testing set
testingPred <- predict(modFit, testLess1)
testingPred
| path: /PML-Project/model.R | license_type: no_license | repo_name: rastmails/8CourseFinalProject | language: R | is_vendor: false | is_generated: false | length_bytes: 2,381 | extension: r |
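`pml_write_files()` is defined at the top of the script but never called; presumably the final step is something like the following one-liner on the test-set predictions:

```r
# Hypothetical final step: write each prediction to its own problem_id_<i>.txt file.
pml_write_files(as.character(testingPred))
```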
answer <- 'Test'
| path: /Microsoft R Demo/MS-R/mrsdeploy/scripts/projectA/test.R | license_type: no_license | repo_name: dem108/Microsoft-R-Demo | language: R | is_vendor: false | is_generated: false | length_bytes: 16 | extension: r |
## All Utterances
# All possible utterances (i.e. object features) that can be handled.
# Here, we assume a 3x3 matrix (three feature types with three expressions each)
allUtterances <- c('cloud', 'circle', 'square', 'solid', 'striped', 'dotted', 'blue', 'red', 'green')
allUtterancesNew1 <- c('cloud', 'circle', 'square', 'solid', 'striped', 'polka-dotted', 'blue', 'red', 'green')
allUttMatrix <- matrix(allUtterances, ncol=3, byrow=TRUE)
##
## All Objects
# all object matrix contains 3^3 types of objects.
# the matrix essentially specifies the 3 feature expressions for each object
# thus, the matrix maps objects to matching utterances
# all Objects implements the strings,
# allObjectsToUtterancesMappings encodes the index mappings
allObjects <- matrix('',27,3)
allObjectsToUtterancesMappings <- matrix(0,27,3)
for(index in c(1:27)) {
# print(c(1+((index-1)%%3), 1+floor(((index-1)%%9)/3), 1+floor((index-1)/9)))
allObjects[index,1] <- allUttMatrix[1,1+((index-1)%%3)]
allObjects[index,2] <- allUttMatrix[2,1+floor(((index-1)%%9)/3)]
allObjects[index,3] <- allUttMatrix[3,1+floor((index-1)/9)]
allObjectsToUtterancesMappings[index,1] <- 1+((index-1)%%3)
allObjectsToUtterancesMappings[index,2] <- 4+floor(((index-1)%%9)/3)
allObjectsToUtterancesMappings[index,3] <- 7+floor((index-1)/9)
}
##
## The relevant utterances are determined given currentObjects
# valid utterances correspond to all features present in the current objects!
determineValidUtterances <- function(currentObjects) {
validUtterances <- c()
for(i in c(1:length(currentObjects))) {
validUtterances <- c(validUtterances, allObjectsToUtterancesMappings[currentObjects[i],])
}
validUtterances <- sort(unique(validUtterances))
return(validUtterances)
}
###
## No preference is encoded with 4, whereas a specific feature expression preference is encoded
# by the respective index value
# get feature-respective priors returns general feature respective priors for all 3 features
# @deprecated (not used currently!)
getFeatureRespectivePriors <- function(softAddProb) {
featureRespectivePriors <- list()
for(i in c(1:3)) { ## for all three features generate a preference matrix
m <- matrix(0,4,3)
for(fPref in c(1:3)) {
m[fPref,fPref] <- 1
m[fPref,] <- m[fPref,] + softAddProb
m[fPref,] <- m[fPref,] / sum(m[fPref,])
}
m[4,] <- 1/3
featureRespectivePriors[[i]] <- m
}
return(featureRespectivePriors)
}
##
## Determining the specific mapping of objects to utterances that applies given currentObjects
# mapping current objects to utterances
determineObjectToUtterancesMapping <- function(currentObjects) {
mapObjToUtt <- matrix(0, length(currentObjects), 3)
for(i in c(1:length(currentObjects))) {
mapObjToUtt[i,] <- allObjectsToUtterancesMappings[currentObjects[i],]
}
return(mapObjToUtt)
}
##
# Determining the corresponding mappings from all relevant utterances to objects
# parameter notObeyInst determines how strongly the instruction may be ignored (0 = full obedience; values approaching infinity = the instruction is fully ignored)
determineUtteranceToObjectProbabilities <- function(consideredUtterances, currentObjects,
mapObjToUtt, notObeyInst) {
mapUttToObj <- list()
mapUttToObjProbs <- matrix(notObeyInst, length(consideredUtterances), length(currentObjects))
for(utt in rep(1:length(consideredUtterances)) ) {
# determine array of all objects that match the utterance
mapUttToObj[[utt]] = ((which(mapObjToUtt[,] == consideredUtterances[utt])-1)%%nrow(mapObjToUtt))+1
for(i in rep(1:length(mapUttToObj[[utt]]))) {
mapUttToObjProbs[utt,mapUttToObj[[utt]][i]] <- mapUttToObjProbs[utt,mapUttToObj[[utt]][i]] + 1;
}
mapUttToObjProbs[utt,] <- mapUttToObjProbs[utt,] / sum(mapUttToObjProbs[utt,])# length(mapUttToObj[[utt]])
}
return(mapUttToObjProbs)
}
##
## Priors on object preferences - automatically derived from considered utterances
# (i.e. derived from all relevant features)
# type == 0: hard priors; type > 0: soft prior with specified softness
# returns a list of preference priors for all considered features, i.e. utterances,
# as well as for "no preference" whatsoever, i.e., uniform prior over all three objects
getObjectPreferencePriors <- function(consideredUtterances, currentObjects, type, mapUttToObjProbs) {
objectPreferenceHardPriors <- list()
for(utt in rep(1:length(consideredUtterances)) ) {
objectPreferenceHardPriors[[utt]] <- mapUttToObjProbs[utt,]
}
objectPreferenceHardPriors[[length(consideredUtterances)+1]] =
rep(1/length(currentObjects), length(currentObjects) )
# soft preferences with uniform choice fusion.
softAddProb <- type
objectPreferenceSoftPriors <- list()
for(utt in rep(1:(length(consideredUtterances)+1)) ) {
objectPreferenceSoftPriors[[utt]] <- objectPreferenceHardPriors[[utt]] + softAddProb
objectPreferenceSoftPriors[[utt]] <- objectPreferenceSoftPriors[[utt]] / sum(objectPreferenceSoftPriors[[utt]])
}
return(objectPreferenceSoftPriors)
}
| path: /RSA_2019_01/RSA_StratUtt_AllUtterancesAndObjects.R | license_type: no_license | repo_name: gscontras/prior_inference | language: R | is_vendor: false | is_generated: false | length_bytes: 5,174 | extension: r |
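A short illustrative run of the helpers defined above, with three arbitrarily chosen objects out of the 27 and made-up softness parameters:

```r
# Pick three objects, then derive valid utterances, mappings, and priors for them.
currentObjects <- c(1, 5, 14)

validUtt <- determineValidUtterances(currentObjects)   # indices into allUtterances
allUtterances[validUtt]                                # the corresponding feature words

mapObjToUtt <- determineObjectToUtterancesMapping(currentObjects)

# Utterance-to-object probabilities with a small "disobedience" mass of 0.1.
mapUttToObjProbs <- determineUtteranceToObjectProbabilities(validUtt, currentObjects,
                                                            mapObjToUtt, 0.1)

# Soft object-preference priors (type = 0.2 controls the softness).
objPrefPriors <- getObjectPreferencePriors(validUtt, currentObjects, 0.2, mapUttToObjProbs)
```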
\name{featureselection.meta}
\alias{featureselection.meta}
\title{
Feature selection for meta analysis
}
\description{
Apply univariate Cox regression and aggregate gene Z-scores.
}
\usage{
featureselection.meta(gnExpMat, survivaltime, censor)
}
\arguments{
\item{gnExpMat}{
Matrix of gene expression data.
}
\item{survivaltime}{
Vector of survival time.
}
\item{censor}{
Vector of censoring status. In the censoring status vector, 1 = event occurred, 0 = censored.
}
}
\value{
Vector of gene Z-scores.
}
\author{
Haleh Yasrebi
}
\section{Warning }{This function is not called by the user directly.}
\keyword{Meta analysis}
| path: /man/featureselection.meta.Rd | license_type: no_license | repo_name: cran/survJamda | language: R | is_vendor: false | is_generated: false | length_bytes: 634 | extension: rd |
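The help page above describes the method (a univariate Cox regression per gene, aggregated as Z-scores) but, being internal, carries no example; a generic sketch of the underlying idea on simulated data, not survJamda's implementation:

```r
library(survival)
set.seed(1)

# Simulated inputs in the documented shapes: expression matrix, survival times,
# and censoring indicators (1 = event occurred, 0 = censored).
gnExpMat     <- matrix(rnorm(100 * 20), nrow = 100,
                       dimnames = list(NULL, paste0("gene", 1:20)))
survivaltime <- rexp(100, rate = 0.1)
censor       <- rbinom(100, 1, 0.7)

# One univariate Cox model per gene; keep the Wald Z statistic of its coefficient.
zscores <- apply(gnExpMat, 2, function(g) {
  fit <- coxph(Surv(survivaltime, censor) ~ g)
  summary(fit)$coefficients[, "z"]
})
head(sort(abs(zscores), decreasing = TRUE))  # genes ranked by |Z|
```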
#' module_aglu_L2012.ag_For_Past_bio_input_irr_mgmt
#'
#' Build agriculture, forest, pasture and biomass production inputs for all technologies.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L2012.AgSupplySector}, \code{L2012.AgSupplySubsector}, \code{L2012.AgProduction_ag_irr_mgmt}, \code{L2012.AgProduction_For}, \code{L2012.AgProduction_Past}, \code{L2012.AgHAtoCL_irr_mgmt}, \code{L2012.AgYield_bio_ref}. The corresponding file in the
#' original data system was \code{L2012.ag_For_Past_bio_input_irr_mgmt.R} (aglu level2).
#' @details This chunk specifies the input tables for agriculture, forest, pasture and biomass supply sectors and subsectors,
#' agricultural commodity production and harvest area to cropland by technologies, forest and pasture production,
#' and biomass grass and tree crops yield by technologies.
#' @importFrom assertthat assert_that
#' @importFrom dplyr filter mutate select
#' @importFrom tidyr gather spread
#' @author RC August 2017
module_aglu_L2012.ag_For_Past_bio_input_irr_mgmt <- function(command, ...) {
if(command == driver.DECLARE_INPUTS) {
return(c(FILE = "common/GCAM_region_names",
FILE = "water/basin_to_country_mapping",
FILE = "aglu/A_agSupplySector",
FILE = "aglu/A_agSupplySubsector",
"L101.ag_Prod_Mt_R_C_Y_GLU",
"L113.ag_bioYield_GJm2_R_GLU",
"L122.ag_HA_to_CropLand_R_Y_GLU",
"L123.ag_Prod_Mt_R_Past_Y_GLU",
"L123.For_Prod_bm3_R_Y_GLU",
"L132.ag_an_For_Prices",
"L1321.prP_R_C_75USDkg",
"L161.ag_irrProd_Mt_R_C_Y_GLU",
"L161.ag_rfdProd_Mt_R_C_Y_GLU",
"L163.ag_irrBioYield_GJm2_R_GLU",
"L163.ag_rfdBioYield_GJm2_R_GLU",
"L181.ag_Prod_Mt_R_C_Y_GLU_irr_level",
"L181.YieldMult_R_bio_GLU_irr"))
} else if(command == driver.DECLARE_OUTPUTS) {
return(c("L2012.AgSupplySector",
"L2012.AgSupplySubsector",
"L2012.AgProduction_ag_irr_mgmt",
"L2012.AgProduction_For",
"L2012.AgProduction_Past",
"L2012.AgHAtoCL_irr_mgmt",
"L2012.AgYield_bio_ref",
"L201.AgYield_bio_grass",
"L201.AgYield_bio_tree"))
} else if(command == driver.MAKE) {
GCAM_commodity <- GCAM_region_ID <- region <- value <- year <- GLU <- GLU_name <- GLU_code <-
AgSupplySector <- AgSupplySubsector <- AgProductionTechnology <- share.weight.year <- subs.share.weight <-
tech.share.weight <- logit.year.fillout <- logit.exponent <- calPrice <- calOutputValue <-
market <- IRR_RFD <- Irr_Rfd <- MGMT <- level <- yield <- generic.yield <- yield_irr <-
yieldmult <- Yield_GJm2 <- reg_calPrice <- NULL # silence package check notes
all_data <- list(...)[[1]]
# Load required inputs
GCAM_region_names <- get_data(all_data, "common/GCAM_region_names")
basin_to_country_mapping <- get_data(all_data, "water/basin_to_country_mapping")
A_AgSupplySector <- get_data(all_data, "aglu/A_agSupplySector")
A_AgSupplySubsector <- get_data(all_data, "aglu/A_agSupplySubsector")
L101.ag_Prod_Mt_R_C_Y_GLU <- get_data(all_data, "L101.ag_Prod_Mt_R_C_Y_GLU")
L113.ag_bioYield_GJm2_R_GLU <- get_data(all_data, "L113.ag_bioYield_GJm2_R_GLU")
L122.ag_HA_to_CropLand_R_Y_GLU <- get_data(all_data, "L122.ag_HA_to_CropLand_R_Y_GLU")
L123.ag_Prod_Mt_R_Past_Y_GLU <- get_data(all_data, "L123.ag_Prod_Mt_R_Past_Y_GLU")
L123.For_Prod_bm3_R_Y_GLU <- get_data(all_data, "L123.For_Prod_bm3_R_Y_GLU")
L132.ag_an_For_Prices <- get_data(all_data, "L132.ag_an_For_Prices")
L1321.prP_R_C_75USDkg <- get_data(all_data, "L1321.prP_R_C_75USDkg")
L161.ag_irrProd_Mt_R_C_Y_GLU <- get_data(all_data, "L161.ag_irrProd_Mt_R_C_Y_GLU")
L161.ag_rfdProd_Mt_R_C_Y_GLU <- get_data(all_data, "L161.ag_rfdProd_Mt_R_C_Y_GLU")
L163.ag_irrBioYield_GJm2_R_GLU <- get_data(all_data, "L163.ag_irrBioYield_GJm2_R_GLU")
L163.ag_rfdBioYield_GJm2_R_GLU <- get_data(all_data, "L163.ag_rfdBioYield_GJm2_R_GLU")
L181.ag_Prod_Mt_R_C_Y_GLU_irr_level <- get_data(all_data, "L181.ag_Prod_Mt_R_C_Y_GLU_irr_level")
L181.YieldMult_R_bio_GLU_irr <- get_data(all_data, "L181.YieldMult_R_bio_GLU_irr")
# L2012.AgSupplySector: Generic AgSupplySector characteristics (units, calprice, market, logit)
# Set up the regional price data to be joined in to the ag supplysector table
L2012.prP_R_C <- left_join_error_no_match(L1321.prP_R_C_75USDkg, GCAM_region_names,
by = "GCAM_region_ID") %>%
select(region, GCAM_commodity, reg_calPrice = value)
A_AgSupplySector %>%
# At the supplysector (market) level, all regions get all supplysectors
write_to_all_regions(c(LEVEL2_DATA_NAMES[["AgSupplySector"]], LOGIT_TYPE_COLNAME),
GCAM_region_names = GCAM_region_names) %>%
select(-calPrice) %>%
# Join calibration price data, there are missing value for biomass, use left_join instead
left_join(L132.ag_an_For_Prices, by = c("AgSupplySector" = "GCAM_commodity")) %>%
left_join(L2012.prP_R_C, by = c("region", AgSupplySector = "GCAM_commodity")) %>%
mutate(calPrice = replace(calPrice, AgSupplySector == "biomass", 1), # value irrelevant
calPrice = if_else(is.na(reg_calPrice), calPrice, reg_calPrice),
# For regional commodities, specify market names with region names
market = replace(market, market == "regional", region[market == "regional"])) %>%
select(LEVEL2_DATA_NAMES[["AgSupplySector"]], LOGIT_TYPE_COLNAME) %>%
# Remove any regions for which agriculture and land use are not modeled
filter(!region %in% aglu.NO_AGLU_REGIONS) ->
L2012.AgSupplySector
# L2012.AgSupplySubsector: Generic AgSupplySubsector characteristics (none specified as competition is in the land allocator)
# At the subsector (production) level, only region x GLU combinations that actually exist are created.
# So start with template production tables of available region x commodity x glu for all commodities.
# First, biograss: available anywhere that has any crop production at all
L101.ag_Prod_Mt_R_C_Y_GLU %>%
select(GCAM_region_ID, GLU) %>%
unique %>%
mutate(GCAM_commodity = "biomass_grass") ->
L201.R_C_GLU_biograss
# Second, biotree: available anywhere that has any forest production at all
L123.For_Prod_bm3_R_Y_GLU %>%
select(GCAM_region_ID, GLU) %>%
unique %>%
mutate(GCAM_commodity = "biomass_tree") ->
L201.R_C_GLU_biotree
# Third, bind Ag commodties, forest, pasture and biomass all together
L101.ag_Prod_Mt_R_C_Y_GLU %>%
bind_rows(L123.For_Prod_bm3_R_Y_GLU, L123.ag_Prod_Mt_R_Past_Y_GLU) %>%
select(GCAM_region_ID, GCAM_commodity, GLU) %>%
unique %>%
bind_rows(L201.R_C_GLU_biograss, L201.R_C_GLU_biotree) %>%
arrange(GCAM_region_ID, GLU, GCAM_commodity) %>%
left_join_error_no_match(A_AgSupplySubsector, by = c("GCAM_commodity" = "AgSupplySubsector")) %>%
left_join_error_no_match(GCAM_region_names, by = "GCAM_region_ID") %>%
left_join_error_no_match(select(basin_to_country_mapping, GLU_code, GLU_name), by = c("GLU" = "GLU_code")) %>%
# Subsector isn't just the supplysector & GLU for biomass crops, as this is where the grass/tree split is done
mutate(AgSupplySubsector = paste(GCAM_commodity, GLU_name, sep = "_"),
# We do not actually care about the logit here but we need a value to avoid errors
logit.year.fillout = min(MODEL_BASE_YEARS),
logit.exponent = -3,
logit.type = NA) %>%
select(LEVEL2_DATA_NAMES[["AgSupplySubsector"]], LOGIT_TYPE_COLNAME) ->
L2012.AgSupplySubsector
# L2012.AgProduction_ag_irr_mgmt: Agricultural commodity production by all technologies
# Start with L2011.AgProduction_ag_irr to get subsector and technology shareweights
L161.ag_irrProd_Mt_R_C_Y_GLU %>%
mutate(IRR_RFD = "IRR") %>%
bind_rows(mutate(L161.ag_rfdProd_Mt_R_C_Y_GLU, IRR_RFD = "RFD")) %>%
filter(year %in% MODEL_BASE_YEARS) %>%
mutate(calOutputValue = round(value, digits = aglu.DIGITS_CALOUTPUT)) %>%
left_join_error_no_match(GCAM_region_names, by = "GCAM_region_ID") %>%
left_join_error_no_match(select(basin_to_country_mapping, GLU_code, GLU_name), by = c("GLU" = "GLU_code")) %>%
# Set subsector year and technology shareweights, subsector shareweights are set at the aggregate level later
mutate(share.weight.year = year,
tech.share.weight = 0,
tech.share.weight = replace(tech.share.weight, calOutputValue > 0, 1)) ->
L2011.AgProduction_ag_irr
# Set subsector shareweights at the aggregate level across irrigated and rainfed production
L2011.AgProduction_ag_irr %>%
group_by(region, GCAM_commodity, GLU_name, year) %>%
summarise(calOutputValue = sum(calOutputValue)) %>%
ungroup %>%
mutate(subs.share.weight = 0,
subs.share.weight = replace(subs.share.weight, calOutputValue > 0, 1)) %>%
select(-calOutputValue) %>%
# Combine with subsector year and technology shareweights
right_join(L2011.AgProduction_ag_irr, by = c("region", "GCAM_commodity", "GLU_name", "year")) %>%
# Copy to high and low management levels
repeat_add_columns(tibble(MGMT = c("hi", "lo"))) %>%
# Add sector, subsector, technology names
mutate(AgSupplySector = GCAM_commodity,
AgSupplySubsector = paste(GCAM_commodity, GLU_name, sep = "_"),
AgProductionTechnology = paste(GCAM_commodity, GLU_name, IRR_RFD, MGMT, sep = "_")) %>%
select(LEVEL2_DATA_NAMES[["AgProduction"]]) ->
L2011.AgProduction_ag_irr
# For agricultural product calibrated output, use the specific management-partitioned data
L181.ag_Prod_Mt_R_C_Y_GLU_irr_level %>%
filter(year %in% MODEL_BASE_YEARS) %>%
mutate(calOutputValue = round(value, digits = aglu.DIGITS_CALOUTPUT)) %>%
left_join_error_no_match(GCAM_region_names, by = "GCAM_region_ID") %>%
left_join_error_no_match(select(basin_to_country_mapping, GLU_code, GLU_name), by = c("GLU" = "GLU_code")) %>%
# Add sector, subsector, technology names
mutate(AgProductionTechnology = paste(GCAM_commodity, GLU_name, toupper(Irr_Rfd), level, sep = "_")) %>%
# Combine with subsector and technology shareweights
right_join(select(L2011.AgProduction_ag_irr, -calOutputValue),
by = c("region", "AgProductionTechnology", "year")) %>%
# recheck technology share weight after splitting technology levels and rounding
mutate(tech.share.weight = 0,
tech.share.weight = replace(tech.share.weight, calOutputValue > 0, 1)) %>%
select(LEVEL2_DATA_NAMES[["AgProduction"]]) ->
L2012.AgProduction_ag_irr_mgmt
# L2012.AgProduction_For and L2012.AgProduction_Past: Forest and pasture product calibration (output)
L123.For_Prod_bm3_R_Y_GLU %>%
# Combine forest and pasture production by region x GLU
bind_rows(L123.ag_Prod_Mt_R_Past_Y_GLU) %>%
filter(year %in% MODEL_BASE_YEARS) %>%
mutate(calOutputValue = round(value, digits = aglu.DIGITS_CALOUTPUT)) %>%
left_join_error_no_match(GCAM_region_names, by = "GCAM_region_ID") %>%
left_join_error_no_match(select(basin_to_country_mapping, GLU_code, GLU_name), by = c("GLU" = "GLU_code")) %>%
mutate(AgProductionTechnology = paste(GCAM_commodity, GLU_name, sep = "_")) ->
L201.For_Past_Prod_R_Y_GLU
# Subset only forest and pasture products from the main subsector table and paste in calibrated production, rounded
L2012.AgSupplySubsector %>%
# Use semi_join to filter region x GLU that have forest and pasture production
semi_join(L201.For_Past_Prod_R_Y_GLU, by = c("AgSupplySector" = "GCAM_commodity")) %>%
# No disaggregation of technologies
mutate(AgProductionTechnology = AgSupplySubsector) %>%
# Copy to all base years
repeat_add_columns(tibble(year = MODEL_BASE_YEARS)) %>%
left_join_error_no_match(L201.For_Past_Prod_R_Y_GLU, by = c("region", "AgProductionTechnology", "year")) %>%
# Subsector and technology shareweights (subsector requires the year as well)
mutate(share.weight.year = year,
subs.share.weight = 0,
subs.share.weight = replace(subs.share.weight, calOutputValue > 0, 1),
tech.share.weight = 0,
tech.share.weight = replace(tech.share.weight, calOutputValue > 0, 1)) %>%
select(LEVEL2_DATA_NAMES[["AgProduction"]]) ->
L2012.AgProduction_For_Past
# L2012.AgHAtoCL_irr_mgmt: Harvests area to cropland ratio per year, by technologies
# Start with the harvested-area-to-cropland table, extrapolate base year values to all model years
L122.ag_HA_to_CropLand_R_Y_GLU %>%
# Build a template table with all existing region x GLU combinations and by all model years
select(GCAM_region_ID, GLU) %>%
unique %>%
# Add all model years
repeat_add_columns(tibble(year = MODEL_YEARS)) %>%
# Full_join the original data to keep all model years for extrapolation
full_join(L122.ag_HA_to_CropLand_R_Y_GLU, by = c("GCAM_region_ID", "GLU", "year")) %>%
mutate(year = as.numeric(year), value = as.numeric(value)) %>%
group_by(GCAM_region_ID, GLU) %>%
# Copy the last base year value to all model years
mutate(value = approx_fun(year, value, rule = 2)) %>%
ungroup %>%
# Drop other historical years that are not in model base years
filter(year %in% MODEL_YEARS) %>%
mutate(harvests.per.year = round(value, digits = aglu.DIGITS_CALOUTPUT)) %>%
left_join_error_no_match(GCAM_region_names, by = "GCAM_region_ID") %>%
left_join_error_no_match(select(basin_to_country_mapping, GLU_code, GLU_name), by = c("GLU" = "GLU_code")) ->
L201.ag_HA_to_CropLand_R_Y_GLU
# Paste in harvested-area-to-cropland only for agriculture commodities, repeat by irr/rfd and mgmt techs
L2012.AgSupplySubsector %>%
semi_join(L101.ag_Prod_Mt_R_C_Y_GLU, by = c("AgSupplySector" = "GCAM_commodity")) %>%
# Copy to all model years
repeat_add_columns(tibble(year = MODEL_YEARS)) %>%
# Separate the AgSupplySubsector variable to get GLU names for matching in the harvest data
mutate(AgSupplySubsector = sub("Root_Tuber", "RootTuber", AgSupplySubsector)) %>%
separate(AgSupplySubsector, c("GCAM_commodity", "GLU_name"), sep = "_") %>%
left_join(L201.ag_HA_to_CropLand_R_Y_GLU, by = c("region", "GLU_name", "year")) %>%
# Copy to both irrigated and rainfed technologies
repeat_add_columns(tibble(IRR_RFD = c("IRR", "RFD"))) %>%
# Copy to high and low management levels
repeat_add_columns(tibble(MGMT = c("hi", "lo"))) %>%
# Add subsector and technology names
mutate(GCAM_commodity = sub("RootTuber", "Root_Tuber", GCAM_commodity),
AgSupplySubsector = paste(GCAM_commodity, GLU_name, sep = "_"),
AgProductionTechnology = paste(GCAM_commodity, GLU_name, IRR_RFD, MGMT, sep = "_")) %>%
select(LEVEL2_DATA_NAMES[["AgHAtoCL"]]) ->
L2012.AgHAtoCL_irr_mgmt
# L2012.AgYield_bio_ref: bioenergy yields.
# For bio grass crops, start with data of irrigated and rainfed split;
# For bio tree crops, use the no-tech-split data of bio grass crops,
# whenever it is not available, use a minimum default yield;
# then split to irrigated and rainfed using irr:rfd yield ratios from grass crops;
# And last for both crops, apply different yield multipliers for high and low managements
# L2011.AgYield_bio_grass_irr: yields of bioenergy grass crops, with irrigated and rainfed split
L163.ag_irrBioYield_GJm2_R_GLU %>%
mutate(IRR_RFD = "IRR") %>%
# Combine irrigated and rainfed yield data
bind_rows(mutate(L163.ag_rfdBioYield_GJm2_R_GLU, IRR_RFD = "RFD")) %>%
left_join_error_no_match(GCAM_region_names, by = "GCAM_region_ID") %>%
left_join_error_no_match(select(basin_to_country_mapping, GLU_code, GLU_name), by = c("GLU" = "GLU_code")) %>%
mutate(AgSupplySubsector = paste("biomass_grass", GLU_name, sep = "_")) ->
L2011.AgYield_bio_grass_irr
# From the subsector generic table, filter bio grass crops
L2012.AgSupplySubsector %>%
filter(grepl("biomass_grass", AgSupplySubsector)) %>%
# Copy to all base years
repeat_add_columns(tibble(year = MODEL_BASE_YEARS)) %>%
# Copy to both irrigated and rainfed technologies
repeat_add_columns(tibble(IRR_RFD = c("IRR", "RFD"))) %>%
# Match in yield data, use left_join instead because of NAs
left_join(L2011.AgYield_bio_grass_irr, by = c("region", "AgSupplySubsector", "IRR_RFD")) %>%
# Round yield data
mutate(yield = round(Yield_GJm2, digits = aglu.DIGITS_CALOUTPUT)) ->
L2011.AgYield_bio_grass_irr
L2011.AgYield_bio_grass_irr %>%
# Places with no irrigated crop production will return missing values here.
# May as well set these yields to 0 to ensure that they get no irrigated bioenergy production
# in the future periods (most are tropical areas with no need for irrigation).
replace_na(list(yield = 0)) %>%
mutate(AgProductionTechnology = paste(AgSupplySubsector, IRR_RFD, sep = "_")) %>%
select(LEVEL2_DATA_NAMES[["AgYield"]]) ->
L2011.AgYield_bio_grass_irr
# L201.AgYield_bio_tree: base year biomass yields, tree bioenergy crops
# Start with the grass crop yields with no tech split where available
L113.ag_bioYield_GJm2_R_GLU %>%
left_join_error_no_match(GCAM_region_names, by = "GCAM_region_ID") %>%
left_join_error_no_match(select(basin_to_country_mapping, GLU_code, GLU_name), by = c("GLU" = "GLU_code")) %>%
mutate(yield = round(Yield_GJm2, digits = aglu.DIGITS_CALOUTPUT)) %>%
select(-GCAM_region_ID, -GLU, -Yield_GJm2) ->
L201.AgYield_bio_grass
# From the subsector table, filter bio tree crops
L2012.AgSupplySubsector %>%
filter(grepl("biomass_tree", AgSupplySubsector)) %>%
# No tech split yet
mutate(AgProductionTechnology = AgSupplySubsector) %>%
# Copy to all base years
repeat_add_columns(tibble(year = MODEL_BASE_YEARS)) %>%
select(LEVEL2_DATA_NAMES[["AgTechYr"]]) %>%
mutate(GLU_name = AgSupplySubsector,
GLU_name = sub("biomass_tree_", "", GLU_name)) %>%
# Match in grass crop yields where available, use left_join instead because of NAs
left_join(L201.AgYield_bio_grass, by = c("region", "GLU_name")) %>%
# Where not available (i.e., where there is forest but not cropped land),
# use a default minimum as these are likely remote / unpopulated lands
replace_na(list(yield = min(L201.AgYield_bio_grass$yield))) %>%
select(LEVEL2_DATA_NAMES[["AgYield"]]) ->
L201.AgYield_bio_tree
# L2011.AgYield_bio_tree_irr: yield of bioenergy tree crops (use the irr:rfd yield ratios from grass crops)
# This method essentially copies prior versions of GCAM with irrigation, albeit in a different sequence.
# Before, we used the generic bioenergy crop yields by irr/rfd, multiplied by an assumed tree:grass yield conversion factor.
# Here, we start with the generic tree bioenergy crop yields in each land use region,
# and multiply by generic:irr and generic:rfd conversion factors.
# Compile the generic:irr and generic:rfd conversion factors
L201.AgYield_bio_grass %>%
mutate(AgSupplySubsector = paste("biomass_grass", GLU_name, sep = "_")) %>%
# Generic bio grass crop yield w/o tech split
rename(generic.yield = yield) %>%
# Match in irrigated and rainfed bio grass yields
right_join(L2011.AgYield_bio_grass_irr, by = c("region", "AgSupplySubsector")) %>%
# Compute conversion factor as irr/generic, and rfd/generic
mutate(factor = yield / generic.yield,
# Prepare for bio tree crops
AgSupplySubsector = sub("biomass_grass", "biomass_tree", AgSupplySubsector),
AgProductionTechnology = sub("biomass_grass", "biomass_tree", AgProductionTechnology)) %>%
select(-GLU_name, -yield, -generic.yield) ->
L2011.irr_rfd_factors
# Multiply generic tree crop yields by the conversion factors
# For regions that do not have grass crops but only tree crops (e.g., regions w forest but no ag production),
# simply use the generic defaults, which are likely minor ag regions anyway
L201.AgYield_bio_tree %>%
# Copy to both irrigated and rainfed technologies
repeat_add_columns(tibble(IRR_RFD = c("IRR", "RFD"))) %>%
mutate(AgProductionTechnology = paste(AgProductionTechnology, IRR_RFD, sep = "_")) %>%
# Match in conversion factors
left_join(L2011.irr_rfd_factors,
by = c("region", "AgSupplySector", "AgSupplySubsector", "AgProductionTechnology", "year")) %>%
# Calculate the irrigated and rainfed yields using the conversion factors from bio grass crops
mutate(yield_irr = yield * factor,
# When grass crops are not available, use the generic yields
yield = replace(yield, !is.na(yield_irr), yield_irr[!is.na(yield_irr)]),
yield = round(yield, digits = aglu.DIGITS_CALOUTPUT)) %>%
select(LEVEL2_DATA_NAMES[["AgYield"]]) ->
L2011.AgYield_bio_tree_irr
# Last step, apply different yield multipliers for high and low mgmt techs
# Prepare a yield multiplier table, with the mgmt level as a column ID
L181.YieldMult_R_bio_GLU_irr %>%
gather(level, yieldmult, -GCAM_region_ID, -GLU, -Irr_Rfd) %>%
mutate(level = sub("yieldmult_", "", level),
Irr_Rfd = toupper(Irr_Rfd)) %>%
left_join_error_no_match(GCAM_region_names, by = "GCAM_region_ID") %>%
left_join_error_no_match(select(basin_to_country_mapping, GLU_code, GLU_name), by = c("GLU" = "GLU_code")) ->
L2012.YieldMult_R_bio_GLU_irr
# Bind the tree and grass tables, repeat by mgmt level, and match in the yield multipliers
L2011.AgYield_bio_grass_irr %>%
bind_rows(L2011.AgYield_bio_tree_irr) %>%
# Copy to high and low management levels
repeat_add_columns(tibble(MGMT = c("hi", "lo"))) %>%
# Separate technology to match in the multipliers
separate(AgProductionTechnology, c("biomass", "type", "GLU_name", "IRR_RFD")) %>%
# Match in multipliers, use left_join instead because of NAs
left_join(L2012.YieldMult_R_bio_GLU_irr, by = c("region", "GLU_name", "IRR_RFD" = "Irr_Rfd", "MGMT" = "level")) %>%
# For minor region/GLUs that are missing from the ag data, set the multipliers to 1 (effectively fixing yields in all periods)
replace_na(list(yieldmult = 1)) %>%
mutate(yield = yield * yieldmult,
# Add technology back
AgProductionTechnology = paste(AgSupplySubsector, IRR_RFD, MGMT, sep = "_")) %>%
select(LEVEL2_DATA_NAMES[["AgYield"]]) ->
L2012.AgYield_bio_ref
# Add sector, subsector, and technology information to L201.AgYield_bio_grass
L201.AgYield_bio_grass %>%
mutate(AgSupplySector = "biomass",
AgSupplySubsector = paste0("biomass_grass_", GLU_name),
AgProductionTechnology = AgSupplySubsector) %>%
repeat_add_columns((tibble(year = MODEL_BASE_YEARS))) %>%
select(-GLU_name) ->
L201.AgYield_bio_grass
# Produce outputs
L2012.AgSupplySector %>%
add_title("Generic information for agriculture supply sectors") %>%
add_units("Unitless") %>%
add_comments("Specify generic supply sector characteristics (units, calprice, market, logit)") %>%
add_comments("At the supplysector (market) level, all regions get all supplysectors") %>%
add_comments("Remove any regions for which agriculture and land use are not modeled") %>%
add_legacy_name("L2012.AgSupplySector") %>%
add_precursors("common/GCAM_region_names",
"water/basin_to_country_mapping",
"aglu/A_agSupplySector",
"L132.ag_an_For_Prices",
"L1321.prP_R_C_75USDkg") ->
L2012.AgSupplySector
L2012.AgSupplySubsector %>%
add_title("Generic information for agriculture supply subsectors") %>%
add_units("Unitless") %>%
add_comments("Specify generic supply subsector characteristics") %>%
add_comments("At the subsector (production) level, only region x GLU combinations that actually exist are created") %>%
add_legacy_name("L2012.AgSupplySubsector") %>%
add_precursors("common/GCAM_region_names",
"water/basin_to_country_mapping",
"aglu/A_agSupplySubsector",
"L101.ag_Prod_Mt_R_C_Y_GLU",
"L123.ag_Prod_Mt_R_Past_Y_GLU",
"L123.For_Prod_bm3_R_Y_GLU") ->
L2012.AgSupplySubsector
L2012.AgProduction_ag_irr_mgmt %>%
add_title("Input table for agriculture commodity production by all technologies") %>%
add_units("Mt") %>%
add_comments("Calibrated outputs are specified by each technology") %>%
add_comments("Subsector shareweights are set at the aggregated region x GLU level, same for all tech") %>%
add_comments("Technology shareweights are set by irrigated vs. rainfed, same for high and low mgmt") %>%
add_legacy_name("L2012.AgProduction_ag_irr_mgmt") %>%
add_precursors("common/GCAM_region_names",
"water/basin_to_country_mapping",
"L161.ag_irrProd_Mt_R_C_Y_GLU",
"L161.ag_rfdProd_Mt_R_C_Y_GLU",
"L181.ag_Prod_Mt_R_C_Y_GLU_irr_level") ->
L2012.AgProduction_ag_irr_mgmt
L2012.AgProduction_For_Past %>%
filter(AgSupplySector == "Forest") %>%
add_title("Input table for forest production") %>%
add_units("bm3") %>%
add_comments("Calibrated ouputs or shareweights are not specify by technology") %>%
add_legacy_name("L2012.AgProduction_For") %>%
same_precursors_as("L2012.AgSupplySubsector") ->
L2012.AgProduction_For
L2012.AgProduction_For_Past %>%
filter(AgSupplySector == "Pasture") %>%
add_title("Input table for pasture production") %>%
add_units("Mt") %>%
add_comments("Calibrated ouputs or shareweights are not specify by technology") %>%
add_legacy_name("L2012.AgProduction_Past") %>%
same_precursors_as("L2012.AgSupplySubsector") ->
L2012.AgProduction_Past
L2012.AgHAtoCL_irr_mgmt %>%
add_title("Harvest area to cropland value of agricultural commodities by year and technology") %>%
add_units("Uniteless") %>%
add_comments("Copy the same value to all technologies") %>%
add_comments("Exclude forest and pasture") %>%
add_legacy_name("L2012.AgHAtoCL_irr_mgmt") %>%
same_precursors_as("L2012.AgProduction_ag_irr_mgmt") %>%
add_precursors("L122.ag_HA_to_CropLand_R_Y_GLU") ->
L2012.AgHAtoCL_irr_mgmt
L2012.AgYield_bio_ref %>%
add_title("Bioenergy grass and tree crops yields by all technologies") %>%
add_units("GJ/m2") %>%
add_comments("Grass crops yields are used for tree crops where available") %>%
add_comments("Apply the irr:generic and rfd:generic conversion factors from grass crops") %>%
add_comments("Use default minimum value where grass crops are not available") %>%
add_comments("Apply different multipliers to high and low management for both grass and tree crops") %>%
add_legacy_name("L2012.AgYield_bio_ref") %>%
add_precursors("common/GCAM_region_names",
"water/basin_to_country_mapping",
"aglu/A_agSupplySector",
"aglu/A_agSupplySubsector",
"L113.ag_bioYield_GJm2_R_GLU",
"L163.ag_irrBioYield_GJm2_R_GLU",
"L163.ag_rfdBioYield_GJm2_R_GLU",
"L181.YieldMult_R_bio_GLU_irr") ->
L2012.AgYield_bio_ref
L201.AgYield_bio_grass %>%
add_title("Bioenergy grass crops yields for aggregate tech") %>%
add_units("GJ/m2") %>%
add_comments("Append region and basin names to L113.ag_bioYield_GJm2_R_GLU") %>%
add_legacy_name("L201.AgYield_bio_grass") %>%
add_precursors("common/GCAM_region_names",
"water/basin_to_country_mapping",
"L113.ag_bioYield_GJm2_R_GLU") ->
L201.AgYield_bio_grass
L201.AgYield_bio_tree %>%
add_title("Bioenergy tree crops yields for aggregate tech") %>%
add_units("GJ/m2") %>%
add_comments("Set bioenergy tree crop yield to the same as the bioenergy grass yield") %>%
add_comments("If data is missing for a region/basin, use the minimum yield") %>%
add_legacy_name("L201.AgYield_bio_tree") %>%
same_precursors_as("L201.AgYield_bio_grass") %>%
same_precursors_as("L2012.AgSupplySubsector") ->
L201.AgYield_bio_tree
return_data(L2012.AgSupplySector, L2012.AgSupplySubsector, L2012.AgProduction_ag_irr_mgmt, L2012.AgProduction_For, L2012.AgProduction_Past, L2012.AgHAtoCL_irr_mgmt, L2012.AgYield_bio_ref, L201.AgYield_bio_grass, L201.AgYield_bio_tree)
} else {
stop("Unknown command")
}
}
|
/input/gcamdata/R/zchunk_L2012.ag_For_Past_bio_input_irr_mgmt.R
|
permissive
|
ashiklom/gcam-core
|
R
| false
| false
| 29,724
|
r
|
# ---
# repo: r-lib/rlang
# file: standalone-zeallot.R
# last-updated: 2020-11-24
# license: https://unlicense.org
# ---
#
# This drop-in file implements a simple version of zeallot::`%<-%`.
#
# nocov start
`%<-%` <- function(lhs, value) {
lhs <- substitute(lhs)
env <- caller_env()
if (!is_call(lhs, "c")) {
abort("The left-hand side of `%<-%` must be a call to `c()`.")
}
vars <- as.list(lhs[-1])
if (length(value) != length(vars)) {
abort("The left- and right-hand sides of `%<-%` must be the same length.")
}
for (i in seq_along(vars)) {
var <- vars[[i]]
if (!is_symbol(var)) {
abort(paste0("Element ", i, " of the left-hand side of `%<-%` must be a symbol."))
}
env[[as_string(var)]] <- value[[i]]
}
invisible(value)
}
# nocov end
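# Illustrative usage sketch (not part of the original file):
#
# c(a, b) %<-% list(1, 2)
# # afterwards a == 1 and b == 2; a non-`c()` left-hand side or a length
# # mismatch triggers abort() as implemented above.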
|
/R/standalone-zeallot.R
|
permissive
|
markfairbanks/tidytable
|
R
| false
| false
| 795
|
r
|
\name{GR.Hospitals}
\alias{GR.Hospitals}
\docType{data}
\title{Greek Hospitals}
\description{Locations of General and Specialised Hospitals in Greece.}
\usage{data("GR.Hospitals")}
\format{
A data frame with 132 observations on the following 15 variables.
\describe{
    \item{\code{Address}}{a character vector of hospitals' addresses}
    \item{\code{Name}}{a character vector of hospitals' names}
    \item{\code{ID}}{an integer vector of hospitals' IDs}
\item{\code{X}}{a numeric vector of x coordinates (GGRS87 - Greek Grid)}
\item{\code{Y}}{a numeric vector of y coordinates (GGRS87 - Greek Grid)}
\item{\code{Postcode}}{a numeric vector of the hospitals' postcodes}
\item{\code{URL}}{a character vector of hospitals' websites}
    \item{\code{DYPE}}{an integer vector of hospitals' Healthcare Regions}
\item{\code{KallCode}}{a character vector of municipality codes to link with data from the Hellenic Statistical Authority (EL.STAT.)}
\item{\code{Dimos}}{a character vector of municipality names (Greek with latin characters)}
\item{\code{Lat}}{a numeric vector of hospitals' latitudes (WGS84)}
\item{\code{Lon}}{a numeric vector of hospitals' longitudes (WGS84)}
    \item{\code{Beds15}}{an integer vector of hospitals' beds in 2015}
    \item{\code{Patien15}}{an integer vector of hospitals' admitted patients (hospital discharges) in 2015}
\item{\code{Nights15}}{a numeric vector of in-patients' nights in 2015}
}
}
\details{
The X,Y coordinates (as well as the Lat/Lon coordinates) refer to the exact locations of operating hospitals in the summer of 2016. Their identification is the result of registry data, formal hospital addresses, OpenStreetMap (https://www.openstreetmap.org) and Google Maps (https://maps.google.com) including Street View. They have been manually digitised by the author of this package.
}
\source{
The source of the hospital beds, hospital discharges and in-patient nights statistics is the Ministry of Health (http://www.moh.gov.gr/articles/bihealth/stoixeia-noshleytikhs-kinhshs/3865-stoixeia-noshleythentwn-sta-nosokomeia-toy-esy-etoys-2015?dl=1).
}
\references{
Kalogirou, S. (2017). Spatial inequality in the accessibility to hospitals in Greece, The International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences, XLII-4/W2, 91-94, https://doi.org/10.5194/isprs-archives-XLII-4-W2-91-2017.
}
\examples{
data(GR.Hospitals)
hist(GR.Hospitals$Beds15)
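# Illustrative addition (not in the original examples): plot hospital
# locations using the documented WGS84 coordinates
plot(GR.Hospitals$Lon, GR.Hospitals$Lat, pch = 16,
     xlab = "Longitude", ylab = "Latitude")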
}
\keyword{datasets}
\keyword{Greek Public Hospitals}
|
/man/GR.Hospitals.Rd
|
no_license
|
cran/SpatialAcc
|
R
| false
| false
| 2,554
|
rd
|
library(ggplot2)
library(ggcorrplot)
library(ggalt)
library(ggExtra)
library(ggthemes)
library(ggplotify)
library(treemapify)
library(plyr)
library(dplyr)
library(scales)
library(zoo)
library(lubridate)
Sys.setlocale("LC_ALL", 'pt_BR.UTF-8')
## ggplot2 theme
seta <- grid::arrow (length = grid::unit(0.2, "cm"), type = "open")
my_theme <- function(base_size = 14, base_family = "Arial") {
theme_bw(base_size = base_size, base_family = base_family) %+replace%
theme(axis.ticks = element_blank(),
axis.line = element_line(arrow = seta, color = "gray20"),
legend.background = element_blank(),
legend.key = element_blank(),
panel.background = element_blank(),
panel.border = element_blank(),
strip.background = element_blank(),
plot.background = element_blank(),
plot.title = element_text(hjust =1),
panel.grid = element_blank(),
complete = TRUE)
}
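## Illustrative usage sketch (not part of the original script): the custom
## theme can be added to any ggplot object, e.g.
## ggplot(mtcars, aes(wt, mpg)) + geom_point() + my_theme()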
prouni <- readRDS("dados.rds")
por_idade_quantidade <- prouni %>%
  group_by(IDADE) %>%
summarize(total = n()) %>%
filter(IDADE > 16) %>%
filter(IDADE < 81) %>%
as.data.frame()
### age vs. number of scholarship recipients
plot_scatter_idade <- ggplot(por_idade_quantidade, aes(x = IDADE, y = total)) +
geom_jitter(width = 0.8, height = 0.8, pch = 21, colour = "black",
fill = "white", size = 4) +
geom_smooth(method = "lm", se = FALSE) +
labs(subtitle = "",
y = "Número de bolsistas",
x = "Idade",
title = "Idade x número de bolsistas (2005 - 2016)",
caption = "Fonte: ") +
my_theme()
ggMarginal(plot_scatter_idade, type = "boxplot", fill = "transparent")
plot_scatter_idade
|
/plots/scatterplot_idade.R
|
no_license
|
luizhsalazar/data-visualization
|
R
| false
| false
| 1,699
|
r
|
library(testthat)
library(samplingbook)
test_check("samplingbook")
|
/tests/testthat.R
|
no_license
|
cran/samplingbook
|
R
| false
| false
| 72
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bayesianOverlap.R
\name{bayesianOverlap}
\alias{bayesianOverlap}
\title{Calculate the overlap between two ellipses based on their posterior
distributions.}
\usage{
bayesianOverlap(
ellipse1,
ellipse2,
ellipses.posterior,
draws = 10,
p.interval = 0.95,
n = 100,
do.plot = FALSE
)
}
\arguments{
\item{ellipse1}{character code of the form \code{"x.y"} where \code{x} is an
integer indexing the community, and \code{y} an integer indexing the group
within that community. This specifies the first of two ellipses whose
overlap will be compared.}
\item{ellipse2}{same as \code{ellipse1} specifying a second ellipse.}
\item{ellipses.posterior}{a list of posterior means and covariances fitted
using \code{\link{siberEllipses}}.}
\item{draws}{an integer specifying how many of the posterior draws are to be
used to estimate the posterior overlap. Defaults to \code{10} which uses
the first 10 draws. In all cases, the selection will be \code{1:draws} so
independence of the posterior draws is assumed. Setting to \code{NULL} will
use all the draws (WARNING - likely to be very slow).}
\item{p.interval}{the prediction interval used to scale the ellipse as per
\code{\link{addEllipse}}.}
\item{n}{the number of points on the edge of the ellipse used to define it.
Defaults to \code{100} as per \code{\link{addEllipse}}.}
\item{do.plot}{logical switch to determine whether the corresponding ellipses
should be plotted or not. A use-case would be in conjunction with a low
numbered \code{draws} so as to visualise a relatively small number of the
posterior ellipses. Defaults to \code{FALSE}.}
}
\value{
A data.frame comprising three columns: the area of overlap, the area
of the first ellipse, and the area of the second ellipse, with as many rows
as specified by \code{draws}.
}
\description{
This function loops over the posterior distribution of the means and
covariance matrices of two specified groups.
}
|
/man/bayesianOverlap.Rd
|
no_license
|
AndrewLJackson/SIBER
|
R
| false
| true
| 2,013
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PowerPackage.R
\name{cube}
\alias{cube}
\title{Cube Calculation}
\usage{
cube(x)
}
\arguments{
\item{x}{numeric}
}
\value{
numeric
}
\description{
Cube Calculation
}
\examples{
cube(2)
}
|
/man/cube.Rd
|
no_license
|
MichaelDiSu/PowerPackage
|
R
| false
| true
| 265
|
rd
|
\name{setEdgeLabelColorDirect}
\alias{setEdgeLabelColorDirect}
\alias{setEdgeLabelColorDirect,CytoscapeWindowClass-method}
\title{setEdgeLabelColorDirect}
\description{
In the specified CytoscapeWindow, set the color of the label of the specified
edge or edges.
}
\usage{
setEdgeLabelColorDirect(obj, edge.names, new.value)
}
\arguments{
\item{obj}{a \code{CytoscapeWindowClass} object. }
\item{edge.names}{one or more \code{String} objects, cy2-style edge names.}
\item{new.value}{a \code{String} object, an RGB color in '#RRGGBB' form.}
}
\value{
None.
}
\author{Tanja Muetze, Georgi Kolishovski, Paul Shannon}
\seealso{
setEdgeLabelColorDirect
setEdgeLabelDirect
setEdgeFontSizeDirect
}
\examples{
\dontrun{
# first, delete existing windows to save memory:
deleteAllWindows(CytoscapeConnection())
cw <- CytoscapeWindow ('setEdgeLabelColorDirect.test', graph=makeSimpleGraph())
displayGraph (cw)
layoutNetwork(cw, 'force-directed')
edge.names <- as.character (cy2.edge.names (cw@graph))
setEdgeLabelDirect (cw, edge.names, 'some label')
setEdgeLabelColorDirect (cw, edge.names, '#00FF00')
setEdgeLabelColorDirect (cw, edge.names [1:2], '#FF0000')
}
}
\keyword{graph}
|
/man/setEdgeLabelColorDirect.Rd
|
no_license
|
sebastianrossel/Bioconductor_RCy3_the_new_RCytoscape
|
R
| false
| false
| 1,208
|
rd
|
### Rscript /broad/hptmp/mesbah/DNAm/April5_2021/ewasCHIP/format_4Metal.R /broad/hptmp/mesbah/DNAm/April5_2021/ewasCHIP/AA.CHIP_EWAS.swan.rda /broad/hptmp/mesbah/DNAm/April5_2021/ewasCHIP/AA_CHIP_EWAS.SWAN.tsv
## Rscript /broad/hptmp/mesbah/DNAm/April5_2021/ewasCHIP/format_4Metal.R /broad/hptmp/mesbah/DNAm/April5_2021/ewasCHIP/EA.CHIP_EWAS.swan.rda /broad/hptmp/mesbah/DNAm/April5_2021/ewasCHIP/EA_CHIP_EWAS.SWAN.tsv
############
ARGs <- commandArgs(TRUE)
ewas_input <- ARGs[1]
output_filename <- ARGs[2]
#################
# Load Libraries
# library(data.table)
# library(CpGassoc)
loadRData <- function(fileName){
#loads an RData file, and returns it
load(fileName)
get(ls()[ls() != "fileName"])
}
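## Illustrative usage (file name taken from the example calls in the header
## comments above): d <- loadRData("AA.CHIP_EWAS.swan.rda")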
##
d <- loadRData(fileName=ewas_input)
res <- d$results
res$adj.intercept <- d$coefficients$adj.intercept
res$effect.size <- d$coefficients$effect.size
res$std.error <- d$coefficients$std.error
res$Ref <- "A"
res$Alt <- "T"
res <- subset(res, !is.na(res$T.statistic) )
##
write.table(res, output_filename, row.names = FALSE, quote = FALSE, sep = "\t")
|
/Scripts/meta_analysis/format_4Metal.R
|
permissive
|
MMesbahU/CHIP-EWAS
|
R
| false
| false
| 1,066
|
r
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "credit-g")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn = makeLearner("classif.ranger", par.vals = list(), predict.type = "prob")
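# Note: the task wraps the credit-g data with "class" as the target, and the
# learner is a ranger random forest that returns class probabilities, which
# the AUC measure used in the resampling below requires.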
#:# hash
#:# 27becaa837d41b4e8b86ceb633f89135
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
/models/openml_credit-g/classification_class/27becaa837d41b4e8b86ceb633f89135/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 682
|
r
|
#' @title Navigate Upstream with Tributaries
#' @description Traverse NHDPlus network upstream with tributaries
#' @param network data.frame NHDPlus flowlines including at a minimum:
#' COMID, Pathlength, LENGTHKM, and Hydroseq.
#' @param comid integer Identifier to start navigating from.
#' @param distance numeric distance in km to limit how many COMIDs are
#' returned. The COMID that exceeds the distance specified is returned.
#' @return integer vector of all COMIDs upstream with tributaries of the
#' starting COMID.
#' @importFrom dplyr filter select
#' @export
#' @examples
#' library(sf)
#' sample_flines <- read_sf(system.file("extdata",
#' "petapsco_flowlines.gpkg",
#' package = "nhdplusTools"))
#' plot(sample_flines$geom)
#' start_COMID <- 11690196
#' UT_COMIDs <- get_UT(sample_flines, start_COMID)
#' plot(dplyr::filter(sample_flines, COMID %in% UT_COMIDs)$geom,
#' col = "red", add = TRUE)
#'
#' UT_COMIDs <- get_UT(sample_flines, start_COMID, distance = 50)
#' plot(dplyr::filter(sample_flines, COMID %in% UT_COMIDs)$geom,
#' col = "blue", add = TRUE)
#'
get_UT <- function(network, comid, distance = NULL) {
if ("sf" %in% class(network)) network <- sf::st_set_geometry(network, NULL)
network <- network %>% check_names("get_UT") %>%
dplyr::select(get("get_UT_attributes", nhdplusTools_env))
start_comid <- get_start_comid(network, comid)
if (!is.null(distance)) {
if (distance < start_comid$LENGTHKM) return(comid)
}
all <- private_get_UT(network, comid)
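  # When a distance limit is given, convert it to a Pathlength cutoff:
  # Pathlength increases moving upstream (it is the along-network distance to
  # the terminal flowline), so COMIDs whose Pathlength exceeds the cutoff are
  # dropped from the upstream-with-tributaries set.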
if (!is.null(distance)) {
stop_pathlength <- start_comid$Pathlength -
start_comid$LENGTHKM +
distance
network <- filter(network, COMID %in% all)
return(filter(network, Pathlength <= stop_pathlength)$COMID)
} else {
return(all)
}
}
private_get_UT <- function(network, comid) {
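  # Recursive helper: collect the mainstem (same LevelPathI) at and above the
  # starting COMID(s), find the level paths of tributaries whose downstream
  # hydrosequence (DnHydroseq) joins that mainstem, and recurse up each
  # tributary until no more joining level paths are found.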
main <- filter(network, COMID %in% comid)
if (length(main$Hydroseq) == 1) {
full_main <- filter(network,
LevelPathI %in% main$LevelPathI &
Hydroseq >= main$Hydroseq)
trib_lpid <- filter(network, DnHydroseq %in% full_main$Hydroseq &
!LevelPathI %in% main$LevelPathI &
Hydroseq >= main$Hydroseq)$LevelPathI
} else {
full_main <- filter(network, LevelPathI %in% main$LevelPathI)
trib_lpid <- filter(network, DnHydroseq %in% full_main$Hydroseq &
!LevelPathI %in% main$LevelPathI)$LevelPathI
}
trib_comid <- filter(network, LevelPathI %in% trib_lpid)$COMID
if (length(trib_comid) > 0) {
return(c(full_main$COMID, private_get_UT(network, trib_comid)))
} else {
return(full_main$COMID)
}
}
#' @title Navigate Upstream Mainstem
#' @description Traverse NHDPlus network upstream main stem
#' @param network data.frame NHDPlus flowlines including at a minimum:
#' COMID, Pathlength, LevelPathI, UpHydroseq, and Hydroseq.
#' @param comid integer identifier to start navigating from.
#' @param distance numeric distance in km to limit how many COMIDs are
#' returned. The COMID that exceeds the distance specified is returned.
#' @param sort if TRUE, the returned COMID vector will be sorted in order of distance from the input COMID (nearest to farthest)
#' @param include if TRUE, the input COMID will be included in the returned COMID vector
#' @return integer vector of all COMIDs upstream of the starting COMID
#' along the mainstem
#' @importFrom dplyr filter select arrange
#' @export
#' @examples
#' library(sf)
#' sample_flines <- read_sf(system.file("extdata",
#' "petapsco_flowlines.gpkg",
#' package = "nhdplusTools"))
#' plot(sample_flines$geom)
#' start_COMID <- 11690196
#' UM_COMIDs <- get_UM(sample_flines, start_COMID)
#' plot(dplyr::filter(sample_flines, COMID %in% UM_COMIDs)$geom,
#' col = "red", add = TRUE, lwd = 3)
#'
#' UM_COMIDs <- get_UM(sample_flines, start_COMID, distance = 50)
#' plot(dplyr::filter(sample_flines, COMID %in% UM_COMIDs)$geom,
#' col = "blue", add = TRUE, lwd = 2)
#'
get_UM <- function(network, comid, distance = NULL, sort = FALSE, include = TRUE) {
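  # Upstream mainstem: keep flowlines that share the starting COMID's
  # LevelPathI and have a Hydroseq at or above it, optionally truncated to
  # `distance` km using Pathlength.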
network <- check_names(network, "get_UM")
main <- network %>%
filter(COMID %in% comid) %>%
select(COMID, LevelPathI, Hydroseq, Pathlength, LENGTHKM)
main_us <- network %>%
filter(LevelPathI %in% main$LevelPathI & Hydroseq >= main$Hydroseq) %>%
select(COMID, Hydroseq, Pathlength, LENGTHKM)
if (!is.null(distance)) {
if (length(main$LENGTHKM) == 1) {
if (main$LENGTHKM > distance) {
return(main$COMID)
}
}
stop_pathlength <- main$Pathlength - main$LENGTHKM + distance
main_us <- filter(main_us, Pathlength <= stop_pathlength)
}
if(sort) { main_us <- arrange(main_us, Hydroseq) }
  if(!include) { main_us <- filter(main_us, COMID != comid) }
return(main_us$COMID)
}
#' @title Navigate Downstream Mainstem
#' @description Traverse NHDPlus network downstream main stem
#' @param network data.frame NHDPlus flowlines including at a minimum:
#' COMID, LENGTHKM, DnHydroseq, and Hydroseq.
#' @param comid integer identifier to start navigating from.
#' @param distance numeric distance in km to limit how many COMIDs are
#' returned. The COMID that exceeds the distance specified is returned.
#' @param sort if TRUE, the returned COMID vector will be sorted in order of distance from the input COMID (nearest to farthest)
#' @param include if TRUE, the input COMID will be included in the returned COMID vector
#' @return integer vector of all COMIDs downstream of the starting COMID
#' along the mainstem
#' @importFrom dplyr select filter arrange desc
#' @export
#' @examples
#' library(sf)
#' sample_flines <- read_sf(system.file("extdata",
#' "petapsco_flowlines.gpkg",
#' package = "nhdplusTools"))
#' plot(sample_flines$geom)
#' start_COMID <- 11690092
#' DM_COMIDs <- get_DM(sample_flines, start_COMID)
#' plot(dplyr::filter(sample_flines, COMID %in% DM_COMIDs)$geom,
#' col = "red", add = TRUE, lwd = 3)
#'
#' DM_COMIDs <- get_DM(sample_flines, start_COMID, distance = 40)
#' plot(dplyr::filter(sample_flines, COMID %in% DM_COMIDs)$geom,
#' col = "blue", add = TRUE, lwd = 2)
#'
#'
get_DM <- function(network, comid, distance = NULL, sort = FALSE, include = TRUE) {
if ("sf" %in% class(network)) { network <- sf::st_set_geometry(network, NULL) }
type <- ifelse(is.null(distance), "get_DM_nolength", "get_DM")
network <- network %>%
check_names(type) %>%
select(get(paste0(type, "_attributes"), nhdplusTools_env))
start_comid <- get_start_comid(network, comid)
if (!is.null(distance)) {
if (distance < start_comid$LENGTHKM){
return(comid)
}
}
main_ds <- private_get_DM(network, comid)
if (!is.null(distance)) {
stop_pathlength <- start_comid$Pathlength + start_comid$LENGTHKM - distance
main_ds <- network %>%
filter(COMID %in% main_ds$COMID, (Pathlength + LENGTHKM) >= stop_pathlength)
}
if(sort){ main_ds <- arrange(main_ds, desc(Hydroseq)) }
if(!include){ main_ds <- filter(main_ds, COMID != comid) }
return(main_ds$COMID)
}
private_get_DM <- function(network, comid) {
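  # Recursive helper: collect flowlines downstream of the start along its
  # level path, hop to the next downstream level path via DnHydroseq, and
  # recurse until no further downstream level path exists.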
main <- ds_main <- filter(network, COMID %in% comid)
if (length(main$Hydroseq) == 1) {
ds_main <- network %>%
filter(LevelPathI %in% main$LevelPathI &
Hydroseq <= main$Hydroseq)
}
ds_hs <- ds_main %>%
filter(!DnLevelPat %in% main$LevelPathI) %>%
select(DnHydroseq)
if (nrow(ds_hs) > 0) {
ds_lpid <- network %>%
filter(Hydroseq == ds_hs$DnHydroseq) %>%
select(LevelPathI)
if (nrow(ds_lpid) > 0) {
ds_comid <- network %>%
filter(LevelPathI == ds_lpid$LevelPathI & Hydroseq <= ds_hs$DnHydroseq) %>%
select(COMID)
return(rbind(
select(ds_main, COMID, Hydroseq),
private_get_DM(network, comid = ds_comid$COMID)
))
}
}
return(select(ds_main, COMID, Hydroseq))
}
#' @title Navigate Downstream with Diversions
#' @description Traverse NHDPlus network downstream with diversions
#' NOTE: This algorithm may not scale well in large watersheds.
#' For reference, the lower Mississippi will take over a minute.
#' @param network data.frame NHDPlus flowlines including at a minimum:
#' COMID, DnMinorHyd, DnHydroseq, and Hydroseq.
#' @param comid integer identifier to start navigating from.
#' @param distance numeric distance in km to limit how many
#' COMIDs are returned.
#' The COMID that exceeds the distance specified is returned.
#' The longest of the diverted paths is used for limiting distance.
#' @return integer vector of all COMIDs downstream of the starting COMID
#' @importFrom dplyr filter
#' @export
#' @examples
#' library(sf)
#' start_COMID <- 11688818
#' sample_flines <- read_sf(system.file("extdata",
#' "petapsco_flowlines.gpkg",
#' package = "nhdplusTools"))
#' DD_COMIDs <- get_DD(sample_flines, start_COMID, distance = 4)
#' plot(dplyr::filter(sample_flines, COMID %in% DD_COMIDs)$geom,
#' col = "red", lwd = 2)
#'
#' DM_COMIDs <- get_DM(sample_flines, start_COMID, distance = 4)
#' plot(dplyr::filter(sample_flines, COMID %in% DM_COMIDs)$geom,
#' col = "blue", add = TRUE, lwd = 2)
#'
get_DD <- function(network, comid, distance = NULL) {
if ("sf" %in% class(network)) network <- sf::st_set_geometry(network, NULL)
network <- network %>% check_names("get_DD") %>%
dplyr::select(get("get_DD_attributes", nhdplusTools_env))
start_comid <- get_start_comid(network, comid)
stop_pathlength <- 0
if (!is.null(distance)) {
if (distance < start_comid$LENGTHKM) return(comid)
stop_pathlength <- start_comid$Pathlength +
start_comid$LENGTHKM -
distance
}
all <- private_get_DD(network, comid, stop_pathlength)
if (!is.null(distance)) {
network <- filter(network, COMID %in% unique(all))
return(filter(network, (Pathlength + LENGTHKM) >= stop_pathlength)$COMID)
} else {
return(unique(all))
}
}
private_get_DD <- function(network, comid, stop_pathlength = 0) {
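  # Recursive helper: like private_get_DM but also follows minor divergences
  # (DnMinorHyd), and stops recursing once every flowline on the current path
  # is at or beyond the distance limit (Pathlength <= stop_pathlength).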
main <- ds_main <- filter(network, COMID %in% comid)
if (length(main$Hydroseq) == 1) {
ds_main <- filter(network,
LevelPathI %in% main$LevelPathI &
Hydroseq <= main$Hydroseq)
}
ds_hs <- c(filter(ds_main, !DnLevelPat %in% main$LevelPathI)$DnHydroseq,
filter(ds_main, !DnMinorHyd == 0)$DnMinorHyd)
ds_lpid <- filter(network, Hydroseq %in% ds_hs)$LevelPathI
if (length(ds_lpid) > 0) {
if (length(ds_hs) == 1) {
# Same as DM
ds_comid <- filter(network,
LevelPathI %in% ds_lpid &
Hydroseq <= ds_hs)$COMID
} else {
# Works for divergent paths.
ds_hs <- filter(network, Hydroseq %in% ds_hs)
ds_comid <- filter(network, LevelPathI %in% ds_lpid) %>%
dplyr::left_join(select(ds_hs, LevelPathI, max_Hydroseq = Hydroseq),
by = "LevelPathI") %>%
filter(Hydroseq <= .data$max_Hydroseq)
ds_comid <- ds_comid$COMID
}
# This allows this algorithm to work for short distances
# in a reasonable time in large systems.
if (all(ds_main$Pathlength <= stop_pathlength)) return(ds_main$COMID)
c(ds_main$COMID, private_get_DD(network, ds_comid, stop_pathlength))
} else {
return(ds_main$COMID)
}
}
get_start_comid <- function(network, comid) {
start_comid <- filter(network, COMID == comid)
if(nrow(start_comid) > 1) {
stop("Found duplicate ID for starting catchment. Duplicate rows in network?")
}
start_comid
}
|
/R/get_network.R
|
permissive
|
hydroinfo-gis/nhdplusTools
|
R
| false
| false
| 11,844
|
r
|
#' @title Navigate Upstream with Tributaries
#' @description Traverse NHDPlus network upstream with tributaries
#' @param network data.frame NHDPlus flowlines including at a minimum:
#' COMID, Pathlength, LENGTHKM, and Hydroseq.
#' @param comid integer Identifier to start navigating from.
#' @param distance numeric distance in km to limit how many COMIDs are
#' returned. The COMID that exceeds the distance specified is returned.
#' @return integer vector of all COMIDs upstream with tributaries of the
#' starting COMID.
#' @importFrom dplyr filter select
#' @export
#' @examples
#' library(sf)
#' sample_flines <- read_sf(system.file("extdata",
#' "petapsco_flowlines.gpkg",
#' package = "nhdplusTools"))
#' plot(sample_flines$geom)
#' start_COMID <- 11690196
#' UT_COMIDs <- get_UT(sample_flines, start_COMID)
#' plot(dplyr::filter(sample_flines, COMID %in% UT_COMIDs)$geom,
#' col = "red", add = TRUE)
#'
#' UT_COMIDs <- get_UT(sample_flines, start_COMID, distance = 50)
#' plot(dplyr::filter(sample_flines, COMID %in% UT_COMIDs)$geom,
#' col = "blue", add = TRUE)
#'
get_UT <- function(network, comid, distance = NULL) {
if ("sf" %in% class(network)) network <- sf::st_set_geometry(network, NULL)
network <- network %>% check_names("get_UT") %>%
dplyr::select(get("get_UT_attributes", nhdplusTools_env))
start_comid <- get_start_comid(network, comid)
if (!is.null(distance)) {
if (distance < start_comid$LENGTHKM) return(comid)
}
all <- private_get_UT(network, comid)
if (!is.null(distance)) {
stop_pathlength <- start_comid$Pathlength -
start_comid$LENGTHKM +
distance
network <- filter(network, COMID %in% all)
return(filter(network, Pathlength <= stop_pathlength)$COMID)
} else {
return(all)
}
}
private_get_UT <- function(network, comid) {
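  # Recursive helper: collect the mainstem upstream of the start (same
  # LevelPathI, Hydroseq >=), find tributary level paths that drain into it
  # via DnHydroseq, and recurse into each tributary.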
main <- filter(network, COMID %in% comid)
if (length(main$Hydroseq) == 1) {
full_main <- filter(network,
LevelPathI %in% main$LevelPathI &
Hydroseq >= main$Hydroseq)
trib_lpid <- filter(network, DnHydroseq %in% full_main$Hydroseq &
!LevelPathI %in% main$LevelPathI &
Hydroseq >= main$Hydroseq)$LevelPathI
} else {
full_main <- filter(network, LevelPathI %in% main$LevelPathI)
trib_lpid <- filter(network, DnHydroseq %in% full_main$Hydroseq &
!LevelPathI %in% main$LevelPathI)$LevelPathI
}
trib_comid <- filter(network, LevelPathI %in% trib_lpid)$COMID
if (length(trib_comid) > 0) {
return(c(full_main$COMID, private_get_UT(network, trib_comid)))
} else {
return(full_main$COMID)
}
}
#' @title Navigate Upstream Mainstem
#' @description Traverse NHDPlus network upstream main stem
#' @param network data.frame NHDPlus flowlines including at a minimum:
#' COMID,Pathlength, LevelPathI, UpHydroseq, and Hydroseq.
#' @param comid integer identifier to start navigating from.
#' @param distance numeric distance in km to limit how many COMIDs are
#' returned. The COMID that exceeds the distance specified is returned.
#' @param sort if TRUE, the returned COMID vector will be sorted in order of distance from the input COMID (nearest to farthest)
#' @param include if TRUE, the input COMID will be included in the returned COMID vector
#' @return integer vector of all COMIDs upstream of the starting COMID
#' along the mainstem
#' @importFrom dplyr filter select arrange
#' @export
#' @examples
#' library(sf)
#' sample_flines <- read_sf(system.file("extdata",
#' "petapsco_flowlines.gpkg",
#' package = "nhdplusTools"))
#' plot(sample_flines$geom)
#' start_COMID <- 11690196
#' UM_COMIDs <- get_UM(sample_flines, start_COMID)
#' plot(dplyr::filter(sample_flines, COMID %in% UM_COMIDs)$geom,
#' col = "red", add = TRUE, lwd = 3)
#'
#' UM_COMIDs <- get_UM(sample_flines, start_COMID, distance = 50)
#' plot(dplyr::filter(sample_flines, COMID %in% UM_COMIDs)$geom,
#' col = "blue", add = TRUE, lwd = 2)
#'
get_UM <- function(network, comid, distance = NULL, sort = FALSE, include = TRUE) {
network <- check_names(network, "get_UM")
main <- network %>%
filter(COMID %in% comid) %>%
select(COMID, LevelPathI, Hydroseq, Pathlength, LENGTHKM)
main_us <- network %>%
filter(LevelPathI %in% main$LevelPathI & Hydroseq >= main$Hydroseq) %>%
select(COMID, Hydroseq, Pathlength, LENGTHKM)
if (!is.null(distance)) {
if (length(main$LENGTHKM) == 1) {
if (main$LENGTHKM > distance) {
return(main$COMID)
}
}
stop_pathlength <- main$Pathlength - main$LENGTHKM + distance
main_us <- filter(main_us, Pathlength <= stop_pathlength)
}
if(sort) { main_us <- arrange(main_us, Hydroseq) }
if(!include) { main_us = filter(main_us, COMID != comid) }
return(main_us$COMID)
}
#' @title Navigate Downstream Mainstem
#' @description Traverse NHDPlus network downstream main stem
#' @param network data.frame NHDPlus flowlines including at a minimum:
#' COMID, LENGTHKM, DnHydroseq, and Hydroseq.
#' @param comid integer identifier to start navigating from.
#' @param distance numeric distance in km to limit how many COMIDs are
#' returned. The COMID that exceeds the distance specified is returned.
#' @param sort if TRUE, the returned COMID vector will be sorted in order of distance from the input COMID (nearest to farthest)
#' @param include if TRUE, the input COMID will be included in the returned COMID vector
#' @return integer vector of all COMIDs downstream of the starting COMID
#' along the mainstem
#' @importFrom dplyr select filter arrange desc
#' @export
#' @examples
#' library(sf)
#' sample_flines <- read_sf(system.file("extdata",
#' "petapsco_flowlines.gpkg",
#' package = "nhdplusTools"))
#' plot(sample_flines$geom)
#' start_COMID <- 11690092
#' DM_COMIDs <- get_DM(sample_flines, start_COMID)
#' plot(dplyr::filter(sample_flines, COMID %in% DM_COMIDs)$geom,
#' col = "red", add = TRUE, lwd = 3)
#'
#' DM_COMIDs <- get_DM(sample_flines, start_COMID, distance = 40)
#' plot(dplyr::filter(sample_flines, COMID %in% DM_COMIDs)$geom,
#' col = "blue", add = TRUE, lwd = 2)
#'
#'
get_DM <- function(network, comid, distance = NULL, sort = FALSE, include = TRUE) {
if ("sf" %in% class(network)) { network <- sf::st_set_geometry(network, NULL) }
type <- ifelse(is.null(distance), "get_DM_nolength", "get_DM")
network <- network %>%
check_names(type) %>%
select(get(paste0(type, "_attributes"), nhdplusTools_env))
start_comid <- get_start_comid(network, comid)
if (!is.null(distance)) {
if (distance < start_comid$LENGTHKM){
return(comid)
}
}
main_ds <- private_get_DM(network, comid)
if (!is.null(distance)) {
stop_pathlength <- start_comid$Pathlength + start_comid$LENGTHKM - distance
main_ds <- network %>%
filter(COMID %in% main_ds$COMID, (Pathlength + LENGTHKM) >= stop_pathlength)
}
if(sort){ main_ds <- arrange(main_ds, desc(Hydroseq)) }
if(!include){ main_ds <- filter(main_ds, COMID != comid) }
return(main_ds$COMID)
}
private_get_DM <- function(network, comid) {
main <- ds_main <- filter(network, COMID %in% comid)
if (length(main$Hydroseq) == 1) {
ds_main <- network %>%
filter(LevelPathI %in% main$LevelPathI &
Hydroseq <= main$Hydroseq)
}
ds_hs <- ds_main %>%
filter(!DnLevelPat %in% main$LevelPathI) %>%
select(DnHydroseq)
if (nrow(ds_hs) > 0) {
ds_lpid <- network %>%
filter(Hydroseq == ds_hs$DnHydroseq) %>%
select(LevelPathI)
if (nrow(ds_lpid) > 0) {
ds_comid <- network %>%
filter(LevelPathI == ds_lpid$LevelPathI & Hydroseq <= ds_hs$DnHydroseq) %>%
select(COMID)
return(rbind(
select(ds_main, COMID, Hydroseq),
private_get_DM(network, comid = ds_comid$COMID)
))
}
}
return(select(ds_main, COMID, Hydroseq))
}
#' @title Navigate Downstream with Diversions
#' @description Traverse NHDPlus network downstream with diversions
#' NOTE: This algorithm may not scale well in large watersheds.
#' For reference, the lower Mississippi will take over a minute.
#' @param network data.frame NHDPlus flowlines including at a minimum:
#' COMID, DnMinorHyd, DnHydroseq, and Hydroseq.
#' @param comid integer identifier to start navigating from.
#' @param distance numeric distance in km to limit how many
#' COMIDs are returned.
#' The COMID that exceeds the distance specified is returned.
#' The longest of the diverted paths is used for limiting distance.
#' @return integer vector of all COMIDs downstream of the starting COMID
#' @importFrom dplyr filter
#' @export
#' @examples
#' library(sf)
#' start_COMID <- 11688818
#' sample_flines <- read_sf(system.file("extdata",
#' "petapsco_flowlines.gpkg",
#' package = "nhdplusTools"))
#' DD_COMIDs <- get_DD(sample_flines, start_COMID, distance = 4)
#' plot(dplyr::filter(sample_flines, COMID %in% DD_COMIDs)$geom,
#' col = "red", lwd = 2)
#'
#' DM_COMIDs <- get_DM(sample_flines, start_COMID, distance = 4)
#' plot(dplyr::filter(sample_flines, COMID %in% DM_COMIDs)$geom,
#' col = "blue", add = TRUE, lwd = 2)
#'
get_DD <- function(network, comid, distance = NULL) {
if ("sf" %in% class(network)) network <- sf::st_set_geometry(network, NULL)
network <- network %>% check_names("get_DD") %>%
dplyr::select(get("get_DD_attributes", nhdplusTools_env))
start_comid <- get_start_comid(network, comid)
stop_pathlength <- 0
if (!is.null(distance)) {
if (distance < start_comid$LENGTHKM) return(comid)
stop_pathlength <- start_comid$Pathlength +
start_comid$LENGTHKM -
distance
}
all <- private_get_DD(network, comid, stop_pathlength)
if (!is.null(distance)) {
network <- filter(network, COMID %in% unique(all))
return(filter(network, (Pathlength + LENGTHKM) >= stop_pathlength)$COMID)
} else {
return(unique(all))
}
}
private_get_DD <- function(network, comid, stop_pathlength = 0) {
main <- ds_main <- filter(network, COMID %in% comid)
if (length(main$Hydroseq) == 1) {
ds_main <- filter(network,
LevelPathI %in% main$LevelPathI &
Hydroseq <= main$Hydroseq)
}
ds_hs <- c(filter(ds_main, !DnLevelPat %in% main$LevelPathI)$DnHydroseq,
filter(ds_main, !DnMinorHyd == 0)$DnMinorHyd)
ds_lpid <- filter(network, Hydroseq %in% ds_hs)$LevelPathI
if (length(ds_lpid) > 0) {
if (length(ds_hs) == 1) {
# Same as DM
ds_comid <- filter(network,
LevelPathI %in% ds_lpid &
Hydroseq <= ds_hs)$COMID
} else {
# Works for divergent paths.
ds_hs <- filter(network, Hydroseq %in% ds_hs)
ds_comid <- filter(network, LevelPathI %in% ds_lpid) %>%
dplyr::left_join(select(ds_hs, LevelPathI, max_Hydroseq = Hydroseq),
by = "LevelPathI") %>%
filter(Hydroseq <= .data$max_Hydroseq)
ds_comid <- ds_comid$COMID
}
# This allows this algorithm to work for short distances
# in a reasonable time in large systems.
if (all(ds_main$Pathlength <= stop_pathlength)) return(ds_main$COMID)
c(ds_main$COMID, private_get_DD(network, ds_comid, stop_pathlength))
} else {
return(ds_main$COMID)
}
}
get_start_comid <- function(network, comid) {
start_comid <- filter(network, COMID == comid)
if(nrow(start_comid) > 1) {
stop("Found duplicate ID for starting catchment. Duplicate rows in network?")
}
start_comid
}
|
# ---
# title: "Untitled"
# author: "wangbinzjcc@qq.com"
# date: "2019/06/27"
# output: html_document
# editor_options:
# chunk_output_type: console
# ---
#```{r}
rm(list = ls())
#```
## package
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
require(ggplot2)
# devtools::install_github("thomasp85/patchwork")
require(patchwork)
source("EAA_4_graph_program_gam_ggplot_persp_rgl_20190703_1616.R")
#```
# dat_0
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas\\EAA_results")
load(file = "dat_0_pnas_20190703.save")
dat_0 <- dat_0
#```
## patchwork_detailed_values_FUN
#```{r}
patchwork_detailed_values_FUN <- function(
dat_0, plot_name,
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)) {
#
data_types <- unique(dat_0[, "data_type"])
p_j_list <- vector(mode = "list", length = length(data_types))
names(p_j_list) <- data_types
for(j in data_types) {
k_0 <- switch(EXPR = j, born = gam_k[1], clust = gam_k[2], diedL = gam_k[3], diedS = gam_k[4], growth = gam_k[5])
value_names <- c("real_value", "relative_value", "mean_shuffled", "sd_shuffled", "graph_value")
p_i_list <- vector(mode = "list", length = length(value_names))
names(p_i_list) <- value_names
for(i in value_names) {
condition <- switch(EXPR = j, growth = 0:5, 1:4)
dat_1 <- dat_1_FUN(
dat_0 = dat_0, plot_name = plot_name,
data_type = j, condition = condition,
annul_interval = 1)
p_i_list[[i]] <- ggplot_graph_FUN(
dat_1 = dat_1, gam_k = k_0,
graph_name = i)
p_i_list[[i]] <- p_i_list[[i]] + annotate("text", label = i, x = 100, y = 0, size = 6, colour = "black")
}
p_j_list[[j]] <- p_i_list
}
# return
p_j_list
}
#```
## graph_rbind_FUN
#```{r}
require(patchwork)
graph_rbind_FUN <- function(p_j_list) {
#
p_list <- p_j_list[["born"]]
p_born <- p_list[["real_value"]] + p_list[["mean_shuffled"]] + p_list[["relative_value"]] + p_list[["sd_shuffled"]] + p_list[["graph_value"]] + plot_layout(ncol = 5)
#
p_list <- p_j_list[["clust"]]
p_clust <- p_list[["real_value"]] + p_list[["mean_shuffled"]] + p_list[["relative_value"]] + p_list[["sd_shuffled"]] + p_list[["graph_value"]] + plot_layout(ncol = 5)
#
p_list <- p_j_list[["diedS"]]
p_diedS <- p_list[["real_value"]] + p_list[["mean_shuffled"]] + p_list[["relative_value"]] + p_list[["sd_shuffled"]] + p_list[["graph_value"]] + plot_layout(ncol = 5)
#
p_list <- p_j_list[["diedL"]]
p_diedL <- p_list[["real_value"]] + p_list[["mean_shuffled"]] + p_list[["relative_value"]] + p_list[["sd_shuffled"]] + p_list[["graph_value"]] + plot_layout(ncol = 5)
#
p_list <- p_j_list[["growth"]]
p_growth <- p_list[["real_value"]] + p_list[["mean_shuffled"]] + p_list[["relative_value"]] + p_list[["sd_shuffled"]] + p_list[["graph_value"]] + plot_layout(ncol = 5)
#
p_all <- p_born / p_clust / p_diedS / p_diedL / p_growth
# return
p_all
}
#```
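## note: looping over plots (sketch only)
#```{r}
# The per-plot chunks below all run the same four steps. A loop such as this
# sketch could produce every PDF in one pass; the plot names and gam_k values
# are taken from the chunks below, and it is kept commented out as an
# illustration only.
# plot_gam_k <- list(
#   windriver = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3),
#   gutianshan = c(born = 6, clust = 6, diedL = 5, diedS = 5, growth = 5))
# for (plot_name in names(plot_gam_k)) {
#   p_all <- graph_rbind_FUN(
#     patchwork_detailed_values_FUN(dat_0, plot_name, plot_gam_k[[plot_name]]))
#   ggsave(sprintf("pdf_save//patchwork_detailed_values_%s.pdf", plot_name),
#          plot = p_all, device = "pdf", width = 40, height = 40, units = "cm")
# }
#```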
## detailed graphs "windriver"
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "windriver"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs "mudumalai"
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "mudumalai"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs wytham
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "wytham"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs gutianshan
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "gutianshan"
dat_0 = dat_0
gam_k = c(born = 6, clust = 6, diedL = 5, diedS = 5, growth = 5)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs nonggang
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "nonggang"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs heishiding
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "heishiding"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs "bci"
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "bci"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs pasoh
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "pasoh"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
|
/R/EAA_6_patchwork_detailed_values_all_plots_20190703.R
|
no_license
|
wangbinzjcc/EAAr
|
R
| false
| false
| 7,509
|
r
|
# ---
# title: "Untitled"
# author: "wangbinzjcc@qq.com"
# date: "2019/06/27"
# output: html_document
# editor_options:
# chunk_output_type: console
# ---
#```{r}
rm(list = ls())
#```
## package
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
require(ggplot2)
# devtools::install_github("thomasp85/patchwork")
require(patchwork)
source("EAA_4_graph_program_gam_ggplot_persp_rgl_20190703_1616.R")
#```
# dat_0
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas\\EAA_results")
load(file = "dat_0_pnas_20190703.save")
dat_0 <- dat_0
#```
## patchwork_detailed_values_FUN
#```{r}
patchwork_detailed_values_FUN <- function(
dat_0, plot_name,
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)) {
#
data_types <- unique(dat_0[, "data_type"])
p_j_list <- vector(mode = "list", length = length(data_types))
names(p_j_list) <- data_types
for(j in data_types) {
k_0 <- switch(EXPR = j, born = gam_k[1], clust = gam_k[2], diedL = gam_k[3], diedS = gam_k[4], growth = gam_k[5])
value_names <- c("real_value", "relative_value", "mean_shuffled", "sd_shuffled", "graph_value")
p_i_list <- vector(mode = "list", length = length(value_names))
names(p_i_list) <- value_names
for(i in value_names) {
condition <- switch(EXPR = j, growth = 0:5, 1:4)
dat_1 <- dat_1_FUN(
dat_0 = dat_0, plot_name = plot_name,
data_type = j, condition = condition,
annul_interval = 1)
p_i_list[[i]] <- ggplot_graph_FUN(
dat_1 = dat_1, gam_k = k_0,
graph_name = i)
p_i_list[[i]] <- p_i_list[[i]] + annotate("text", label = i, x = 100, y = 0, size = 6, colour = "black")
}
p_j_list[[j]] <- p_i_list
}
# return
p_j_list
}
#```
## graph_rbind_FUN
#```{r}
require(patchwork)
graph_rbind_FUN <- function(p_j_list) {
#
p_list <- p_j_list[["born"]]
p_born <- p_list[["real_value"]] + p_list[["mean_shuffled"]] + p_list[["relative_value"]] + p_list[["sd_shuffled"]] + p_list[["graph_value"]] + plot_layout(ncol = 5)
#
p_list <- p_j_list[["clust"]]
p_clust <- p_list[["real_value"]] + p_list[["mean_shuffled"]] + p_list[["relative_value"]] + p_list[["sd_shuffled"]] + p_list[["graph_value"]] + plot_layout(ncol = 5)
#
p_list <- p_j_list[["diedS"]]
p_diedS <- p_list[["real_value"]] + p_list[["mean_shuffled"]] + p_list[["relative_value"]] + p_list[["sd_shuffled"]] + p_list[["graph_value"]] + plot_layout(ncol = 5)
#
p_list <- p_j_list[["diedL"]]
p_diedL <- p_list[["real_value"]] + p_list[["mean_shuffled"]] + p_list[["relative_value"]] + p_list[["sd_shuffled"]] + p_list[["graph_value"]] + plot_layout(ncol = 5)
#
p_list <- p_j_list[["growth"]]
p_growth <- p_list[["real_value"]] + p_list[["mean_shuffled"]] + p_list[["relative_value"]] + p_list[["sd_shuffled"]] + p_list[["graph_value"]] + plot_layout(ncol = 5)
#
p_all <- p_born / p_clust / p_diedS / p_diedL / p_growth
# return
p_all
}
#```
## detailed graphs "windriver"
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "windriver"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs "mudumalai"
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "mudumalai"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs wytham
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "wytham"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs gutianshan
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "gutianshan"
dat_0 = dat_0
gam_k = c(born = 6, clust = 6, diedL = 5, diedS = 5, growth = 5)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs nonggang
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "nonggang"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs heishiding
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "heishiding"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs "bci"
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "bci"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
## detailed graphs pasoh
#```{r}
setwd("F:\\porjects-wangbin\\EAA_pnas")
plot_name = "pasoh"
dat_0 = dat_0
gam_k = c(born = 5, clust = 5, diedL = 4, diedS = 4, growth = 3)
#
p_j_list <- patchwork_detailed_values_FUN(dat_0, plot_name, gam_k)
p_all <- graph_rbind_FUN(p_j_list)
filename = sprintf(
fmt = "pdf_save//patchwork_detailed_values_%s.pdf", plot_name)
ggsave(
filename = filename,
plot = p_all, device = "pdf",
width = 8 * 5, height = 8 * 5, units = "cm")
#```
|
library("data.table",quietly = T)
tool="Argot"
#cafa_gaf <- argot2_cafa
filter_mixed_gaf <- function(cafa_gaf,tool,config){
cafa_data = read_gaf(cafa_gaf)
print(config$data[["mixed-method"]][[tool]])
score_ths = config$data[["mixed-method"]][[tool]]$score_th
flog.info(score_ths)
flog.info(paste("Filtering annotations with follwing score thresholds for ",tool))
flog.info(paste(score_ths,names(score_ths)))
cafa_data[,with:=as.numeric(with)]
tmp_out <- lapply(names(score_ths),function(x){
score_th = as.numeric(score_ths[[x]])
cafa_data[aspect == x & with>score_th]
})
out_gaf = do.call(rbind,tmp_out)
out_gaf[,with:=as.character(with)]
out_gaf[,with:=""]
return(out_gaf)
}
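# Example call (hypothetical values): the config is expected to hold one score
# threshold per GAF aspect under data$`mixed-method`$<tool>$score_th.
# config <- list(data = list(`mixed-method` = list(
#   Argot = list(score_th = list(P = 0.3, F = 0.3, C = 0.3)))))
# filtered <- filter_mixed_gaf("argot2_cafa.gaf", tool = "Argot", config = config)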
|
/code/R/filter_mixed.r
|
permissive
|
Dill-PICL/GOMAP
|
R
| false
| false
| 769
|
r
|
library("data.table",quietly = T)
tool="Argot"
#cafa_gaf <- argot2_cafa
filter_mixed_gaf <- function(cafa_gaf,tool,config){
cafa_data = read_gaf(cafa_gaf)
print(config$data[["mixed-method"]][[tool]])
score_ths = config$data[["mixed-method"]][[tool]]$score_th
flog.info(score_ths)
flog.info(paste("Filtering annotations with follwing score thresholds for ",tool))
flog.info(paste(score_ths,names(score_ths)))
cafa_data[,with:=as.numeric(with)]
tmp_out <- lapply(names(score_ths),function(x){
score_th = as.numeric(score_ths[[x]])
cafa_data[aspect == x & with>score_th]
})
out_gaf = do.call(rbind,tmp_out)
out_gaf[,with:=as.character(with)]
out_gaf[,with:=""]
return(out_gaf)
}
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "qsar-biodeg")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn = makeLearner("classif.xgboost", par.vals = list(booster = "dart", normalize_type = "forest", sample_type = "uniform"), predict.type = "prob")
#:# hash
#:# ccab95989c097ff5257ab9bdb6a4d594
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
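# r$aggr holds the aggregated CV results for all requested measures, not only accuracy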
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
/models/openml_qsar-biodeg/classification_Class/ccab95989c097ff5257ab9bdb6a4d594/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 754
|
r
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "qsar-biodeg")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn = makeLearner("classif.xgboost", par.vals = list(booster = "dart", normalize_type = "forest", sample_type = "uniform"), predict.type = "prob")
#:# hash
#:# ccab95989c097ff5257ab9bdb6a4d594
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
# Exercise 2: indexing and filtering vectors
# Create a vector `first_ten` that has the values 10 through 20 in it (using
# the : operator)
first_ten <- 10:20
# Create a vector `next_ten` that has the values 21 through 30 in it (using the
# seq() function)
next_ten <- seq(21, 30)
# Create a vector `all_numbers` by combining the previous two vectors
all_numbers <- c(first_ten, next_ten)
# Create a variable `eleventh` that contains the 11th element in `all_numbers`
eleventh <- all_numbers[11]
# Create a vector `some_numbers` that contains the 2nd through the 5th elements
# of `all_numbers`
some_numbers <- all_numbers[2:5]
# Create a vector `even` that holds the even numbers from 1 to 100
even <- seq(2, 100, 2)
# Using the `all()` function and `%%` (modulo) operator, confirm that all of the
# numbers in your `even` vector are even
all(even %% 2 == 0)
# Create a vector `phone_numbers` that contains the numbers 8, 6, 7, 5, 3, 0, 9
phone_numbers <- c(8, 6, 7, 5, 3, 0, 9)
# Create a vector `prefix` that has the first three elements of `phone_numbers`
prefix <- phone_numbers[1:3]
# Create a vector `small` that has the values of `phone_numbers` that are
# less than or equal to 5
small <- phone_numbers[phone_numbers <= 5]
# Create a vector `large` that has the values of `phone_numbers` that are
# strictly greater than 5
large <- phone_numbers[phone_numbers > 5]
# Replace the values in `phone_numbers` that are larger than 5 with the number 5
phone_numbers[phone_numbers > 5] <- 5
# Replace every odd-numbered value in `phone_numbers` with the number 0
phone_numbers[phone_numbers %% 2 == 1] <- 0
|
/chapter-07-exercises/exercise-2/exercise.R
|
permissive
|
krislee1204/book-exercises
|
R
| false
| false
| 1,640
|
r
|
# Exercise 2: indexing and filtering vectors
# Create a vector `first_ten` that has the values 10 through 20 in it (using
# the : operator)
first_ten <- 10:20
# Create a vector `next_ten` that has the values 21 through 30 in it (using the
# seq() function)
next_ten <- seq(21, 30)
# Create a vector `all_numbers` by combining the previous two vectors
all_numbers <- c(first_ten, next_ten)
# Create a variable `eleventh` that contains the 11th element in `all_numbers`
eleventh <- all_numbers[11]
# Create a vector `some_numbers` that contains the 2nd through the 5th elements
# of `all_numbers`
some_numbers <- all_numbers[2:5]
# Create a vector `even` that holds the even numbers from 1 to 100
even <- seq(2, 100, 2)
# Using the `all()` function and `%%` (modulo) operator, confirm that all of the
# numbers in your `even` vector are even
all(even %% 2 == 0)
# Create a vector `phone_numbers` that contains the numbers 8, 6, 7, 5, 3, 0, 9
phone_numbers <- c(8, 6, 7, 5, 3, 0, 9)
# Create a vector `prefix` that has the first three elements of `phone_numbers`
prefix <- phone_numbers[1:3]
# Create a vector `small` that has the values of `phone_numbers` that are
# less than or equal to 5
small <- phone_numbers[phone_numbers <= 5]
# Create a vector `large` that has the values of `phone_numbers` that are
# strictly greater than 5
large <- phone_numbers[phone_numbers > 5]
# Replace the values in `phone_numbers` that are larger than 5 with the number 5
phone_numbers[phone_numbers > 5] <- 5
# Replace every odd-numbered value in `phone_numbers` with the number 0
phone_numbers[phone_numbers %% 2 == 1] <- 0
|
#Download file
if(!file.exists("data")){dir.create("data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,destfile="data\\powerconsumption.zip")
#Unzips the file
unzip(zipfile="./data/powerconsumption.zip",exdir="./data")
#Loading Files into R
pcDT<- read.table(file.path("./data", "household_power_consumption.txt"),sep=";",header=TRUE, na.strings="?")
#Loading required packages
library(tidyr)
library(lubridate)
#Subsetting data to February 1st and 2nd of 2007
pcDT$Date<- as.Date(pcDT$Date, format="%d/%m/%Y")
pcDT<- subset(pcDT, Date >= as.Date("2007/02/01") & Date <= as.Date("2007/02/02"))
#Formatting Data for plotting
mergeDT<- unite(pcDT, Date, c(Date, Time), remove=FALSE)
mergeDT$Date<- ymd_hms(mergeDT$Date)
#Plotting to file
png(file="plot2.png", width=480, height=480, units="px")
with(mergeDT,plot(Date,Global_active_power, ylab="Global Active Power (kilowatts)", xlab="", type="l"))
dev.off()
|
/Course4/Assignment1/Plot2.R
|
no_license
|
kdbode/DataScience-CourseraCourses
|
R
| false
| false
| 993
|
r
|
#Download file
if(!file.exists("data")){dir.create("data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,destfile="data\\powerconsumption.zip")
#Unzips the file
unzip(zipfile="./data/powerconsumption.zip",exdir="./data")
#Loading Files into R
pcDT<- read.table(file.path("./data", "household_power_consumption.txt"),sep=";",header=TRUE, na.strings="?")
#Loading required packages
library(tidyr)
library(lubridate)
#Subsetting data to February 1st and 2nd of 2007
pcDT$Date<- as.Date(pcDT$Date, format="%d/%m/%Y")
pcDT<- subset(pcDT, Date >= as.Date("2007/02/01") & Date <= as.Date("2007/02/02"))
#Formatting Data for plotting
mergeDT<- unite(pcDT, Date, c(Date, Time), remove=FALSE)
mergeDT$Date<- ymd_hms(mergeDT$Date)
#Plotting to file
png(file="plot2.png", width=480, height=480, units="px")
with(mergeDT,plot(Date,Global_active_power, ylab="Global Active Power (kilowatts)", xlab="", type="l"))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/caching.R
\name{ridl_memoise_clear}
\alias{ridl_memoise_clear}
\title{Clear memory cache used to memoise ridl functions}
\usage{
ridl_memoise_clear()
}
\description{
Clear memory cache used to memoise ridl functions
}
|
/man/ridl_memoise_clear.Rd
|
permissive
|
UNHCRmdl/ridl-1
|
R
| false
| true
| 296
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/caching.R
\name{ridl_memoise_clear}
\alias{ridl_memoise_clear}
\title{Clear memory cache used to memoise ridl functions}
\usage{
ridl_memoise_clear()
}
\description{
Clear memory cache used to memoise ridl functions
}
|
library(naivebayes)
library(dplyr)
library(ggplot2)
library(psych)
Entrance=read.csv(file.choose(),sep=",",header=TRUE)
str(Entrance)
summary(Entrance)
Entrance$admit=factor(Entrance$admit,levels=c(0,1),labels=c("No","YES"))
Entrance$rank=as.factor(Entrance$rank)
pairs.panels(Entrance[,-1])
Entrance %>%
ggplot(aes(x=gre,fill=admit))+geom_density(alpha=0.8,color="black")
set.seed(1234)
ind=sample(2,nrow(Entrance),replace=T,prob=c(0.8,0.2))
Entrance_train=Entrance[ind==1,]
Entrance_test=Entrance[ind==2,]
Entrance_Naive_model=naive_bayes(admit~.,data=Entrance_train)
Entrance_Naive_model
plot(Entrance_Naive_model)
p=predict(Entrance_Naive_model,Entrance_test,type="prob")
head(cbind(p,Entrance_test))
P1=predict(Entrance_Naive_model,Entrance_test)
tab1=table(P1,Entrance_test$admit)
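# overall accuracy: proportion of correct predictions on the test set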
sum(diag(tab1))/sum(tab1)
|
/NAIVE BAYES ON ENTRANCETEST.R
|
no_license
|
stdntlfe/R-Machine-Learning-Algorithms
|
R
| false
| false
| 877
|
r
|
library(naivebayes)
library(dplyr)
library(ggplot2)
library(psych)
Entrance=read.csv(file.choose(),sep=",",header=TRUE)
str(Entrance)
summary(Entrance)
Entrance$admit=factor(Entrance$admit,levels=c(0,1),labels=c("No","YES"))
Entrance$rank=as.factor(Entrance$rank)
pairs.panels(Entrance[,-1])
Entrance %>%
ggplot(aes(x=gre,fill=admit))+geom_density(alpha=0.8,color="black")
set.seed(1234)
ind=sample(2,nrow(Entrance),replace=T,prob=c(0.8,0.2))
Entrance_train=Entrance[ind==1,]
Entrance_test=Entrance[ind==2,]
Entrance_Naive_model=naive_bayes(admit~.,data=Entrance_train)
Entrance_Naive_model
plot(Entrance_Naive_model)
p=predict(Entrance_Naive_model,Entrance_test,type="prob")
head(cbind(p,Entrance_test))
P1=predict(Entrance_Naive_model,Entrance_test)
tab1=table(P1,Entrance_test$admit)
sum(diag(tab1))/sum(tab1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/processPhotoId.R
\name{processPhotoId}
\alias{processPhotoId}
\title{Process Photo ID}
\usage{
processPhotoId(file_path = "", idType = "auto", imageSource = "auto",
correctOrientation = "true", correctSkew = "true",
description = "", pdfPassword = "", ...)
}
\arguments{
\item{file_path}{path to file; required}
\item{idType}{optional; default = "auto"}
\item{imageSource}{optional; default = "auto"}
\item{correctOrientation}{String. Optional; default: \code{true}.
Options: \code{true} or \code{false}}
\item{correctSkew}{String. Optional; default: \code{true}.
Options: \code{true} or \code{false}}
\item{description}{optional; default = ""}
\item{pdfPassword}{optional; default = ""}
\item{\dots}{Additional arguments passed to \code{\link{abbyy_POST}}.}
}
\value{
Data frame with details of the task associated with the submitted Photo ID image
}
\description{
Get data from a Photo ID. The function is under testing and may not work fully.
}
\examples{
\dontrun{
processPhotoId(file_path = "file_path", idType = "auto", imageSource = "auto")
}
}
\references{
\url{http://ocrsdk.com/documentation/apireference/processPhotoId/}
}
|
/man/processPhotoId.Rd
|
permissive
|
KrishAK47/abbyyR
|
R
| false
| true
| 1,224
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/processPhotoId.R
\name{processPhotoId}
\alias{processPhotoId}
\title{Process Photo ID}
\usage{
processPhotoId(file_path = "", idType = "auto", imageSource = "auto",
correctOrientation = "true", correctSkew = "true",
description = "", pdfPassword = "", ...)
}
\arguments{
\item{file_path}{path to file; required}
\item{idType}{optional; default = "auto"}
\item{imageSource}{optional; default = "auto"}
\item{correctOrientation}{String. Optional; default: \code{true}.
Options: \code{true} or \code{false}}
\item{correctSkew}{String. Optional; default: \code{true}.
Options: \code{true} or \code{false}}
\item{description}{optional; default = ""}
\item{pdfPassword}{optional; default = ""}
\item{\dots}{Additional arguments passed to \code{\link{abbyy_POST}}.}
}
\value{
Data frame with details of the task associated with the submitted Photo ID image
}
\description{
Get data from a Photo ID. The function is under testing and may not work fully.
}
\examples{
\dontrun{
processPhotoId(file_path = "file_path", idType = "auto", imageSource = "auto")
}
}
\references{
\url{http://ocrsdk.com/documentation/apireference/processPhotoId/}
}
|
## This is assignment 2
makeCacheMatrix <- function(x = matrix()) {
  if (!is.matrix(x)) {
    stop("Please input a matrix")
  }
  m.inverse <- NULL                       # cached inverse, filled in lazily
  getm.o <- function() x                  # return the original matrix
  setm.i <- function(inv) m.inverse <<- inv
  getm.i <- function() m.inverse
  list(matrix.o = getm.o, set.i = setm.i, matrix.i = getm.i)
}
cacheSolve <- function(x, ...) {
  i <- x$matrix.i()
  if (!is.null(i)) {
    message("getting cached inverse")
    return(i)
  }
  i <- solve(x$matrix.o(), ...)
  x$set.i(i)
  i
}
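# Quick check (illustrative values only):
# cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
# cacheSolve(cm)  # computes, caches and returns the inverse
# cacheSolve(cm)  # returns the cached inverse (prints "getting cached inverse")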
|
/as2.R
|
no_license
|
hybeeson/datasciencecoursera
|
R
| false
| false
| 473
|
r
|
## This is assignment 2
makeCacheMatrix <- function(x = matrix()) {
  if (!is.matrix(x)) {
    stop("Please input a matrix")
  }
  m.inverse <- NULL                       # cached inverse, filled in lazily
  getm.o <- function() x                  # return the original matrix
  setm.i <- function(inv) m.inverse <<- inv
  getm.i <- function() m.inverse
  list(matrix.o = getm.o, set.i = setm.i, matrix.i = getm.i)
}
cacheSolve <- function(x, ...) {
  i <- x$matrix.i()
  if (!is.null(i)) {
    message("getting cached inverse")
    return(i)
  }
  i <- solve(x$matrix.o(), ...)
  x$set.i(i)
  i
}
|
library(data.table)
data1 <- fread('./data/raw/Iowa_Liquor_Sales.csv', header = T, sep = ',', verbose=TRUE)
data1$DATE <- as.Date(data1$DATE, "%m/%d/%Y")
data1$ZIPCODE <- factor(data1$ZIPCODE, ordered=F)
data1$`STATE BTL COST` <- sapply(strsplit(data1$`STATE BTL COST`, split='$', fixed=TRUE), function(x) (x[2]))
data1$`STATE BTL COST` <- as.numeric(data1$`STATE BTL COST`)
data1$`BTL PRICE` <- sapply(strsplit(data1$`BTL PRICE`, split='$', fixed=TRUE), function(x) (x[2]))
data1$`BTL PRICE` <- as.numeric(data1$`BTL PRICE`)
data1$`TOTAL` <- sapply(strsplit(data1$`TOTAL`, split='$', fixed=TRUE), function(x) (x[2]))
data1$`TOTAL` <- as.numeric(data1$`TOTAL`)
data1 <- subset(data1, DATE<"2015-01-01")
##Insert establishment data
est_data <- fread('./data/raw/iowa_food_drink.csv', header = T, sep = ',', verbose=TRUE)
#Start merging
data_zipcodes <- unique(data1$ZIPCODE)
#Rogue zipcodes are 97 (sioux city, Morningside avenue) and NA (Dunlap). Dunlap is 61525 and Sioux city is 51106 (googled it)
summary(data_zipcodes)
min(na.omit(data_zipcodes))
max(na.omit(data_zipcodes))
sum(is.na(data1$ZIPCODE))
data1$CITY[is.na(data1$ZIPCODE)]
data1$ZIPCODE[is.na(data1$ZIPCODE)] <- 61525
data1$CITY[data1$ZIPCODE==97]
data1$ZIPCODE[data1$ZIPCODE==97] <- 51106
install.packages("tidyr")
library(tidyr)
library(dplyr)
#Drop columns
est_data2 <- select(as.data.frame(est_data), -GEOGRAPHY, -NAICS2007)
#Remove duplicates
est_data2 <- est_data2[!duplicated(est_data2),]
#Transform dataset
library(tidyr)
est_data3 <- spread(est_data2, NAICS2007_MEANING, ESTAB)
#Set missing to zero
names(data1)[23:45]
est_data3[is.na(est_data3)] <- 0
est_data3 <- as.data.table(est_data3)
#Join
data1 <- left_join(data1, est_data3, by = "ZIPCODE")
summary(data1)
#NA's present because zipcode was not in est_data3
data1 <- na.omit(data1)
summary(data1)
#Calculate total establishments
data1$TOTAL_EST <- rowSums(data1[seq(0,nrow(data1)), 23:45,with=FALSE])
summary(data1)
##Insert population data
pop_data <- fread('./data/raw/Iowa_population.csv', header = T, sep = ',', verbose=TRUE)
str(pop_data)
#Merge population data with base
data1 <- left_join(data1, pop_data, by = "ZIPCODE")
#Write for python use
write.csv(unique(data1$ZIPCODE), file="zipcodes.csv", row.names = FALSE)
##Insert weather data
weather_data <- fread('./data/raw/Iowa_weather.csv', header = T, sep = ',', verbose=TRUE)
str(weather_data)
summary(weather_data)
weather_data <- select(weather_data, -STATION, -TSUN, -AWND, -WT09, -WT01, -WT06, -WT05, -WT02, -WT11, -WT04, -WT08, -WT03)
weather_data$ELEVATION <- as.numeric(weather_data$ELEVATION)
weather_data$LATITUDE <- as.numeric(weather_data$LATITUDE)
weather_data$LONGITUDE <- as.numeric(weather_data$LONGITUDE)
#lct <- Sys.getlocale("LC_TIME"); Sys.setlocale("LC_TIME", "C")
weather_data$DATE <- as.Date(as.character(weather_data$DATE), "%Y%m%d")
#Celsius convert. range of T is -200 to 200; something wrong
#weather_data$TMIN <- (weather_data$TMIN-32)*5/9
#Insert zipcode and lat/lon data
zip_lat_lon_data <- fread('./data/processed/ZIPCODES_LATLON.csv', header = T, sep = ',', verbose=TRUE)
str(zip_lat_lon_data)
weather_data <- left_join(weather_data, zip_lat_lon_data, by=c("LATITUDE", "LONGITUDE"))
weather_data <- select(weather_data, -V1)
#NAs in ZIPCODE. Possibly latitude and longitude not exactly same. Trying to populate based on min distance.
a1 <- weather_data$LATITUDE[is.na(weather_data$ZIPCODE)]
a2 <- weather_data$LONGITUDE[is.na(weather_data$ZIPCODE)]
a3 <- data.frame(a1,a2)
a3 <- a3[!duplicated(a3),]
a3 <- a3[c(2,1)]
a3 <- na.omit(a3)
b1 <- select(zip_lat_lon_data, LATITUDE, LONGITUDE, ZIPCODE)
b2 <- as.data.frame(select(zip_lat_lon_data, LATITUDE, LONGITUDE, ZIPCODE))
b2 <- b2[c(2,1,3)]
install.packages('geosphere')
library("geosphere")
#Code to add zipcode in intermediate a3 data.frame that had NA's. To be merged to data1.
for (index1 in seq(1,nrow(a3))) {
b <- c()
for (index2 in seq(1,nrow(b2))) {
a <- distGeo(a3[index1,], b2[index2, 1:2])
    b <- append(b, a)  # keep distances in the same order as the rows of b2
}
a3[index1,3] <- b2[which.min(b),3]
}
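# Note (sketch, same a3/b2 objects assumed): for larger tables the pairwise loop
# above can be replaced by a single distance matrix from geosphere::distm():
# d <- distm(a3[, 1:2], b2[, 1:2], fun = distGeo)
# a3[, 3] <- b2[apply(d, 1, which.min), 3]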
colnames(a3)[1] <- "LONGITUDE"
colnames(a3)[2] <- "LATITUDE"
colnames(a3)[3] <- "ZIPCODE"
a3 <- a3[c(2,1,3)]
#Merge to master dataset
cond <- (is.na(weather_data$ZIPCODE))&(!is.na(weather_data$LATITUDE))
#Remove zipcode na's from
weather_data3 <- select(weather_data[cond], -ZIPCODE)
weather_data3 <- left_join(weather_data3,a3, by=c("LATITUDE", "LONGITUDE"),copy=TRUE)
weather_data4 <- weather_data[!cond]
weather_data <- rbind(weather_data3, weather_data4)
setnames(weather_data, "LONGITUDE", "W_LONGITUDE")
setnames(weather_data, "LATITUDE", "W_LATITUDE")
setnames(weather_data, "STATION_NAME", "W_STATION_NAME")
weather_data <- select(weather_data, c(DATE, ZIPCODE, PRCP, SNOW, TMIN, TMAX))
#Convert to Celsius
weather_data$TMIN <- (weather_data$TMIN/10 - 32)*0.5556
weather_data$TMAX <- (weather_data$TMAX/10 - 32)*0.5556
#Final merge weather data to master
inters_zipcodes <- intersect(unique(data1$ZIPCODE), unique(weather_data$ZIPCODE))
data1a <- data1[data1$ZIPCODE %in% inters_zipcodes, ]
data1b <- data1[!(data1$ZIPCODE %in% inters_zipcodes), ]
#Initialise columns for rbind
data1b$PRCP <- NA
data1b$SNOW <- NA
data1b$TMIN <- NA
data1b$TMAX <- NA
#Remove duplicate zipcode-date entries. These are valid as they are from different stations
weather_data <- weather_data[!duplicated(weather_data, by=c("DATE","ZIPCODE"))]
data1a <- left_join(data1a, weather_data, by=c("DATE", "ZIPCODE"))
#Final
data1 <- rbind(data1a, data1b)
#Import nationalities data scraped from zipatlas
nationalities <- c('arab', 'asian','black','chinese','czech','danish','dutch','english','filipino',
'french','german','greek','hispanic','indian','irish','italian','japanese','korean','lithuanian',
'mexican','native','norwegian','polish','portuguese','russian','scottish','slovak','swedish','swiss',
'welsh','west','white')
sub_nationalities <- nationalities[c(1:4, 7,8, 10:17, 20,21,23, 25,26,32)]
data1 <- data1[,c(1:22, 46:51)]
for (nation in sub_nationalities){
assign(nation, fread(paste('./data/processed/',
paste(nation,".csv", sep=""), sep=""),
header = T, sep = ',', verbose=TRUE))
n = get(nation)
setnames(n, colnames(n)[3], "ZIPCODE")
setnames(n, colnames(n)[7], paste("Perc_",nation,sep=""))
setnames(n, colnames(n)[8], paste("NRank_",nation, sep=""))
n <- n[,c(3,7,8), with=FALSE]
  n <- n[(n$ZIPCODE %in% unique(data1$ZIPCODE)),]
n <- as.data.table(sapply(n,gsub,pattern="[,#%]",replacement=""))
n <- as.data.table(sapply(n, function(x) as.numeric(as.character(x))))
assign(nation, n)
data1 <- left_join(data1, get(nation), by="ZIPCODE")
}
#Normalise liquor quantities by population
data1$BOTTLE_QTY_NORM <- NA
data1$TOTAL_NORM <- NA
data1$TOTAL_EST_NORM <- NA
data1$`BOTTLE_QTY_NORM` <- data1$`BOTTLE QTY`/data1$POPULATION
data1$TOTAL_NORM <- data1$TOTAL/data1$POPULATION
data1$TOTAL_EST_NORM <- data1$TOTAL_EST/data1$POPULATION
|
/project/R/EDA_project_analysis.R
|
no_license
|
alejio/Udacity-EDA-draft
|
R
| false
| false
| 7,020
|
r
|
library(data.table)
data1 <- fread('./data/raw/Iowa_Liquor_Sales.csv', header = T, sep = ',', verbose=TRUE)
data1$DATE <- as.Date(data1$DATE, "%m/%d/%Y")
data1$ZIPCODE <- factor(data1$ZIPCODE, ordered=F)
data1$`STATE BTL COST` <- sapply(strsplit(data1$`STATE BTL COST`, split='$', fixed=TRUE), function(x) (x[2]))
data1$`STATE BTL COST` <- as.numeric(data1$`STATE BTL COST`)
data1$`BTL PRICE` <- sapply(strsplit(data1$`BTL PRICE`, split='$', fixed=TRUE), function(x) (x[2]))
data1$`BTL PRICE` <- as.numeric(data1$`BTL PRICE`)
data1$`TOTAL` <- sapply(strsplit(data1$`TOTAL`, split='$', fixed=TRUE), function(x) (x[2]))
data1$`TOTAL` <- as.numeric(data1$`TOTAL`)
data1 <- subset(data1, DATE<"2015-01-01")
##Insert establishment data
est_data <- fread('./data/raw/iowa_food_drink.csv', header = T, sep = ',', verbose=TRUE)
#Start merging
data_zipcodes <- unique(data1$ZIPCODE)
#Rogue zipcodes are 97 (sioux city, Morningside avenue) and NA (Dunlap). Dunlap is 61525 and Sioux city is 51106 (googled it)
summary(data_zipcodes)
min(na.omit(data_zipcodes))
max(na.omit(data_zipcodes))
sum(is.na(data1$ZIPCODE))
data1$CITY[is.na(data1$ZIPCODE)]
data1$ZIPCODE[is.na(data1$ZIPCODE)] <- 61525
data1$CITY[data1$ZIPCODE==97]
data1$ZIPCODE[data1$ZIPCODE==97] <- 51106
install.packages("tidyr")
library(tidyr)
library(dplyr)
#Drop columns
est_data2 <- select(as.data.frame(est_data), -GEOGRAPHY, -NAICS2007)
#Remove duplicates
est_data2 <- est_data2[!duplicated(est_data2),]
#Transform dataset
library(tidyr)
est_data3 <- spread(est_data2, NAICS2007_MEANING, ESTAB)
#Set missing to zero
names(data1)[23:45]
est_data3[is.na(est_data3)] <- 0
est_data3 <- as.data.table(est_data3)
#Join
data1 <- left_join(data1, est_data3, by = "ZIPCODE")
summary(data1)
#NA's present because zipcode was not in est_data3
data1 <- na.omit(data1)
summary(data1)
#Calculate total establishments
data1$TOTAL_EST <- rowSums(data1[seq(0,nrow(data1)), 23:45,with=FALSE])
summary(data1)
##Insert population data
pop_data <- fread('./data/raw/Iowa_population.csv', header = T, sep = ',', verbose=TRUE)
str(pop_data)
#Merge population data with base
data1 <- left_join(data1, pop_data, by = "ZIPCODE")
#Write for python use
write.csv(unique(data1$ZIPCODE), file="zipcodes.csv", row.names = FALSE)
##Insert weather data
weather_data <- fread('./data/raw/Iowa_weather.csv', header = T, sep = ',', verbose=TRUE)
str(weather_data)
summary(weather_data)
weather_data <- select(weather_data, -STATION, -TSUN, -AWND, -WT09, -WT01, -WT06, -WT05, -WT02, -WT11, -WT04, -WT08, -WT03)
weather_data$ELEVATION <- as.numeric(weather_data$ELEVATION)
weather_data$LATITUDE <- as.numeric(weather_data$LATITUDE)
weather_data$LONGITUDE <- as.numeric(weather_data$LONGITUDE)
#lct <- Sys.getlocale("LC_TIME"); Sys.setlocale("LC_TIME", "C")
weather_data$DATE <- as.Date(as.character(weather_data$DATE), "%Y%m%d")
#Celsius convert. range of T is -200 to 200; something wrong
#weather_data$TMIN <- (weather_data$TMIN-32)*5/9
#Insert zipcode and lat/lon data
zip_lat_lon_data <- fread('./data/processed/ZIPCODES_LATLON.csv', header = T, sep = ',', verbose=TRUE)
str(zip_lat_lon_data)
weather_data <- left_join(weather_data, zip_lat_lon_data, by=c("LATITUDE", "LONGITUDE"))
weather_data <- select(weather_data, -V1)
#NAs in ZIPCODE. Possibly latitude and longitude not exactly same. Trying to populate based on min distance.
a1 <- weather_data$LATITUDE[is.na(weather_data$ZIPCODE)]
a2 <- weather_data$LONGITUDE[is.na(weather_data$ZIPCODE)]
a3 <- data.frame(a1,a2)
a3 <- a3[!duplicated(a3),]
a3 <- a3[c(2,1)]
a3 <- na.omit(a3)
b1 <- select(zip_lat_lon_data, LATITUDE, LONGITUDE, ZIPCODE)
b2 <- as.data.frame(select(zip_lat_lon_data, LATITUDE, LONGITUDE, ZIPCODE))
b2 <- b2[c(2,1,3)]
install.packages('geosphere')
library("geosphere")
#Code to add zipcode in intermediate a3 data.frame that had NA's. To be merged to data1.
for (index1 in seq(1,nrow(a3))) {
b <- c()
for (index2 in seq(1,nrow(b2))) {
a <- distGeo(a3[index1,], b2[index2, 1:2])
    b <- append(b, a)  # keep distances in the same order as the rows of b2
}
a3[index1,3] <- b2[which.min(b),3]
}
colnames(a3)[1] <- "LONGITUDE"
colnames(a3)[2] <- "LATITUDE"
colnames(a3)[3] <- "ZIPCODE"
a3 <- a3[c(2,1,3)]
#Merge to master dataset
cond <- (is.na(weather_data$ZIPCODE))&(!is.na(weather_data$LATITUDE))
#Remove zipcode na's from
weather_data3 <- select(weather_data[cond], -ZIPCODE)
weather_data3 <- left_join(weather_data3,a3, by=c("LATITUDE", "LONGITUDE"),copy=TRUE)
weather_data4 <- weather_data[!cond]
weather_data <- rbind(weather_data3, weather_data4)
setnames(weather_data, "LONGITUDE", "W_LONGITUDE")
setnames(weather_data, "LATITUDE", "W_LATITUDE")
setnames(weather_data, "STATION_NAME", "W_STATION_NAME")
weather_data <- select(weather_data, c(DATE, ZIPCODE, PRCP, SNOW, TMIN, TMAX))
#Convert to Celsius
weather_data$TMIN <- (weather_data$TMIN/10 - 32)*0.5556
weather_data$TMAX <- (weather_data$TMAX/10 - 32)*0.5556
#Final merge weather data to master
inters_zipcodes <- intersect(unique(data1$ZIPCODE), unique(weather_data$ZIPCODE))
data1a <- data1[data1$ZIPCODE %in% inters_zipcodes, ]
data1b <- data1[!(data1$ZIPCODE %in% inters_zipcodes), ]
#Initialise columns for rbind
data1b$PRCP <- NA
data1b$SNOW <- NA
data1b$TMIN <- NA
data1b$TMAX <- NA
#Remove duplicate zipcode-date entries. These are valid as they are from different stations
weather_data <- weather_data[!duplicated(weather_data, by=c("DATE","ZIPCODE"))]
data1a <- left_join(data1a, weather_data, by=c("DATE", "ZIPCODE"))
#Final
data1 <- rbind(data1a, data1b)
#Import nationalities data scraped from zipatlas
nationalities <- c('arab', 'asian','black','chinese','czech','danish','dutch','english','filipino',
'french','german','greek','hispanic','indian','irish','italian','japanese','korean','lithuanian',
'mexican','native','norwegian','polish','portuguese','russian','scottish','slovak','swedish','swiss',
'welsh','west','white')
sub_nationalities <- nationalities[c(1:4, 7,8, 10:17, 20,21,23, 25,26,32)]
data1 <- data1[,c(1:22, 46:51)]
for (nation in sub_nationalities){
assign(nation, fread(paste('./data/processed/',
paste(nation,".csv", sep=""), sep=""),
header = T, sep = ',', verbose=TRUE))
n = get(nation)
setnames(n, colnames(n)[3], "ZIPCODE")
setnames(n, colnames(n)[7], paste("Perc_",nation,sep=""))
setnames(n, colnames(n)[8], paste("NRank_",nation, sep=""))
n <- n[,c(3,7,8), with=FALSE]
  n <- n[(n$ZIPCODE %in% unique(data1$ZIPCODE)),]
n <- as.data.table(sapply(n,gsub,pattern="[,#%]",replacement=""))
n <- as.data.table(sapply(n, function(x) as.numeric(as.character(x))))
assign(nation, n)
data1 <- left_join(data1, get(nation), by="ZIPCODE")
}
#Normalise liquor quantities by population
data1$BOTTLE_QTY_NORM <- NA
data1$TOTAL_NORM <- NA
data1$TOTAL_EST_NORM <- NA
data1$`BOTTLE_QTY_NORM` <- data1$`BOTTLE QTY`/data1$POPULATION
data1$TOTAL_NORM <- data1$TOTAL/data1$POPULATION
data1$TOTAL_EST_NORM <- data1$TOTAL_EST/data1$POPULATION
|
setwd('/Users/peiboxu/Desktop/merge-seq analysis/')
library(tidyverse)
library(ggpubr)
### ### ### ### ### ###
### start from here ###
### ### ### ### ### ###
valid_cells_names=c('AI_valid','DMS_valid','MD_valid','BLA_valid','LH_valid')
#exn_meta=read.csv('exn_meta_valid.csv',row.names = 1)
exn_meta=exn_meta %>% mutate(cluster = factor(cluster,
levels = c('L2/3-Calb1','L2/3-Rorb',
'L5-Bcl6','L5-Htr2c',
'L5-S100b','L6-Npy','L6-Syt6')))
mouse = rep(c('unsort_1-3','sort_4-6'), c(7765,1603))
exn_meta$mouse=mouse
sub_exn_palette=c('#7fc97f', '#beaed4', '#fdc086', '#386cb0', '#f0027f', '#bf5b17',
'#666666')
### plot one by one due to color palette mismatches ###
i=1 # AI
df= exn_meta %>% select(valid_cells_names[i],cluster,mouse) %>%
filter(.data[[valid_cells_names[[i]]]]==valid_cells_names[[i]]) %>%
group_by(mouse,cluster) %>% summarise(., count = n()) %>%
mutate(ratio=count/sum(count)) %>% mutate(ratio=round(ratio,3))
df
p=ggbarplot(df, x = "mouse", y = "ratio",
color = "cluster", fill = "cluster",
palette = sub_exn_palette,
label = TRUE, lab.pos = "in", lab.col = "black",
repel=T,lab.size = 7,
xlab ="",
ylab = 'Ratio', ggtheme=theme_pubclean()) +
font("xlab", size = 20,face = "bold") +
font("ylab", size = 20,face = "bold") +
font("xy.text", size = 20,face = "bold") +
font("legend.title",size = 20,face = "bold") +
font("legend.text",face = "bold",size = 20)
AI_comparison=compare_means(ratio ~ mouse, data = df,
group.by = "cluster")
AI_comparison
write.csv(AI_comparison,file='AI_comparison.csv')
p
ggsave('AI_projection_motif_by_mouse.pdf',height = 5,width = 3)
####
i=2 # DMS
df= exn_meta %>% select(valid_cells_names[i],cluster,mouse) %>%
filter(.data[[valid_cells_names[[i]]]]==valid_cells_names[[i]]) %>%
group_by(mouse,cluster) %>% summarise(., count = n()) %>%
mutate(ratio=count/sum(count)) %>% mutate(ratio=round(ratio,3))
df
p=ggbarplot(df, x = "mouse", y = "ratio",
color = "cluster", fill = "cluster",
palette = sub_exn_palette,
label = TRUE, lab.pos = "in", lab.col = "black",
repel=T,lab.size = 7,
xlab ="",
ylab = 'Ratio', ggtheme=theme_pubclean()) +
font("xlab", size = 20,face = "bold") +
font("ylab", size = 20,face = "bold") +
font("xy.text", size = 20,face = "bold") +
font("legend.title",size = 20,face = "bold") +
font("legend.text",face = "bold",size = 20)
p
DMS_comparison=compare_means(ratio ~ mouse, data = df,
group.by = "cluster")
DMS_comparison
write.csv(DMS_comparison,file='DMS_comparison.csv')
ggsave('DMS_projection_motif_by_mouse.pdf',height = 5,width = 3)
####
i=3 # MD
df= exn_meta %>% select(valid_cells_names[i],cluster,mouse) %>%
filter(.data[[valid_cells_names[[i]]]]==valid_cells_names[[i]]) %>%
group_by(mouse,cluster) %>% summarise(., count = n()) %>%
mutate(ratio=count/sum(count)) %>% mutate(ratio=round(ratio,3))
df
sub_exn_palette=c('#7fc97f', '#beaed4', '#fdc086', '#f0027f', '#bf5b17',
'#666666')
p=ggbarplot(df, x = "mouse", y = "ratio",
color = "cluster", fill = "cluster",
palette = sub_exn_palette,
label = TRUE, lab.pos = "in", lab.col = "black",
repel=T,lab.size = 7,
xlab ="",
ylab = 'Ratio', ggtheme=theme_pubclean()) +
font("xlab", size = 20,face = "bold") +
font("ylab", size = 20,face = "bold") +
font("xy.text", size = 20,face = "bold") +
font("legend.title",size = 20,face = "bold") +
font("legend.text",face = "bold",size = 20)
p
MD_comparison=compare_means(ratio ~ mouse, data = df,
group.by = "cluster")
MD_comparison
write.csv(MD_comparison,file='MD_comparison.csv')
ggsave('MD_projection_motif_by_mouse.pdf',height = 5,width = 3)
####
i=4 #BLA
df= exn_meta %>% select(valid_cells_names[i],cluster,mouse) %>%
filter(.data[[valid_cells_names[[i]]]]==valid_cells_names[[i]]) %>%
group_by(mouse,cluster) %>% summarise(., count = n()) %>%
mutate(ratio=count/sum(count)) %>% mutate(ratio=round(ratio,3))
df
sub_exn_palette=c('#7fc97f', '#beaed4', '#fdc086', '#f0027f', '#bf5b17',
'#666666')
p=ggbarplot(df, x = "mouse", y = "ratio",
color = "cluster", fill = "cluster",
palette = sub_exn_palette,
label = TRUE, lab.pos = "in", lab.col = "black",
repel=T,lab.size = 7,
xlab ="",
ylab = 'Ratio', ggtheme=theme_pubclean()) +
font("xlab", size = 20,face = "bold") +
font("ylab", size = 20,face = "bold") +
font("xy.text", size = 20,face = "bold") +
font("legend.title",size = 20,face = "bold") +
font("legend.text",face = "bold",size = 20)
p
BLA_comparison=compare_means(ratio ~ mouse, data = df,
group.by = "cluster")
BLA_comparison
write.csv(BLA_comparison,file='BLA_comparison.csv')
ggsave('BLA_projection_motif_by_mouse.pdf',height = 5,width = 3)
####
i=5 #LH
df= exn_meta %>% select(valid_cells_names[i],cluster,mouse) %>%
filter(.data[[valid_cells_names[[i]]]]==valid_cells_names[[i]]) %>%
group_by(mouse,cluster) %>% summarise(., count = n()) %>%
mutate(ratio=count/sum(count)) %>% mutate(ratio=round(ratio,3))
df
sub_exn_palette=c('#7fc97f', '#beaed4', '#fdc086', '#f0027f', '#bf5b17',
'#666666')
p=ggbarplot(df, x = "mouse", y = "ratio",
color = "cluster", fill = "cluster",
palette = sub_exn_palette,
label = TRUE, lab.pos = "in", lab.col = "black",
repel=T,lab.size = 7,
xlab ="",
ylab = 'Ratio', ggtheme=theme_pubclean()) +
font("xlab", size = 20,face = "bold") +
font("ylab", size = 20,face = "bold") +
font("xy.text", size = 20,face = "bold") +
font("legend.title",size = 20,face = "bold") +
font("legend.text",face = "bold",size = 20)
LH_comparison=compare_means(ratio ~ mouse, data = df,
group.by = "cluster")
LH_comparison
write.csv(LH_comparison,file='LH_comparison.csv')
p
ggsave('LH_projection_motif_by_mouse.pdf',height = 5,width = 3)
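### a sketch of a helper that could replace the five near-identical blocks above ###
### (the palette is passed explicitly because MD/BLA/LH drop one colour; output names mirror the files written above) ###
plot_target_ratio <- function(meta, target, palette, out_prefix) {
  df <- meta %>% select(all_of(target), cluster, mouse) %>%
    filter(.data[[target]] == target) %>%
    group_by(mouse, cluster) %>% summarise(count = n(), .groups = "drop_last") %>%
    mutate(ratio = round(count / sum(count), 3))
  p <- ggbarplot(df, x = "mouse", y = "ratio",
                 color = "cluster", fill = "cluster", palette = palette,
                 label = TRUE, lab.pos = "in", lab.col = "black",
                 xlab = "", ylab = "Ratio", ggtheme = theme_pubclean())
  comp <- compare_means(ratio ~ mouse, data = df, group.by = "cluster")
  write.csv(comp, file = paste0(out_prefix, "_comparison.csv"))
  ggsave(paste0(out_prefix, "_projection_motif_by_mouse.pdf"), plot = p, height = 5, width = 3)
  list(plot = p, comparison = comp)
}
# e.g. plot_target_ratio(exn_meta, "AI_valid", sub_exn_palette, "AI")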
|
/figure2&S2/figureS2F/projection_motif_by_4_mice.R
|
no_license
|
MichaelPeibo/MERGE-seq-analysis
|
R
| false
| false
| 6,587
|
r
|
setwd('/Users/peiboxu/Desktop/merge-seq analysis/')
library(tidyverse)
library(ggpubr)
### ### ### ### ### ###
### start from here ###
### ### ### ### ### ###
valid_cells_names=c('AI_valid','DMS_valid','MD_valid','BLA_valid','LH_valid')
#exn_meta=read.csv('exn_meta_valid.csv',row.names = 1)
exn_meta=exn_meta %>% mutate(cluster = factor(cluster,
levels = c('L2/3-Calb1','L2/3-Rorb',
'L5-Bcl6','L5-Htr2c',
'L5-S100b','L6-Npy','L6-Syt6')))
mouse = rep(c('unsort_1-3','sort_4-6'), c(7765,1603))
exn_meta$mouse=mouse
sub_exn_palette=c('#7fc97f', '#beaed4', '#fdc086', '#386cb0', '#f0027f', '#bf5b17',
'#666666')
### plot one by one due to color palette mismatches ###
i=1 # AI
df= exn_meta %>% select(valid_cells_names[i],cluster,mouse) %>%
filter(.data[[valid_cells_names[[i]]]]==valid_cells_names[[i]]) %>%
group_by(mouse,cluster) %>% summarise(., count = n()) %>%
mutate(ratio=count/sum(count)) %>% mutate(ratio=round(ratio,3))
df
p=ggbarplot(df, x = "mouse", y = "ratio",
color = "cluster", fill = "cluster",
palette = sub_exn_palette,
label = TRUE, lab.pos = "in", lab.col = "black",
repel=T,lab.size = 7,
xlab ="",
ylab = 'Ratio', ggtheme=theme_pubclean()) +
font("xlab", size = 20,face = "bold") +
font("ylab", size = 20,face = "bold") +
font("xy.text", size = 20,face = "bold") +
font("legend.title",size = 20,face = "bold") +
font("legend.text",face = "bold",size = 20)
AI_comparison=compare_means(ratio ~ mouse, data = df,
group.by = "cluster")
AI_comparison
write.csv(AI_comparison,file='AI_comparison.csv')
p
ggsave('AI_projection_motif_by_mouse.pdf',height = 5,width = 3)
####
i=2 # DMS
df= exn_meta %>% select(valid_cells_names[i],cluster,mouse) %>%
filter(.data[[valid_cells_names[[i]]]]==valid_cells_names[[i]]) %>%
group_by(mouse,cluster) %>% summarise(., count = n()) %>%
mutate(ratio=count/sum(count)) %>% mutate(ratio=round(ratio,3))
df
p=ggbarplot(df, x = "mouse", y = "ratio",
color = "cluster", fill = "cluster",
palette = sub_exn_palette,
label = TRUE, lab.pos = "in", lab.col = "black",
repel=T,lab.size = 7,
xlab ="",
ylab = 'Ratio', ggtheme=theme_pubclean()) +
font("xlab", size = 20,face = "bold") +
font("ylab", size = 20,face = "bold") +
font("xy.text", size = 20,face = "bold") +
font("legend.title",size = 20,face = "bold") +
font("legend.text",face = "bold",size = 20)
p
DMS_comparison=compare_means(ratio ~ mouse, data = df,
group.by = "cluster")
DMS_comparison
write.csv(DMS_comparison,file='DMS_comparison.csv')
ggsave('DMS_projection_motif_by_mouse.pdf',height = 5,width = 3)
####
i=3 # MD
df= exn_meta %>% select(valid_cells_names[i],cluster,mouse) %>%
filter(.data[[valid_cells_names[[i]]]]==valid_cells_names[[i]]) %>%
group_by(mouse,cluster) %>% summarise(., count = n()) %>%
mutate(ratio=count/sum(count)) %>% mutate(ratio=round(ratio,3))
df
sub_exn_palette=c('#7fc97f', '#beaed4', '#fdc086', '#f0027f', '#bf5b17',
'#666666')
p=ggbarplot(df, x = "mouse", y = "ratio",
color = "cluster", fill = "cluster",
palette = sub_exn_palette,
label = TRUE, lab.pos = "in", lab.col = "black",
repel=T,lab.size = 7,
xlab ="",
ylab = 'Ratio', ggtheme=theme_pubclean()) +
font("xlab", size = 20,face = "bold") +
font("ylab", size = 20,face = "bold") +
font("xy.text", size = 20,face = "bold") +
font("legend.title",size = 20,face = "bold") +
font("legend.text",face = "bold",size = 20)
p
MD_comparison=compare_means(ratio ~ mouse, data = df,
group.by = "cluster")
MD_comparison
write.csv(MD_comparison,file='MD_comparison.csv')
ggsave('MD_projection_motif_by_mouse.pdf',height = 5,width = 3)
####
i=4 #BLA
df= exn_meta %>% select(valid_cells_names[i],cluster,mouse) %>%
filter(.data[[valid_cells_names[[i]]]]==valid_cells_names[[i]]) %>%
group_by(mouse,cluster) %>% summarise(., count = n()) %>%
mutate(ratio=count/sum(count)) %>% mutate(ratio=round(ratio,3))
df
sub_exn_palette=c('#7fc97f', '#beaed4', '#fdc086', '#f0027f', '#bf5b17',
'#666666')
p=ggbarplot(df, x = "mouse", y = "ratio",
color = "cluster", fill = "cluster",
palette = sub_exn_palette,
label = TRUE, lab.pos = "in", lab.col = "black",
repel=T,lab.size = 7,
xlab ="",
ylab = 'Ratio', ggtheme=theme_pubclean()) +
font("xlab", size = 20,face = "bold") +
font("ylab", size = 20,face = "bold") +
font("xy.text", size = 20,face = "bold") +
font("legend.title",size = 20,face = "bold") +
font("legend.text",face = "bold",size = 20)
p
BLA_comparison=compare_means(ratio ~ mouse, data = df,
group.by = "cluster")
BLA_comparison
write.csv(BLA_comparison,file='BLA_comparison.csv')
ggsave('BLA_projection_motif_by_mouse.pdf',height = 5,width = 3)
####
i=5 #LH
df= exn_meta %>% select(valid_cells_names[i],cluster,mouse) %>%
filter(.data[[valid_cells_names[[i]]]]==valid_cells_names[[i]]) %>%
group_by(mouse,cluster) %>% summarise(., count = n()) %>%
mutate(ratio=count/sum(count)) %>% mutate(ratio=round(ratio,3))
df
sub_exn_palette=c('#7fc97f', '#beaed4', '#fdc086', '#f0027f', '#bf5b17',
'#666666')
p=ggbarplot(df, x = "mouse", y = "ratio",
color = "cluster", fill = "cluster",
palette = sub_exn_palette,
label = TRUE, lab.pos = "in", lab.col = "black",
repel=T,lab.size = 7,
xlab ="",
ylab = 'Ratio', ggtheme=theme_pubclean()) +
font("xlab", size = 20,face = "bold") +
font("ylab", size = 20,face = "bold") +
font("xy.text", size = 20,face = "bold") +
font("legend.title",size = 20,face = "bold") +
font("legend.text",face = "bold",size = 20)
LH_comparison=compare_means(ratio ~ mouse, data = df,
group.by = "cluster")
LH_comparison
write.csv(LH_comparison,file='LH_comparison.csv')
p
ggsave('LH_projection_motif_by_mouse.pdf',height = 5,width = 3)
|
library(shiny)
library(ggplot2)
library(nycflights13)
library(data.table)
flights$date<-as.Date(paste(flights$year, flights$month, flights$day, sep = '-'))
flightsDT <- data.table(flights)
shinyServer(function(input, output) {
#flghts<-flights[flights$month>=input$startmonth,]
output$ggplot <- renderPlot({
    ggplot(flights, aes_string(x = input$var1, y = input$var2, col = paste0("factor(", input$col, ")"))) + geom_point() +
geom_smooth(method = 'lm', formula = y ~ poly(x, as.numeric(input$poly)), se = FALSE,col="blue")
})
  output$model <- renderPrint({
    dat <- na.omit(flights[, c(input$var1, input$var2)])
    fit <- lm(dat[[input$var2]] ~ poly(dat[[input$var1]], as.numeric(input$poly)))
    summary(fit)
  })
  output$coeff <- renderTable({
    dat <- na.omit(flights[, c(input$var1, input$var2)])
    fit <- lm(dat[[input$var2]] ~ poly(dat[[input$var1]], as.numeric(input$poly)))
    summary(fit)$coeff
  })
})
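# A sketch of the inputs this server expects; the real ui.R is not part of this
# file, so the labels and choices below are placeholders only
ui_sketch <- fluidPage(
  selectInput("var1", "X variable", choices = c("dep_delay", "distance")),
  selectInput("var2", "Y variable", choices = c("arr_delay", "air_time")),
  selectInput("col",  "Colour by",  choices = c("origin", "carrier")),
  sliderInput("poly", "Polynomial degree", min = 1, max = 5, value = 1),
  plotOutput("ggplot"),
  verbatimTextOutput("model"),
  tableOutput("coeff")
)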
|
/Server.R
|
no_license
|
Zsopi/shiny
|
R
| false
| false
| 1,093
|
r
|
library(shiny)
library(ggplot2)
library(nycflights13)
library(data.table)
flights$date<-as.Date(paste(flights$year, flights$month, flights$day, sep = '-'))
flightsDT <- data.table(flights)
shinyServer(function(input, output) {
#flghts<-flights[flights$month>=input$startmonth,]
output$ggplot <- renderPlot({
    ggplot(flights, aes_string(x = input$var1, y = input$var2, col = paste0("factor(", input$col, ")"))) + geom_point() +
geom_smooth(method = 'lm', formula = y ~ poly(x, as.numeric(input$poly)), se = FALSE,col="blue")
})
  output$model <- renderPrint({
    dat <- na.omit(flights[, c(input$var1, input$var2)])
    fit <- lm(dat[[input$var2]] ~ poly(dat[[input$var1]], as.numeric(input$poly)))
    summary(fit)
  })
  output$coeff <- renderTable({
    dat <- na.omit(flights[, c(input$var1, input$var2)])
    fit <- lm(dat[[input$var2]] ~ poly(dat[[input$var1]], as.numeric(input$poly)))
    summary(fit)$coeff
  })
})
|
#data_raw is the data frame including information that should be in one row being split up
#into two rows. In this example, a student needed the number of several events per soccer
#game (e.g. shots or passes) but collected them per team in different rows. For sure, a
#problem which can happen with different types of data and in many research areas.
#Now, let's assume that it is not possible to identify unique observations based on one
#item, which actually was the case for this example. The student had one item called
#“match-up”, but as the data.frame included several seasons, it could occur several times.
#Thus, here an example if you have to identify unique observations using two variables:
#Create the new data frame and the helper (k) for the row. More information about this later
Observations <- data.frame()
k = 0
#Use the variable with fewer unique values as the first criterion and start a loop.
#I call this variable the SR_Variable (for smaller range), the other the BG_Variable (bigger range; abbreviated BR_Variable in the code below)
Cases_SR_Variable <- unique(data_raw$Saison)
end_loop <- length(Cases_SR_Variable)
for (i in 1:end_loop) {
#Select all the IDs that have the respective value in the SR Variable. Basically, I extract IDs for a subset
SR_Variable_IDs <-
as.integer(rownames(subset(
data_raw, data_raw[, 1] == Cases_SR_Variable[i]
)))
#Create a Vector including all unique values of the BG_Variable for this subset IDs
VectorObservations <- data_raw[SR_Variable_IDs, 2]
  #Now, I create a list with each object being one occurrence of a unique observation.
#Each object again consists of the IDs for this unique observation. In this case of two IDs.
BR_Variable_ID_list <-
tapply(seq_along(VectorObservations),
VectorObservations,
identity)[unique(VectorObservations)]
#Now I loop through this list
for (j in 1:length(BR_Variable_ID_list)) {
#Extract the ids for the object (unique observation)
id1 = SR_Variable_IDs[BR_Variable_ID_list[[j]][1]]
id2 = SR_Variable_IDs[BR_Variable_ID_list[[j]][2]]
#I will just give two examples of combining data for one observation.
    #Example: Add up the values for the observation and write them to the Observations data frame. E.g. for columns 3:10
Observations[j + k, 3:10] = data_raw[id1, 3:10] + data_raw[id2, 3:10]
#Example: If information is equal for both rows of the observation. E.g. column 11
Observations[j + k, 11] = data_raw[id1, 11]
}
#Note: Why j+k? j would only be enough if we can identify unique observations based on one variable
#(and therefore in one loop)
  #This is why we need k and manipulate it here
k = k + length(BR_Variable_ID_list)
}
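#A small self-contained illustration of the intended result (toy column names,
#not the real data); it uses split() instead of the explicit double loop above,
#but produces the same kind of one-row-per-observation output:
toy <- data.frame(season  = c("18/19", "18/19", "19/20", "19/20"),
                  matchup = c("A-B", "A-B", "A-B", "A-B"),
                  shots   = c(10, 7, 12, 9),
                  referee = c("X", "X", "Y", "Y"))
toy_combined <- do.call(rbind, lapply(
  split(toy, list(toy$season, toy$matchup), drop = TRUE),
  function(d) data.frame(season = d$season[1], matchup = d$matchup[1],
                         shots = sum(d$shots), referee = d$referee[1])))
toy_combined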
|
/combine_rows_for_split_up_observations.R
|
no_license
|
PostdOK/data_cleaning_helpers_R
|
R
| false
| false
| 2,688
|
r
|
#data_raw is the data frame including information that should be in one row being split up
#into two rows. In this example, a student needed the number of several events per soccer
#game (e.g. shots or passes) but collected them per team in different rows. For sure, a
#problem which can happen with different types of data and in many research areas.
#Now, let's assume that it is not possible to identify unique observations based on one
#item, which actually was the case for this example. The student had one item called
#“match-up”, but as the data.frame included several seasons, it could occur several times.
#Thus, here an example if you have to identify unique observations using two variables:
#Create the new data frame and the helper (k) for the row. More information about this later
Observations <- data.frame()
k = 0
#Use the variable with fewer unique values as the first criterion and start a loop.
#I call this variable the SR_Variable (for smaller range), the other the BG_Variable (bigger range; abbreviated BR_Variable in the code below)
Cases_SR_Variable <- unique(data_raw$Saison)
end_loop <- length(Cases_SR_Variable)
for (i in 1:end_loop) {
#Select all the IDs that have the respective value in the SR Variable. Basically, I extract IDs for a subset
SR_Variable_IDs <-
as.integer(rownames(subset(
data_raw, data_raw[, 1] == Cases_SR_Variable[i]
)))
#Create a Vector including all unique values of the BG_Variable for this subset IDs
VectorObservations <- data_raw[SR_Variable_IDs, 2]
  #Now, I create a list with each object being one occurrence of a unique observation.
#Each object again consists of the IDs for this unique observation. In this case of two IDs.
BR_Variable_ID_list <-
tapply(seq_along(VectorObservations),
VectorObservations,
identity)[unique(VectorObservations)]
#Now I loop through this list
for (j in 1:length(BR_Variable_ID_list)) {
#Extract the ids for the object (unique observation)
id1 = SR_Variable_IDs[BR_Variable_ID_list[[j]][1]]
id2 = SR_Variable_IDs[BR_Variable_ID_list[[j]][2]]
#I will just give two examples of combining data for one observation.
    #Example: Add up the values for the observation and write them to the Observations data frame. E.g. for columns 3:10
Observations[j + k, 3:10] = data_raw[id1, 3:10] + data_raw[id2, 3:10]
#Example: If information is equal for both rows of the observation. E.g. column 11
Observations[j + k, 11] = data_raw[id1, 11]
}
#Note: Why j+k? j would only be enough if we can identify unique observations based on one variable
#(and therefore in one loop)
  #This is why we need k and manipulate it here
k = k + length(BR_Variable_ID_list)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3_operations.R
\name{s3_head_bucket}
\alias{s3_head_bucket}
\title{This operation is useful to determine if a bucket exists and you have
permission to access it}
\usage{
s3_head_bucket(Bucket)
}
\arguments{
\item{Bucket}{[required]}
}
\description{
This operation is useful to determine if a bucket exists and you have
permission to access it.
}
\section{Request syntax}{
\preformatted{svc$head_bucket(
Bucket = "string"
)
}
}
\examples{
# This operation checks to see if a bucket exists.
\donttest{svc$head_bucket(
Bucket = "acl1"
)}
}
\keyword{internal}
|
/paws/man/s3_head_bucket.Rd
|
permissive
|
peoplecure/paws
|
R
| false
| true
| 641
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3_operations.R
\name{s3_head_bucket}
\alias{s3_head_bucket}
\title{This operation is useful to determine if a bucket exists and you have
permission to access it}
\usage{
s3_head_bucket(Bucket)
}
\arguments{
\item{Bucket}{[required]}
}
\description{
This operation is useful to determine if a bucket exists and you have
permission to access it.
}
\section{Request syntax}{
\preformatted{svc$head_bucket(
Bucket = "string"
)
}
}
\examples{
# This operation checks to see if a bucket exists.
\donttest{svc$head_bucket(
Bucket = "acl1"
)}
}
\keyword{internal}
|
##Download and unzip the data
###create the "data" directory
if (!dir.exists("./data")){
dir.create("./data")
}
###Download zip file
if (!file.exists("./data/householdpowerconsumption.zip")){
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, destfile = "./data/householdpowerconsumption.zip")
unzip("./data/householdpowerconsumption.zip", exdir = "./data")
}
###unzip the file
if (!file.exists("./data/household_power_consumption.txt")){
unzip("./data/householdpowerconsumption.zip", exdir = "./data")
}
##load data into memory
powercons <- read.table("./data/household_power_consumption.txt", header = TRUE,
sep = ";", na.strings = "?", colClasses = "character",
nrows = 70000, comment.char = "")
##convert date, time and numbers to appropriate classes
powercons$Date <- as.Date(powercons$Date, "%d/%m/%Y")
powercons$Time <- strptime(powercons$Time, "%H:%M:%S", tz = "America/Los_Angeles")
powercons[3:9] <- as.data.frame(sapply(powercons[3:9], as.numeric))
##Subset just 2007-02-01 to 2007-02-02
powerconsFeb <- subset(powercons, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
######Till here are prepring data and will be the same for all charts####
##Firsts plot
png("./assignment1/plot1.png", type = "cairo")
hist(powerconsFeb$Global_active_power, col = "red",
xlab = "Global Active Power (kilowatts)", main = "")
title(main = "Global Active Power")
dev.off()
|
/plot1.R
|
no_license
|
srhumir/ExData_Plotting1
|
R
| false
| false
| 1,540
|
r
|
##Download and unzip the data
###create the "data" directory
if (!dir.exists("./data")){
dir.create("./data")
}
###Download zip file
if (!file.exists("./data/householdpowerconsumption.zip")){
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, destfile = "./data/householdpowerconsumption.zip")
unzip("./data/householdpowerconsumption.zip", exdir = "./data")
}
###unzip the file
if (!file.exists("./data/household_power_consumption.txt")){
unzip("./data/householdpowerconsumption.zip", exdir = "./data")
}
##load data into memory
powercons <- read.table("./data/household_power_consumption.txt", header = TRUE,
sep = ";", na.strings = "?", colClasses = "character",
nrows = 70000, comment.char = "")
##convert date, time and numbers to appropriate classes
powercons$Date <- as.Date(powercons$Date, "%d/%m/%Y")
powercons$Time <- strptime(powercons$Time, "%H:%M:%S", tz = "America/Los_Angeles")
powercons[3:9] <- as.data.frame(sapply(powercons[3:9], as.numeric))
##Subset just 2007-02-01 to 2007-02-02
powerconsFeb <- subset(powercons, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
######Till here are prepring data and will be the same for all charts####
##Firsts plot
png("./assignment1/plot1.png", type = "cairo")
hist(powerconsFeb$Global_active_power, col = "red",
xlab = "Global Active Power (kilowatts)", main = "")
title(main = "Global Active Power")
dev.off()
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/ca-scoreFACT_V.R
\name{scoreFACT_V}
\alias{scoreFACT_V}
\title{Score the FACT-V}
\usage{
scoreFACT_V(df, updateItems = FALSE, keepNvalid = FALSE)
}
\arguments{
\item{df}{A data frame with the FACT-V items, appropriately-named.}
\item{updateItems}{Logical, if \code{TRUE} any original item that is
reverse coded for scoring will be replaced by its reverse coded version
in the returned data frame, and any values of 8 or 9 will be replaced
with NA. The default, \code{FALSE}, returns the original items
unmodified.}
\item{keepNvalid}{Logical, if \code{TRUE} the function
returns an additional variable for each of the returned scale scores
containing the number of valid, non-missing responses from each
respondent to the items on the given scale. If \code{FALSE} (the
default), these variables are omitted from the returned data frame.}
}
\value{
The original data frame is returned (optionally with modified
items if \code{updateItems = TRUE}) with new variables corresponding to
the scored scales. If \code{keepNvalid = TRUE}, for each scored scale an
additional variable is returned that contains the number of valid
responses each respondent made to the items making up the given scale.
These optional variables have names of the format \code{SCALENAME_N}.
The following scale scores are returned:
\describe{
\item{PWB}{Physical Well-Being subscale}
\item{SWB}{Social/Family Well-Being subscale}
\item{EWB}{Emotional Well-Being subscale}
\item{FWB}{Functional Well-Being subscale}
\item{FACTG}{FACT-G Total Score (i.e., PWB+SWB+EWB+FWB)}
\item{VCS}{Vulvar Cancer subscale}
\item{FACT_V_TOTAL}{FACT-V Total Score (i.e., PWB+SWB+EWB+FWB+VCS)}
\item{FACT_V_TOI}{FACT-V Trial Outcome Index (i.e., PWB+FWB+VCS)}
}
}
\description{
Generates all of the scores of the Functional Assessment of Cancer Therapy -
Vulvar Cancer (FACT-V, v4) from item responses.
}
\details{
Given a data frame that includes all of the FACT-V (Version 4) items as
variables, appropriately named, this function generates all of the FACT-V
scale scores. It is crucial that the item variables in the supplied data
frame are named according to FACT conventions. For example, the first
physical well-being item should be named GP1, the second GP2, and so on.
Please refer to the materials provided by \url{http://www.facit.org} for the
particular questionnaire you are using. In particular, refer to the left
margin of the official questionnaire (i.e., from facit.org) for the
appropriate item variable names.
}
\section{Note}{
Keep in mind that this function (and R in general) is case-sensitive.
All variables should be in numeric or integer format.
This scoring function expects missing item responses to be coded as NA,
8, or 9, and valid item responses to be coded as 0, 1, 2, 3, or 4. Any
other value for any of the items will result in an error message and no
scores.
Some item variables are reverse coded for the purpose of generating the
scale scores. The official (e.g., from \url{http://www.facit.org}) SAS
and SPSS scoring algorithms for this questionnaire automatically replace
the original items with their reverse-coded versions. This can be
confusing if you accidentally run the algorithm more than once on your
data. As its default, \code{scoreFACT_V} DOES NOT replace any of your
original item variables with the reverse coded versions. However, for
consistency with the behavior of the other versions on
\url{http://www.facit.org}, the \code{updateItems} argument is
provided. If set to \code{TRUE}, any item that is supposed to be
reverse coded will be replaced with its reversed version in the data
frame returned by \code{scoreFACT_V}.
}
\examples{
## Setting up item names for fake data
G_names <- c(paste0('GP', 1:7),
paste0('GS', 1:7),
paste0('GE', 1:6),
paste0('GF', 1:7))
AC_names <- c('V1', 'V2', 'Cx3', 'V3', 'Cx4', 'V4', 'Cx5', 'BL4', 'C7', 'Cx6', 'C6', 'BL1',
'V5', 'Cx7', 'V6', 'V7', 'V8', 'V9', 'HN1')
itemNames <- c(G_names, AC_names)
## Generating random item responses for 8 fake respondents
set.seed(6375309)
exampleDat <- t(replicate(8, sample(0:4, size = length(itemNames), replace = TRUE)))
## Making half of respondents missing about 10\% of items,
## half missing about 50\%.
miss10 <- t(replicate(4, sample(c(0, 9), prob = c(0.9, 0.1),
size = length(itemNames), replace = TRUE)))
miss50 <- t(replicate(4, sample(c(0, 9), prob = c(0.5, 0.5),
size = length(itemNames), replace = TRUE)))
missMtx <- rbind(miss10, miss50)
## Using 9 as the code for missing responses
exampleDat[missMtx == 9] <- 9
exampleDat <- as.data.frame(cbind(ID = paste0('ID', 1:8),
as.data.frame(exampleDat)))
names(exampleDat) <- c('ID', itemNames)
## Returns data frame with scale scores and with original items untouched
scoredDat <- scoreFACT_V(exampleDat)
names(scoredDat)
scoredDat
## Returns data frame with scale scores, with the appropriate items
## reverse scored, and with item values of 8 and 9 replaced with NA.
## Also illustrates the effect of setting keepNvalid = TRUE.
scoredDat <- scoreFACT_V(exampleDat, updateItems = TRUE, keepNvalid = TRUE)
names(scoredDat)
## Descriptives of scored scales
summary(scoredDat[, c('PWB', 'SWB', 'EWB', 'FWB', 'FACTG',
'VCS', 'FACT_V_TOTAL', 'FACT_V_TOI')])
}
\references{
FACT-V Scoring Guidelines, available at \url{http://www.facit.org}
}
|
/man/scoreFACT_V.Rd
|
no_license
|
cran/FACTscorer
|
R
| false
| false
| 5,607
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/ca-scoreFACT_V.R
\name{scoreFACT_V}
\alias{scoreFACT_V}
\title{Score the FACT-V}
\usage{
scoreFACT_V(df, updateItems = FALSE, keepNvalid = FALSE)
}
\arguments{
\item{df}{A data frame with the FACT-V items, appropriately-named.}
\item{updateItems}{Logical, if \code{TRUE} any original item that is
reverse coded for scoring will be replaced by its reverse coded version
in the returned data frame, and any values of 8 or 9 will be replaced
with NA. The default, \code{FALSE}, returns the original items
unmodified.}
\item{keepNvalid}{Logical, if \code{TRUE} the function
returns an additional variable for each of the returned scale scores
containing the number of valid, non-missing responses from each
respondent to the items on the given scale. If \code{FALSE} (the
default), these variables are omitted from the returned data frame.}
}
\value{
The original data frame is returned (optionally with modified
items if \code{updateItems = TRUE}) with new variables corresponding to
the scored scales. If \code{keepNvalid = TRUE}, for each scored scale an
additional variable is returned that contains the number of valid
responses each respondent made to the items making up the given scale.
These optional variables have names of the format \code{SCALENAME_N}.
The following scale scores are returned:
\describe{
\item{PWB}{Physical Well-Being subscale}
\item{SWB}{Social/Family Well-Being subscale}
\item{EWB}{Emotional Well-Being subscale}
\item{FWB}{Functional Well-Being subscale}
\item{FACTG}{FACT-G Total Score (i.e., PWB+SWB+EWB+FWB)}
\item{VCS}{Vulvar Cancer subscale}
\item{FACT_V_TOTAL}{FACT-V Total Score (i.e., PWB+SWB+EWB+FWB+VCS)}
\item{FACT_V_TOI}{FACT-V Trial Outcome Index (i.e., PWB+FWB+VCS)}
}
}
\description{
Generates all of the scores of the Functional Assessment of Cancer Therapy -
Vulvar Cancer (FACT-V, v4) from item responses.
}
\details{
Given a data frame that includes all of the FACT-V (Version 4) items as
variables, appropriately named, this function generates all of the FACT-V
scale scores. It is crucial that the item variables in the supplied data
frame are named according to FACT conventions. For example, the first
physical well-being item should be named GP1, the second GP2, and so on.
Please refer to the materials provided by \url{http://www.facit.org} for the
particular questionnaire you are using. In particular, refer to the left
margin of the official questionnaire (i.e., from facit.org) for the
appropriate item variable names.
}
\section{Note}{
Keep in mind that this function (and R in general) is case-sensitive.
All variables should be in numeric or integer format.
This scoring function expects missing item responses to be coded as NA,
8, or 9, and valid item responses to be coded as 0, 1, 2, 3, or 4. Any
other value for any of the items will result in an error message and no
scores.
Some item variables are reverse coded for the purpose of generating the
scale scores. The official (e.g., from \url{http://www.facit.org}) SAS
and SPSS scoring algorithms for this questionnaire automatically replace
the original items with their reverse-coded versions. This can be
confusing if you accidentally run the algorithm more than once on your
data. As its default, \code{scoreFACT_V} DOES NOT replace any of your
original item variables with the reverse coded versions. However, for
consistency with the behavior of the other versions on
\url{http://www.facit.org}, the \code{updateItems} argument is
provided. If set to \code{TRUE}, any item that is supposed to be
reverse coded will be replaced with its reversed version in the data
frame returned by \code{scoreFACT_V}.
}
\examples{
## Setting up item names for fake data
G_names <- c(paste0('GP', 1:7),
paste0('GS', 1:7),
paste0('GE', 1:6),
paste0('GF', 1:7))
AC_names <- c('V1', 'V2', 'Cx3', 'V3', 'Cx4', 'V4', 'Cx5', 'BL4', 'C7', 'Cx6', 'C6', 'BL1',
'V5', 'Cx7', 'V6', 'V7', 'V8', 'V9', 'HN1')
itemNames <- c(G_names, AC_names)
## Generating random item responses for 8 fake respondents
set.seed(6375309)
exampleDat <- t(replicate(8, sample(0:4, size = length(itemNames), replace = TRUE)))
## Making half of respondents missing about 10\% of items,
## half missing about 50\%.
miss10 <- t(replicate(4, sample(c(0, 9), prob = c(0.9, 0.1),
size = length(itemNames), replace = TRUE)))
miss50 <- t(replicate(4, sample(c(0, 9), prob = c(0.5, 0.5),
size = length(itemNames), replace = TRUE)))
missMtx <- rbind(miss10, miss50)
## Using 9 as the code for missing responses
exampleDat[missMtx == 9] <- 9
exampleDat <- as.data.frame(cbind(ID = paste0('ID', 1:8),
as.data.frame(exampleDat)))
names(exampleDat) <- c('ID', itemNames)
## Returns data frame with scale scores and with original items untouched
scoredDat <- scoreFACT_V(exampleDat)
names(scoredDat)
scoredDat
## Returns data frame with scale scores, with the appropriate items
## reverse scored, and with item values of 8 and 9 replaced with NA.
## Also illustrates the effect of setting keepNvalid = TRUE.
scoredDat <- scoreFACT_V(exampleDat, updateItems = TRUE, keepNvalid = TRUE)
names(scoredDat)
## Descriptives of scored scales
summary(scoredDat[, c('PWB', 'SWB', 'EWB', 'FWB', 'FACTG',
'VCS', 'FACT_V_TOTAL', 'FACT_V_TOI')])
}
\references{
FACT-V Scoring Guidelines, available at \url{http://www.facit.org}
}
|
df<-read.csv("temperatures.csv")
df
men<-df$temp[df$gender=="Male"]
women<-df$temp[df$gender=="Female"]
t.test(men,women,mu=0)
t.test(men-women,mu=0)
head(df)
hist(df$temp,breaks=50)
tapply(df$temp,df$gender,mean)
tapply(df$temp,df$gender,summary)
men<-df$temp[df$gender=="Male"]
women<-df$temp[df$gender=="Female"]
par(mfrow=c(2,1))
hist(men,breaks=30)
hist(women,breaks=30)
#boxplot(df$temp ~ df$gender)
t.test(men,conf.level=0.95)$conf.int
mean(men) #x = 98.10
sd(men) #s =.6987558
length(men)# n = 65(degrees of freedom = 64)
qqnorm(men)
t.test(men,mu=98.6)# mu is the hypothesised mean
#our sample mean lies about 5.7 standard errors below the hypothesised mean
#p-value
t.test(men,mu=98.1)
#if the p-value is well below 0.05, reject H0
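#quick hand check of the same t statistic: (sample mean - 98.6) / standard error
(mean(men) - 98.6) / (sd(men) / sqrt(length(men)))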
|
/csv/csvvvv/Hypothesis_Testing.R
|
no_license
|
Roger7410/R_Data_Mining
|
R
| false
| false
| 724
|
r
|
df<-read.csv("temperatures.csv")
df
men<-df$temp[df$gender=="Male"]
women<-df$temp[df$gender=="Female"]
t.test(men,women,mu=0)
t.test(men-women,mu=0)
head(df)
hist(df$temp,breaks=50)
tapply(df$temp,df$gender,mean)
tapply(df$temp,df$gender,summary)
men<-df$temp[df$gender=="Male"]
women<-df$temp[df$gender=="Female"]
par(mfrow=c(2,1))
hist(men,breaks=30)
hist(women,breaks=30)
#boxplot(df$temp ~ df$gender)
t.test(men,conf.level=0.95)$conf.int
mean(men) #x = 98.10
sd(men) #s =.6987558
length(men)# n = 65(degrees of freedom = 64)
qqnorm(men)
t.test(men,mu=98.6)# mu is the hypothesised mean
#our sample mean lies about 5.7 standard errors below the hypothesised mean
#p-value
t.test(men,mu=98.1)
#if the p-value is well below 0.05, reject H0
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cross_predixcan.R
\name{load_expression}
\alias{load_expression}
\title{Load predicted expression files from a folder}
\usage{
load_expression(folder, white_list = NULL, metaxcan_style = FALSE)
}
\description{
Load predicted expression files from a folder
}
|
/ratools/man/load_expression.Rd
|
permissive
|
hakyimlab/metaxcan-paper
|
R
| false
| true
| 336
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cross_predixcan.R
\name{load_expression}
\alias{load_expression}
\title{Load predicted expression files from a folder}
\usage{
load_expression(folder, white_list = NULL, metaxcan_style = FALSE)
}
\description{
Load predicted expression files from a folder
}
|
yaml <- '
default:
db_name: dbase
databases:
db1: !expr paste0(db_name, "/one")
db2: !expr paste0(db_name, "/two")
staging:
staging_postfix: _staging
db_name: dbase
databases:
db1: !expr paste0(db_name, staging_postfix, "/one")
db2: !expr paste0(db_name, staging_postfix, "/two")
'
# Ensure that base::get() doesn't get masked, for tests on CRAN
get <- base::get
with_config(yaml, config::get() )
with_config(yaml, config::get("databases", config = "default") )
with_config(yaml, config::get("databases", config = "staging") )
config_file <- system.file("tests/testthat/config.yml", package = "config")
if (file.exists(config_file)) {
with_config(config_file, config::get())
}
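# The active configuration can also be selected via the R_CONFIG_ACTIVE
# environment variable (a sketch; assumes the same config file as above)
if (file.exists(config_file)) {
  Sys.setenv(R_CONFIG_ACTIVE = "staging")
  print(config::get("databases", file = config_file))
  Sys.unsetenv("R_CONFIG_ACTIVE")
}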
|
/inst/examples/example_with_config.R
|
no_license
|
rstudio/config
|
R
| false
| false
| 710
|
r
|
yaml <- '
default:
db_name: dbase
databases:
db1: !expr paste0(db_name, "/one")
db2: !expr paste0(db_name, "/two")
staging:
staging_postfix: _staging
db_name: dbase
databases:
db1: !expr paste0(db_name, staging_postfix, "/one")
db2: !expr paste0(db_name, staging_postfix, "/two")
'
# Ensure that base::get() doesn't get masked, for tests on CRAN
get <- base::get
with_config(yaml, config::get() )
with_config(yaml, config::get("databases", config = "default") )
with_config(yaml, config::get("databases", config = "staging") )
config_file <- system.file("tests/testthat/config.yml", package = "config")
if (file.exists(config_file)) {
with_config(config_file, config::get())
}
|
#Copyright © 2016 RTE Réseau de transport d’électricité
#' View the content of an antares output
#'
#' This function displays each element of an \code{antaresData} object in a
#' spreadsheet-like viewer.
#'
#' @param x
#' An object of class \code{antaresData}, generated by the function
#' \code{\link{readAntares}}.
#' @param ...
#' Currently unused
#'
#' @return
#' Invisible NULL.
#'
#' @examples
#' \dontrun{
#' setSimulationPath()
#'
#' areas <-readAntares()
#' viewAntares(areas)
#'
#' output <- readAntares(areas = "all", links = "all", clusters = "all")
#' viewAntares(output) # Opens three data viewers for each element of output
#' }
#'
#' @export
#'
#'
viewAntares <- function(x, ...) {
UseMethod("viewAntares", x)
}
#' @export
viewAntares.default <- function(x, ...) {
title <- deparse(substitute(x))
if (is.data.frame(x) && ncol(x) > 100) {
    warning(title, " has ", ncol(x),
            " columns but the data viewer can only display the first 100 columns. ",
            ncol(x) - 100, " columns are masked.")
}
View(x, title)
}
#' @export
viewAntares.antaresDataList <- function(x, ...) {
title <- deparse(substitute(x))
for (k in names(x)) {
if (is.data.frame(x[[k]])) View(x[[k]], paste(title, k, sep = "$"))
}
invisible(NULL)
}
|
/R/viewAntares.R
|
no_license
|
cran/antaresRead
|
R
| false
| false
| 1,333
|
r
|
#Copyright © 2016 RTE Réseau de transport d’électricité
#' View the content of an antares output
#'
#' This function displays each element of an \code{antaresData} object in a
#' spreadsheet-like viewer.
#'
#' @param x
#' An object of class \code{antaresData}, generated by the function
#' \code{\link{readAntares}}.
#' @param ...
#' Currently unused
#'
#' @return
#' Invisible NULL.
#'
#' @examples
#' \dontrun{
#' setSimulationPath()
#'
#' areas <-readAntares()
#' viewAntares(areas)
#'
#' output <- readAntares(areas = "all", links = "all", clusters = "all")
#' viewAntares(output) # Opens three data viewers for each element of output
#' }
#'
#' @export
#'
#'
viewAntares <- function(x, ...) {
UseMethod("viewAntares", x)
}
#' @export
viewAntares.default <- function(x, ...) {
title <- deparse(substitute(x))
if (is.data.frame(x) && ncol(x) > 100) {
    warning(title, " has ", ncol(x),
            " columns but the data viewer can only display the first 100 columns. ",
            ncol(x) - 100, " columns are masked.")
}
View(x, title)
}
#' @export
viewAntares.antaresDataList <- function(x, ...) {
title <- deparse(substitute(x))
for (k in names(x)) {
if (is.data.frame(x[[k]])) View(x[[k]], paste(title, k, sep = "$"))
}
invisible(NULL)
}
|
#' Show landscape metrics
#'
#' @description Show landscape metrics on patch level printed in their corresponding patch.
#'
#' @param landscape *Raster object
#' @param what Patch level metric to plot
#' @param class How to show the labeled patches: "global" (single map), "all" (every class as facet),
#' or a vector with the specific classes one wants to show (every selected class as facet).
#' @param directions The number of directions in which patches should be
#' connected: 4 (rook's case) or 8 (queen's case).
#' @param consider_boundary Logical if cells that only neighbour the landscape boundary should be considered as core
#' @param edge_depth Distance (in cells) a cell has to be away from the patch edge to be considered as core cell
#' @param labels Logical flag indicating whether to print or not to print patch labels.
#' @param label_lsm If true, the value of the landscape metric is used as label
#' @param nrow,ncol Number of rows and columns for the facet.
#'
#' @details The function plots all patches with a fill corresponding to the value of the chosen landscape metric on patch level.
#'
#' @return ggplot
#'
#' @examples
#' show_lsm(landscape, what = "lsm_p_area", directions = 4)
#' show_lsm(landscape, what = "lsm_p_shape", class = c(1, 2), label_lsm = TRUE)
#' show_lsm(landscape, what = "lsm_p_circle", class = 3, labels = TRUE)
#'
#' @aliases show_lsm
#' @rdname show_lsm
#'
#' @export
show_lsm <- function(landscape, what, class = "global", directions = 8,
consider_boundary = FALSE, edge_depth = 1,
labels = FALSE, label_lsm = FALSE,
nrow = NULL, ncol = NULL) {
landscape <- landscape_as_list(landscape)
result <- lapply(X = landscape,
FUN = show_lsm_internal,
what = what,
class = class,
directions = directions,
consider_boundary = consider_boundary,
edge_depth = edge_depth,
labels = labels,
label_lsm = label_lsm,
nrow = nrow,
ncol = ncol)
names(result) <- paste0("layer_", 1:length(result))
return(result)
}
show_lsm_internal <- function(landscape, what, class,
directions, consider_boundary, edge_depth,
labels, label_lsm,
nrow, ncol) {
if (!what %in% list_lsm(level = "patch", simplify = TRUE) || length(what) > 1) {
stop("Please provide one patch level metric only. To list available metrics, run list_lsm(level = 'patch').",
call. = FALSE)
}
if (any(!(class %in% c("all", "global")))) {
if (!any(class %in% raster::unique(landscape))) {
stop("'class' must contain at least one value of a class existing in the landscape.",
call. = FALSE)
}
}
if (length(class) > 1 & any(class %in% c("all", "global"))) {
warning("'global' and 'all' can't be combined with any other class-argument.",
call. = FALSE)
}
landscape_labeled <- get_patches(landscape, directions = directions)[[1]]
lsm_fun <- match.fun(what)
if (what %in% c("lsm_p_core", "lsm_p_ncore")) {
fill_value <- lsm_fun(landscape,
directions = directions,
consider_boundary = consider_boundary,
edge_depth = edge_depth)
} else {
fill_value <- lsm_fun(landscape, directions = directions)
}
if (any(class == "global")) {
patches_tibble <- raster::as.data.frame(sum(raster::stack(landscape_labeled),
na.rm = TRUE),
xy = TRUE)
names(patches_tibble) <- c("x", "y", "id")
patches_tibble$id <- replace(patches_tibble$id,
patches_tibble$id == 0,
NA)
patches_tibble <- merge(x = patches_tibble,
y = fill_value,
by = "id",
all.x = TRUE)
patches_tibble$class.get_patches <- "global"
if (!labels) {
patches_tibble$label <- NA
} else {
if (label_lsm) {
patches_tibble$label <- round(patches_tibble$value, 2)
} else {
patches_tibble$label <- patches_tibble$id
}
}
}
if (any(class != "global")) {
patches_tibble <- lapply(X = seq_along(landscape_labeled), FUN = function(i){
names(landscape_labeled[[i]]) <- "id"
x <- raster::as.data.frame(landscape_labeled[[i]], xy = TRUE)
x$class <- names(landscape_labeled[i])
return(x)}
)
patches_tibble <- do.call(rbind, patches_tibble)
patches_tibble <- merge(x = patches_tibble,
y = fill_value,
by = "id",
all.x = TRUE,
suffixes = c(".get_patches", ".lsm"))
if (any(!(class %in% "all"))) {
class_index <- which(patches_tibble$class.get_patches %in% paste0("class_", class))
patches_tibble <- patches_tibble[class_index, ]
}
if (!labels) {
patches_tibble$label <- NA
} else {
if (label_lsm) {
patches_tibble$label <- round(patches_tibble$value, 2)
} else{
patches_tibble$label <- patches_tibble$id
}
}
}
plot <- ggplot2::ggplot(patches_tibble, ggplot2::aes(x, y)) +
ggplot2::coord_fixed() +
ggplot2::geom_raster(ggplot2::aes(fill = value)) +
ggplot2::geom_text(ggplot2::aes(label = label),
colour = "black", size = 2, na.rm = TRUE) +
ggplot2::facet_wrap(~ class.get_patches,
nrow = nrow, ncol = ncol) +
ggplot2::scale_x_continuous(expand = c(0, 0)) +
ggplot2::scale_y_continuous(expand = c(0, 0)) +
        ggplot2::labs(title = NULL, x = NULL, y = NULL) +
ggplot2::scale_fill_viridis_c(option = "E",
name = what,
na.value = "grey85") +
ggplot2::theme(
axis.title = ggplot2::element_blank(),
axis.ticks = ggplot2::element_blank(),
axis.text = ggplot2::element_blank(),
panel.grid = ggplot2::element_blank(),
axis.line = ggplot2::element_blank(),
strip.background = ggplot2::element_rect(fill = "grey80"),
strip.text = ggplot2::element_text(hjust = 0),
plot.margin = ggplot2::unit(c(0, 0, 0, 0), "lines"))
return(plot)
}
|
/R/show_lsm.R
|
no_license
|
cran/landscapemetrics
|
R
| false
| false
| 7,173
|
r
|
#' Show landscape metrics
#'
#' @description Show landscape metrics on patch level printed in their corresponding patch.
#'
#' @param landscape *Raster object
#' @param what Patch level metric to plot
#' @param class How to show the labeled patches: "global" (single map), "all" (every class as facet),
#' or a vector with the specific classes one wants to show (every selected class as facet).
#' @param directions The number of directions in which patches should be
#' connected: 4 (rook's case) or 8 (queen's case).
#' @param consider_boundary Logical if cells that only neighbour the landscape boundary should be considered as core
#' @param edge_depth Distance (in cells) a cell has to be away from the patch edge to be considered as core cell
#' @param labels Logical flag indicating whether to print or not to print patch labels.
#' @param label_lsm If true, the value of the landscape metric is used as label
#' @param nrow,ncol Number of rows and columns for the facet.
#'
#' @details The function plots all patches with a fill corresponding to the value of the chosen landscape metric on patch level.
#'
#' @return ggplot
#'
#' @examples
#' show_lsm(landscape, what = "lsm_p_area", directions = 4)
#' show_lsm(landscape, what = "lsm_p_shape", class = c(1, 2), label_lsm = TRUE)
#' show_lsm(landscape, what = "lsm_p_circle", class = 3, labels = TRUE)
#'
#' @aliases show_lsm
#' @rdname show_lsm
#'
#' @export
show_lsm <- function(landscape, what, class = "global", directions = 8,
consider_boundary = FALSE, edge_depth = 1,
labels = FALSE, label_lsm = FALSE,
nrow = NULL, ncol = NULL) {
landscape <- landscape_as_list(landscape)
result <- lapply(X = landscape,
FUN = show_lsm_internal,
what = what,
class = class,
directions = directions,
consider_boundary = consider_boundary,
edge_depth = edge_depth,
labels = labels,
label_lsm = label_lsm,
nrow = nrow,
ncol = ncol)
names(result) <- paste0("layer_", 1:length(result))
return(result)
}
show_lsm_internal <- function(landscape, what, class,
directions, consider_boundary, edge_depth,
labels, label_lsm,
nrow, ncol) {
if (!what %in% list_lsm(level = "patch", simplify = TRUE) || length(what) > 1) {
stop("Please provide one patch level metric only. To list available metrics, run list_lsm(level = 'patch').",
call. = FALSE)
}
if (any(!(class %in% c("all", "global")))) {
if (!any(class %in% raster::unique(landscape))) {
stop("'class' must contain at least one value of a class existing in the landscape.",
call. = FALSE)
}
}
if (length(class) > 1 & any(class %in% c("all", "global"))) {
warning("'global' and 'all' can't be combined with any other class-argument.",
call. = FALSE)
}
landscape_labeled <- get_patches(landscape, directions = directions)[[1]]
lsm_fun <- match.fun(what)
if (what %in% c("lsm_p_core", "lsm_p_ncore")) {
fill_value <- lsm_fun(landscape,
directions = directions,
consider_boundary = consider_boundary,
edge_depth = edge_depth)
} else {
fill_value <- lsm_fun(landscape, directions = directions)
}
if (any(class == "global")) {
patches_tibble <- raster::as.data.frame(sum(raster::stack(landscape_labeled),
na.rm = TRUE),
xy = TRUE)
names(patches_tibble) <- c("x", "y", "id")
patches_tibble$id <- replace(patches_tibble$id,
patches_tibble$id == 0,
NA)
patches_tibble <- merge(x = patches_tibble,
y = fill_value,
by = "id",
all.x = TRUE)
patches_tibble$class.get_patches <- "global"
if (!labels) {
patches_tibble$label <- NA
} else {
if (label_lsm) {
patches_tibble$label <- round(patches_tibble$value, 2)
} else {
patches_tibble$label <- patches_tibble$id
}
}
}
if (any(class != "global")) {
patches_tibble <- lapply(X = seq_along(landscape_labeled), FUN = function(i){
names(landscape_labeled[[i]]) <- "id"
x <- raster::as.data.frame(landscape_labeled[[i]], xy = TRUE)
x$class <- names(landscape_labeled[i])
return(x)}
)
patches_tibble <- do.call(rbind, patches_tibble)
patches_tibble <- merge(x = patches_tibble,
y = fill_value,
by = "id",
all.x = TRUE,
suffixes = c(".get_patches", ".lsm"))
if (any(!(class %in% "all"))) {
class_index <- which(patches_tibble$class.get_patches %in% paste0("class_", class))
patches_tibble <- patches_tibble[class_index, ]
}
if (!labels) {
patches_tibble$label <- NA
} else {
if (label_lsm) {
patches_tibble$label <- round(patches_tibble$value, 2)
} else{
patches_tibble$label <- patches_tibble$id
}
}
}
plot <- ggplot2::ggplot(patches_tibble, ggplot2::aes(x, y)) +
ggplot2::coord_fixed() +
ggplot2::geom_raster(ggplot2::aes(fill = value)) +
ggplot2::geom_text(ggplot2::aes(label = label),
colour = "black", size = 2, na.rm = TRUE) +
ggplot2::facet_wrap(~ class.get_patches,
nrow = nrow, ncol = ncol) +
ggplot2::scale_x_continuous(expand = c(0, 0)) +
ggplot2::scale_y_continuous(expand = c(0, 0)) +
        ggplot2::labs(title = NULL, x = NULL, y = NULL) +
ggplot2::scale_fill_viridis_c(option = "E",
name = what,
na.value = "grey85") +
ggplot2::theme(
axis.title = ggplot2::element_blank(),
axis.ticks = ggplot2::element_blank(),
axis.text = ggplot2::element_blank(),
panel.grid = ggplot2::element_blank(),
axis.line = ggplot2::element_blank(),
strip.background = ggplot2::element_rect(fill = "grey80"),
strip.text = ggplot2::element_text(hjust = 0),
plot.margin = ggplot2::unit(c(0, 0, 0, 0), "lines"))
return(plot)
}
|
d <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.strings="?")
d<- d[d$Date=="1/2/2007" | d$Date=="2/2/2007",]
#Plot1
png(file = "plot1.png", width = 480, height = 480)
hist(d$Global_active_power,col="red", breaks = 12, main ="Global Active Power", xlab = "Global Active Power (kilowatts)",ylab = "Frequency")
dev.off()
|
/plot1.R
|
no_license
|
Tatiana10/ExData_Plotting1
|
R
| false
| false
| 330
|
r
|
d <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.strings="?")
d<- d[d$Date=="1/2/2007" | d$Date=="2/2/2007",]
#Plot1
png(file = "plot1.png", width = 480, height = 480)
hist(d$Global_active_power,col="red", breaks = 12, main ="Global Active Power", xlab = "Global Active Power (kilowatts)",ylab = "Frequency")
dev.off()
|
## The two functions below are used to demonstrate the principle of caching
## expensive calculations witin an R object that also contains the target data.
## makeCacheMatrix creates a special matrix that is capable of storing/caching
## its own inverse matrix (the matrix x is assumned to be square & invertable)
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinv <- function(solve) i <<- solve
getinv <- function() i
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## cacheSolve returns the inverse matrix of x. If it has already been calculated
## then the cached matrix is returned, otherwise the solve() function is used
## to calculate the inverse matrix (and also store it ready for the next call).
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinv()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinv(i)
i
}
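## A minimal usage sketch (the 2x2 matrix below is made up for illustration)
m_example <- makeCacheMatrix(matrix(c(2, 0, 0, 4), 2, 2))
cacheSolve(m_example)   ## first call computes the inverse and caches it
cacheSolve(m_example)   ## second call prints "getting cached data" and returns the cache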
|
/cachematrix.R
|
no_license
|
SoundGeeza/ProgrammingAssignment2
|
R
| false
| false
| 1,068
|
r
|
## The two functions below are used to demonstrate the principle of caching
## expensive calculations witin an R object that also contains the target data.
## makeCacheMatrix creates a special matrix that is capable of storing/caching
## its own inverse matrix (the matrix x is assumned to be square & invertable)
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinv <- function(solve) i <<- solve
getinv <- function() i
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## cacheSolve returns the inverse matrix of x. If it has already been calculated
## then the cached matrix is returned, otherwise the solve() function is used
## to calculate the inverse matrix (and also store it ready for the next call).
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinv()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinv(i)
i
}
|
## Quick R script to conduct monte carlo null model test for the number of steps on a phylogenetic tree
library(ape)
library(phangorn)
# Here's a 32 taxon tree with a basal split between 8 and 24 taxon clades:
t32=read.tree(text="((((t30:1,t23:1):1,(t32:1,(t19:1,(t11:1,t24:1):1):1):1):1,((((t26:1,t14:1):1,(((t21:1,t20:1):1,t5:1):1,(t22:1,t13:1):1):1):1,(t10:1,(t3:1,t18:1):1):1):1,((t15:1,t7:1):1,(((t6:1,(t17:1,t9:1):1):1,t29:1):1,(t25:1,t31:1):1):1):1):1):1,(((t12:1,t27:1):1,(t4:1,t1:1):1):1,(t8:1,(t16:1,(t28:1,t2:1):1):1):1):1);")
plot(t32,'cladogram',use.edge.length=F)
#create a color pallette for plotting trait values
pall <- c('yellow','lightblue')
# create a trait with a high degree of phylosignal, convert to class phyDat to work with the phangorn library
x <- c(rep(1,24),rep(0,8))
names(x) <- t32$tip.label
xx <- phyDat(x,type='USER',levels=c(0,1))
# plot tree with trait values
plot(t32,'cladogram',use.edge.length=F,show.tip.label = F,direction='upwards',edge.width=2)
tiplabels(xx,bg=pall[x+1])
# calculate minimum number of steps under parsimony for evolution of this trait
parsimony(t32,xx)
# now create a trait with 8 0's scattered across tips, each one with a sister taxon with state 1
y <- c(1,0,1,1,1,0,1,1,1,0,1,1,1,1,1,0,1,1,1,1,0,1,1,0,1,1,1,0,1,1,1,0)
names(y) <- t32$tip.label
yy <- phyDat(y,type='USER',levels=c(0,1))
plot(t32,'cladogram',use.edge.length=F,show.tip.label = F,direction='upwards',edge.width=2)
tiplabels(y,bg=pall[y+1])
# calculate minimum number of steps
parsimony(t32,yy)
# create a function that takes a tree and a trait vector, and randomly resamples the trait with or without replacement. Without replacement simply permutes the states across the tips. With replacement will create a distribution of trait vectors where the number of taxa with each state follows a binomial distribution around the observed number. Function returns the number of taxa with state '1' and the number of steps in the evolution of the trait. As written this will only make sense if states are (0,1), because I use a simple sum to calculate number of taxa with state 1
rpars <- function(t,x,rep=F) {
xx <- sample(x,replace = rep)
names(xx) <- t$tip.label
xp <- phyDat(xx,type='USER',levels=c(0,1))
ns <- parsimony(t,xp)
return(c(sum(xx),ns))
}
# Now obtain the null distribution without replacement. The replicate command runs a function N times and returns the results as a matrix with N columns and as many rows as there are output values in the function
x8 <- replicate(9999,rpars(t32,x,F))
dim(x8)
# Add the observed result for trait x above to the vector of null results, and look at the distribution. The table is useful because the bars in the tail are so small you can't see them in the histogram. Calculate the p value for trait x as the number of cases in which the null values are equal to or less than the observed
xMP <- parsimony(t32,xx)
x8n <- c(xMP,x8[2,])
hist(x8n,breaks=seq(0.5,8.5,by=1),main='sample without replacement',xlab='number of steps')
table(x8n)
p1 <- length(which(x8n<=xMP))/10000
p1
# Now repeat sampling with replacement
x8r <- replicate(9999,rpars(t32,x,T))
x8rn <- c(xMP,x8r[2,])
hist(x8rn,xlab='number of steps',main='resample with replacement')
plot(t(x8r),xlab="number of 1's",ylab='number of steps')
table(x8rn)
p2 <- length(which(x8rn<=xMP))/10000
p2
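# The same null-model test can be run for the scattered trait y defined above;
# a sketch mirroring the steps used for trait x
yMP <- parsimony(t32,yy)
y8 <- replicate(9999,rpars(t32,y,F))
y8n <- c(yMP,y8[2,])
hist(y8n,xlab='number of steps',main='trait y, sample without replacement')
table(y8n)
length(which(y8n<=yMP))/10000   # p value for trait y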
|
/lect/lect16.R
|
no_license
|
wf8/ib200
|
R
| false
| false
| 3,352
|
r
|
## Quick R script to conduct monte carlo null model test for the number of steps on a phylogenetic tree
library(ape)
library(phangorn)
# Here's a 32 taxon tree with a basal split between 8 and 24 taxon clades:
t32=read.tree(text="((((t30:1,t23:1):1,(t32:1,(t19:1,(t11:1,t24:1):1):1):1):1,((((t26:1,t14:1):1,(((t21:1,t20:1):1,t5:1):1,(t22:1,t13:1):1):1):1,(t10:1,(t3:1,t18:1):1):1):1,((t15:1,t7:1):1,(((t6:1,(t17:1,t9:1):1):1,t29:1):1,(t25:1,t31:1):1):1):1):1):1,(((t12:1,t27:1):1,(t4:1,t1:1):1):1,(t8:1,(t16:1,(t28:1,t2:1):1):1):1):1);")
plot(t32,'cladogram',use.edge.length=F)
#create a color pallette for plotting trait values
pall <- c('yellow','lightblue')
# create a trait with a high degree of phylosignal, convert to class phyDat to work with the phangorn library
x <- c(rep(1,24),rep(0,8))
names(x) <- t32$tip.label
xx <- phyDat(x,type='USER',levels=c(0,1))
# plot tree with trait values
plot(t32,'cladogram',use.edge.length=F,show.tip.label = F,direction='upwards',edge.width=2)
tiplabels(xx,bg=pall[x+1])
# calculate minimum number of steps under parsimony for evolution of this trait
parsimony(t32,xx)
# now create a trait with 8 0's scattered across tips, each one with a sister taxon with state 1
y <- c(1,0,1,1,1,0,1,1,1,0,1,1,1,1,1,0,1,1,1,1,0,1,1,0,1,1,1,0,1,1,1,0)
names(y) <- t32$tip.label
yy <- phyDat(y,type='USER',levels=c(0,1))
plot(t32,'cladogram',use.edge.length=F,show.tip.label = F,direction='upwards',edge.width=2)
tiplabels(y,bg=pall[y+1])
# calculate minimum number of steps
parsimony(t32,yy)
# create a function that takes a tree and a trait vector, and randomly resamples the trait with or without replacement. Without replacement simply permutes the states across the tips. With replacement will create a distribution of trait vectors where the number of taxa with each state follows a binomial distribution around the observed number. Function returns the number of taxa with state '1' and the number of steps in the evolution of the trait. As written this will only make sense if states are (0,1), because I use a simple sum to calculate number of taxa with state 1
rpars <- function(t,x,rep=F) {
xx <- sample(x,replace = rep)
names(xx) <- t$tip.label
xp <- phyDat(xx,type='USER',levels=c(0,1))
ns <- parsimony(t,xp)
return(c(sum(xx),ns))
}
# Now obtain the null distribution without replacement. The replicate command runs a function N times and returns the results as a matrix with N columns and as many rows as there are output values in the function
x8 <- replicate(9999,rpars(t32,x,F))
dim(x8)
# Add the observed result for trait x above to the vector of null results, and look at the distribution. The table is useful because the bars in the tail are so small you can't see them in the histogram. Calculate the p value for trait x as the number of cases in which the null values are equal to or less than the observed
xMP <- parsimony(t32,xx)
x8n <- c(xMP,x8[2,])
hist(x8n,breaks=seq(0.5,8.5,by=1),main='sample without replacement',xlab='number of steps')
table(x8n)
p1 <- length(which(x8n<=xMP))/10000
p1
# Now repeat sampling with replacement
x8r <- replicate(9999,rpars(t32,x,T))
x8rn <- c(xMP,x8r[2,])
hist(x8rn,xlab='number of steps',main='resample with replacement')
plot(t(x8r),xlab="number of 1's",ylab='number of steps')
table(x8rn)
p2 <- length(which(x8rn<=xMP))/10000
p2
|
library(gcookbook)
ggplot(cabbage_exp,aes(x=Date,y=Weight,fill=Cultivar))+geom_bar(position="dodge",stat="identity",colour="black")+scale_fill_brewer(palette="Pastel1")
|
/3-05.r
|
no_license
|
wenbin5243/r
|
R
| false
| false
| 168
|
r
|
library(gcookbook)
ggplot(cabbage_exp,aes(x=Date,y=Weight,fill=Cultivar))+geom_bar(position="dodge",stat="identity",colour="black")+scale_fill_brewer(palette="Pastel1")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mean_sd.R
\name{mean_sd}
\alias{mean_sd}
\title{Summarise a numerical vector to mean (SD).}
\usage{
mean_sd(...)
}
\arguments{
\item{x}{Numerical vector}
\item{digits}{Number of decimals to use}
}
\value{
Character vector
}
\description{
Summarise a numerical vector to a mean and a SD,
or a median and an interquartile range (p25 and p75).
}
\examples{
mean_sd(1:10)
median_iqr(1:10)
}
|
/man/mean_sd.Rd
|
no_license
|
jaspervanm/JasperTools
|
R
| false
| true
| 466
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mean_sd.R
\name{mean_sd}
\alias{mean_sd}
\title{Summarise a numerical vector to mean (SD).}
\usage{
mean_sd(...)
}
\arguments{
\item{x}{Numerical vector}
\item{digits}{Number of decimals to use}
}
\value{
Character vector
}
\description{
Summarise a numerical vector to a mean and a SD,
or a median and an interquartile range (p25 and p75).
}
\examples{
mean_sd(1:10)
median_iqr(1:10)
}
|
library(FSelector)
data(iris)
# obtain the measures using information gain
weights <- FSelector::information.gain(Species~., iris)
# show the weights and select the best ones
print(weights)
subset <- FSelector::cutoff.k(weights,2)
f <- as.simple.formula(subset,"Species")
print(f)
# the same, but with gain ratio
weights <- FSelector::gain.ratio(Species~., iris)
print(weights)
# and likewise with symmetrical.uncertainty
weights <- FSelector::symmetrical.uncertainty(Species~., iris)
print(weights)
|
/Minería de datos. Preprocesamiento y clasificacion/preprocesamiento/fSelector-entropy.R
|
permissive
|
Tiburtzio/Master-Ciencias-de-Datos-UGR
|
R
| false
| false
| 532
|
r
|
library(FSelector)
data(iris)
# obtain the measures using information gain
weights <- FSelector::information.gain(Species~., iris)
# show the weights and select the best ones
print(weights)
subset <- FSelector::cutoff.k(weights,2)
f <- as.simple.formula(subset,"Species")
print(f)
# the same, but with gain ratio
weights <- FSelector::gain.ratio(Species~., iris)
print(weights)
# and likewise with symmetrical.uncertainty
weights <- FSelector::symmetrical.uncertainty(Species~., iris)
print(weights)
|
library(ggplot2)
library(rgl)
wh<-read.table(file='WaitTimesPerHour.csv',sep=',',header=TRUE)
attach(wh)
names(wh)
wd<-read.table(file='WaitTimesPerDay.csv',sep=',',header=TRUE)
attach(wd)
names(wd)
g <- ggplot(wh, aes(x = AvgWait, y= Count)) + geom_point(aes(color=Hour)) + facet_wrap(~ Airport)
g
ggplot(wh, aes(x = AvgWait, y= Count, fill = Airport)) +
geom_histogram(data = wd, fill = "grey", alpha = .5) +
geom_histogram(colour = "black") +
facet_wrap(~ Airport) +
guides(fill = FALSE) + # to remove the legend
theme_bw()
##### EXAMPLE OF HISTOGRAMS ###############################
ggplot(wd, aes(x = AvgWait, fill = Airport)) +
geom_histogram(data = wd, fill = "grey", alpha = .5) +
geom_histogram(colour = "black") +
facet_wrap(~ Airport) +
guides(fill = FALSE) + # to remove the legend
theme_bw() # for clean look overall
ggplot(wd, aes(x = MaxWait, fill = Airport)) +
geom_histogram(data = wd, fill = "grey", alpha = .5) +
geom_histogram(colour = "black") +
facet_wrap(~ Airport) +
guides(fill = FALSE) + # to remove the legend
theme_bw() # for clean look overall
#### EXAMPLE OF SCATTERPLOTS #################################
ggplot(wd, aes(x =Booths , y =AvgWait, colour = Airport)) +
geom_point(data = wd, colour = "grey", alpha = .2) +
geom_point() +
facet_wrap(~ Airport) +
guides(colour = FALSE) +
theme_bw()
ggplot(wd, aes(x =AvgWait , y =MaxWait, colour = Airport)) +
geom_point(data = wd, colour = "grey", alpha = .2) +
geom_point() +
facet_wrap(~ Airport) +
guides(colour = FALSE) +
theme_bw()
#### EXAMPLE OF BOXPLOTS #################################
p <- ggplot(wd, aes(factor(Airport), AvgWait))
p + geom_boxplot() + geom_jitter()
p + geom_boxplot(aes(fill = factor(Airport)))
p <- ggplot(wd, aes(factor(Airport), MaxWait))
p + geom_boxplot() + geom_jitter()
p + geom_boxplot(aes(fill = factor(Airport)))
p <- ggplot(wh, aes(factor(Airport), AvgWait))
p + geom_boxplot() + geom_jitter()
p + geom_boxplot(aes(fill = factor(Airport)))
p <- ggplot(wh, aes(factor(Airport), MaxWait))
p + geom_boxplot() + geom_jitter()
p + geom_boxplot(aes(fill = factor(Airport)))
##Comparative density plots
ggplot(wd, aes(x=AvgWait, fill=Airport)) + geom_density(alpha=0.3)
ggplot(wd, aes(x=MaxWait, fill=Airport)) + geom_density(alpha=0.3)
ggplot(wh, aes(x=AvgWait, fill=Airport)) + geom_density(alpha=0.3)
ggplot(wh, aes(x=MaxWait, fill=Airport)) + geom_density(alpha=0.3)
######## FINAL
library(corrplot)
whn<-read.table(file='WaitTimesPerHour.csv',sep=',',header=TRUE)
attach(whn)
names(whn)
## Correlation plots by day
wdn<-read.table(file='WaitTimesPerDay.csv',sep=',',header=TRUE)
attach(wdn)
names(wdn)
wdn1<-cbind(wdn[,1:7],wdn[,9])
pairs(wdn1)
cbd<-cor(wdn1)
corrplot(cbd)
## Correlation plots by hour
wdh<-read.table(file='WaitTimesPerHour.csv',sep=',',header=TRUE)
attach(wdh)
names(wdh)
wdh1<-cbind(wdh[,1:7],wdh[,9])
pairs(wdh1)
cbh<-cor(wdh1)
corrplot(cbh)
# note how large and dark blue the circles are; there is no -ve correlation
library(car)
scatterplotMatrix(~AvgWait+Count+Booths|Airport, data=wd,main="Scatterplots and regression lines per airport")
pc1<-prcomp(wdh1)
library(scatterplot3d)
# plot the first, second, and third principal components
s3d = scatterplot3d(pc1$rotation[,1], pc1$rotation[,2], pc1$rotation[,3],
xlab='Comp.1', ylab='Comp.2', zlab='Comp.3', pch = 20)
# smooth regression lines: positive correlation between Booths and Count
# not very informative because of the large amount of data
# no clear relationship between Booths and AvgWait because the line is close to horizontal
plot(pc1) # amount of variation
biplot(pc1)
##### VIOLIN
library(plyr)
hwt <- ggplot(wh, aes(x=Hour, y=AvgWait))+geom_point(shape=19, color=rgb(0,0,1,0.3))
hwt + facet_grid(. ~ Airport)
hwt + facet_wrap( ~ Airport, ncol=3)
hwt
### VIO
allvio <- ggplot(wh, aes(Hour, AvgWait, fill = Hour, colour = Hour))
p2 <- allvio+geom_violin(alpha=0.3, size=0.7, width=0.7, trim = FALSE, scale = "width", adjust = 0.5) +
geom_boxplot(width=0.1, outlier.shape = 19, outlier.colour="black", notch = FALSE,
notchwidth = .5, alpha = 0.5, colour = "black")+
labs(y = "Average Wait", x = "Hour")
p2 + facet_wrap( ~ Airport, ncol=6) # not a very pretty sight
p2 <- p2+geom_violin(alpha=0.3, size=0.7, width=0.7, trim = FALSE, scale = "width", adjust = 0.5) +
geom_boxplot(width=0.1, outlier.shape = 19, outlier.colour="black", notch = FALSE,
notchwidth = .5, alpha = 0.5, colour = "black")+
labs(y = "Average Wait", x = "Hour")
p2 + facet_wrap( ~ Airport, ncol=2)
# different range for y axis for the 4 airports
p2 + facet_wrap( ~ Airport, ncol=2, scales = "free_y")
hwtl <- ggplot(wh, aes(x=Hour, y=Count, color = Airport))+geom_line(size = 1)
hwtl + facet_grid(. ~ Airport)
hwtl + facet_wrap( ~ Airport, nrow=2, scales = "free_y")
hwtl
# keep four airports of interest (assuming the first column of wh holds the airport code)
fgi <- wh[wh[,1]=="GUM" | wh[,1]=="FAT" | wh[,1]=="FLL" | wh[,1]=="IAD" ,]
### gradient colour on AvgWait values
hwt <- ggplot(wh, aes(x=Hour, y=AvgWait, color = AvgWait))+geom_point()+
scale_color_gradientn(colours=c("blue","green","red"), values = c(0, 0.4, 1))
hwt1 <- hwt + facet_wrap( ~ Airport, ncol=3)
#### Average wait and max wait are correlated
### TODO : change attributes
cor(wh[,5], wh[,6])
# [1] 0.8718369
# graph of average wait coloured by max wait values
hwt1 <- ggplot(wh, aes(x=Hour, y=AvgWait, color = MaxWait))+geom_point()+
scale_color_gradientn(colours=c("blue","green","red"), values = c(0, 0.3, 1))
hwt1 <- hwt1 + facet_wrap( ~ Airport, ncol=3)
|
/r-data/wait-times/hrd31.R
|
no_license
|
hardikgw/elastic-node
|
R
| false
| false
| 5,629
|
r
|
library(ggplot2)
library(rgl)
wh<-read.table(file='WaitTimesPerHour.csv',sep=',',header=TRUE)
attach(wh)
names(wh)
wd<-read.table(file='WaitTimesPerDay.csv',sep=',',header=TRUE)
attach(wd)
names(wd)
g <- ggplot(wh, aes(x = AvgWait, y= Count)) + geom_point(aes(color=Hour)) + facet_wrap(~ Airport)
g
ggplot(wh, aes(x = AvgWait, y= Count, fill = Airport)) +
geom_histogram(data = wd, fill = "grey", alpha = .5) +
geom_histogram(colour = "black") +
facet_wrap(~ Airport) +
guides(fill = FALSE) + # to remove the legend
theme_bw()
##### EXAMPLE OF HISTOGRAMS ###############################
ggplot(wd, aes(x = AvgWait, fill = Airport)) +
geom_histogram(data = wd, fill = "grey", alpha = .5) +
geom_histogram(colour = "black") +
facet_wrap(~ Airport) +
guides(fill = FALSE) + # to remove the legend
theme_bw() # for clean look overall
ggplot(wd, aes(x = MaxWait, fill = Airport)) +
geom_histogram(data = wd, fill = "grey", alpha = .5) +
geom_histogram(colour = "black") +
facet_wrap(~ Airport) +
guides(fill = FALSE) + # to remove the legend
theme_bw() # for clean look overall
#### EXAMPLE OF SCATTERPLOTS #################################
ggplot(wd, aes(x =Booths , y =AvgWait, colour = Airport)) +
geom_point(data = wd, colour = "grey", alpha = .2) +
geom_point() +
facet_wrap(~ Airport) +
guides(colour = FALSE) +
theme_bw()
ggplot(wd, aes(x =AvgWait , y =MaxWait, colour = Airport)) +
geom_point(data = wd, colour = "grey", alpha = .2) +
geom_point() +
facet_wrap(~ Airport) +
guides(colour = FALSE) +
theme_bw()
#### EXAMPLE OF BOXPLOTS #################################
p <- ggplot(wd, aes(factor(Airport), AvgWait))
p + geom_boxplot() + geom_jitter()
p + geom_boxplot(aes(fill = factor(Airport)))
p <- ggplot(wd, aes(factor(Airport), MaxWait))
p + geom_boxplot() + geom_jitter()
p + geom_boxplot(aes(fill = factor(Airport)))
p <- ggplot(wh, aes(factor(Airport), AvgWait))
p + geom_boxplot() + geom_jitter()
p + geom_boxplot(aes(fill = factor(Airport)))
p <- ggplot(wh, aes(factor(Airport), MaxWait))
p + geom_boxplot() + geom_jitter()
p + geom_boxplot(aes(fill = factor(Airport)))
##Comparative density plots
ggplot(wd, aes(x=AvgWait, fill=Airport)) + geom_density(alpha=0.3)
ggplot(wd, aes(x=MaxWait, fill=Airport)) + geom_density(alpha=0.3)
ggplot(wh, aes(x=AvgWait, fill=Airport)) + geom_density(alpha=0.3)
ggplot(wh, aes(x=MaxWait, fill=Airport)) + geom_density(alpha=0.3)
######## FINAL
library(corrplot)
whn<-read.table(file='WaitTimesPerHour.csv',sep=',',header=TRUE)
attach(whn)
names(whn)
## Correlation plots by day
wdn<-read.table(file='WaitTimesPerDay.csv',sep=',',header=TRUE)
attach(wdn)
names(wdn)
wdn1<-cbind(wdn[,1:7],wdn[,9])
pairs(wdn1)
cbd<-cor(wdn1)
corrplot(cbd)
## Correlation plots by hour
wdh<-read.table(file='WaitTimesPerHour.csv',sep=',',header=TRUE)
attach(wdh)
names(wdh)
wdh1<-cbind(wdh[,1:7],wdh[,9])
pairs(wdh1)
cbh<-cor(wdh1)
corrplot(cbh)
# note how large and dark blue the circles are; there is no -ve correlation
library(car)
scatterplotMatrix(~AvgWait+Count+Booths|Airport, data=wd,main="Scatterplots and regression lines per airport")
pc1<-prcomp(wdh1)
library(scatterplot3d)
# plot the first, second, and third principal components
s3d = scatterplot3d(pc1$rotation[,1], pc1$rotation[,2], pc1$rotation[,3],
xlab='Comp.1', ylab='Comp.2', zlab='Comp.3', pch = 20)
# smooth regression lines: positive correlation between Booths and Count
# not very informative because of the large amount of data
# no clear relationship between Booths and AvgWait because the line is close to horizontal
plot(pc1) # amount of variation
biplot(pc1)
##### VIOLIN
library(plyr)
hwt <- ggplot(wh, aes(x=Hour, y=AvgWait))+geom_point(shape=19, color=rgb(0,0,1,0.3))
hwt + facet_grid(. ~ Airport)
hwt + facet_wrap( ~ Airport, ncol=3)
hwt
### VIO
allvio <- ggplot(wh, aes(Hour, AvgWait, fill = Hour, colour = Hour))
p2 <- allvio+geom_violin(alpha=0.3, size=0.7, width=0.7, trim = FALSE, scale = "width", adjust = 0.5) +
geom_boxplot(width=0.1, outlier.shape = 19, outlier.colour="black", notch = FALSE,
notchwidth = .5, alpha = 0.5, colour = "black")+
labs(y = "Average Wait", x = "Hour")
p2 + facet_wrap( ~ Airport, ncol=6) # not a very pretty sight
p2 <- p2+geom_violin(alpha=0.3, size=0.7, width=0.7, trim = FALSE, scale = "width", adjust = 0.5) +
geom_boxplot(width=0.1, outlier.shape = 19, outlier.colour="black", notch = FALSE,
notchwidth = .5, alpha = 0.5, colour = "black")+
labs(y = "Average Wait", x = "Hour")
p2 + facet_wrap( ~ Airport, ncol=2)
# different range for y axis for the 4 airports
p2 + facet_wrap( ~ Airport, ncol=2, scales = "free_y")
hwtl <- ggplot(wh, aes(x=Hour, y=Count, color = Airport))+geom_line(size = 1)
hwtl + facet_grid(. ~ Airport)
hwtl + facet_wrap( ~ Airport, nrow=2, scales = "free_y")
hwtl
# keep four airports of interest (assuming the first column of wh holds the airport code)
fgi <- wh[wh[,1]=="GUM" | wh[,1]=="FAT" | wh[,1]=="FLL" | wh[,1]=="IAD" ,]
### gradient colour on AvgWait values
hwt <- ggplot(wh, aes(x=Hour, y=AvgWait, color = AvgWait))+geom_point()+
scale_color_gradientn(colours=c("blue","green","red"), values = c(0, 0.4, 1))
hwt1 <- hwt + facet_wrap( ~ Airport, ncol=3)
#### Average wait and max wait are correlated
### TODO : change attributes
cor(wh[,5], wh[,6])
# [1] 0.8718369
# graph of average wait coloured by max wait values
hwt1 <- ggplot(wh, aes(x=Hour, y=AvgWait, color = MaxWait))+geom_point()+
scale_color_gradientn(colours=c("blue","green","red"), values = c(0, 0.3, 1))
hwt1 <- hwt1 + facet_wrap( ~ Airport, ncol=3)
|
setwd("D:\\lhac\\analysis\\Rtmp")
library(VennDiagram)
f1="RNAi"
f2="RNAi"
a<- read.csv(paste(f1,"B2normal-DEG.csv",sep=""))
b<- read.csv(paste(f2,"B1normal-DEG.csv",sep=""))
a1<-a[,1]
b1<-b[,1]
set<-list(a=a1,b=b1)
names(set)<-c(f1,f2)
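# draw the two-set Venn diagram (written to a TIFF file named after the comparison) and save the IDs shared by both DEG lists to a CSV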
venn.diagram(set,fill=c("red","blue"),paste(f1,"B2vsB1",f2,"out.tiff",sep=""))
write.csv(intersect(a1,b1),file=paste(f1,"B2vsB1",f2,"out.csv",sep=""))
|
/R_coder/VenneB1vsB2.R
|
no_license
|
lhaclove/MyCode
|
R
| false
| false
| 392
|
r
|
setwd("D:\\lhac\\analysis\\Rtmp")
library(VennDiagram)
f1="RNAi"
f2="RNAi"
a<- read.csv(paste(f1,"B2normal-DEG.csv",sep=""))
b<- read.csv(paste(f2,"B1normal-DEG.csv",sep=""))
a1<-a[,1]
b1<-b[,1]
set<-list(a=a1,b=b1)
names(set)<-c(f1,f2)
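# draw the two-set Venn diagram (written to a TIFF file named after the comparison) and save the IDs shared by both DEG lists to a CSV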
venn.diagram(set,fill=c("red","blue"),paste(f1,"B2vsB1",f2,"out.tiff",sep=""))
write.csv(intersect(a1,b1),file=paste(f1,"B2vsB1",f2,"out.csv",sep=""))
|
#' Easier-to-use function for grabbing a block of data out of a Raster*.
#'
#' @param x Raster* Some input Raster* object.
#' @param r1 Numeric. The start row of the chunk.
#' @param r2 Numeric. The end row of the chunk.
#' @param c1 Numeric. The start column of the chunk.
#' @param c2 Numeric. The end column of the chunk.
#' @param lyrs Numeric. Vector of layer IDs. Defaults to all layers (1:nlayers(x)).
#' @param format Character. See Details.
#' @param ... Other parameters.
#'
#' @details This allows for a larger number of output formats to be generated
#' when extracting chunks of data from a Raster* object. If format="array" (default),
#' the chunk will be returned in a 3-d array with dimensions representing column,row,and layer.
#' If "raster", the chunk will be returned as a Raster* object. If "data.frame", it will
#' be returned as a data.frame. If "data.frame.dims", it will return a list, where the first
#' component (named "values") is the same as the data.frame when using format="data.frame", and
#' the second component (named "dim") is the dimensions of the extracted chunk.
#'
#' @return An array or raster object.
#' @author Jonathan A. Greenberg
#' @seealso \code{\link[raster]{getValues}}
#' @examples
#' library("raster")
#' tahoe_highrez <- brick(system.file("external/tahoe_highrez.tif", package="spatial.tools"))
#' mychunk <- getValuesBlock_enhanced(tahoe_highrez,r1=100,r2=110,c1=20,c2=50)
#' class(mychunk)
#' dim(mychunk)
#' mychunk_raster <- getValuesBlock_enhanced(tahoe_highrez,r1=100,r2=110,c1=20,c2=50,format="raster")
#' mychunk_raster
#' @import raster
#' @export
getValuesBlock_enhanced=function(x,r1=1,r2=nrow(x),c1=1,c2=ncol(x),lyrs=seq(nlayers(x)),format="array",...)
{
if(format=="array")
{
layer_names=names(x)
getvalues_raw <- as.numeric(getValuesBlock_stackfix(x,row=r1,nrows=(r2-r1+1),col=c1,ncols=(c2-c1+1),lyrs=lyrs))
getvalues_raw_nrows=r2-r1+1
getvalues_raw_ncols=c2-c1+1
getvalues_raw_nlayers=nlayers(x)
# Test the input file.
if(getvalues_raw_nlayers==1)
{
# Raster
getvalues_array=array(data=getvalues_raw,
dim=c(getvalues_raw_ncols,getvalues_raw_nrows,getvalues_raw_nlayers))
} else
{
# Brick or stack
getvalues_array=array(data=getvalues_raw,
dim=c(getvalues_raw_ncols,getvalues_raw_nrows,getvalues_raw_nlayers))
}
dimnames(getvalues_array) <- list(NULL,NULL,NULL)
if(!is.null(layer_names)) dimnames(getvalues_array)[[3]]=layer_names
return(getvalues_array)
}
if(format=="raster")
{
return(crop(x, extent(x, r1=r1, r2=r2, c1=c1,c2=c2)))
}
if(format=="data.frame" || format=="data.frame.dims")
{
# layer_names=names(x)
getvalues_raw <- getValuesBlock_stackfix(x,row=r1,nrows=(r2-r1+1),col=c1,ncols=(c2-c1+1),lyrs=lyrs)
getvalues_df <- as.data.frame(getvalues_raw)
names(getvalues_df) <- names(x)
# Fix factors:
factor_layers <- is.factor(x)
if(any(factor_layers))
{
factor_levels <- levels(x)
for(i in seq(nlayers(x))[factor_layers])
{
temp_factor_levels <- factor_levels[[i]][[1]]
temp_factor_levels <- data.frame(ID=temp_factor_levels[,1],code=temp_factor_levels[,2])
temp_df_data <- getvalues_df[[i]]
temp_factor_column <- with(temp_factor_levels,code[match(temp_df_data,ID)])
getvalues_df[[i]] <- temp_factor_column
}
}
names(getvalues_df) <- names(x)
if(format=="data.frame.dims")
{
getvalues_raw_nrows=r2-r1+1
getvalues_raw_ncols=c2-c1+1
getvalues_raw_nlayers=nlayers(x)
getValues.dims <- c(getvalues_raw_ncols,getvalues_raw_nrows,getvalues_raw_nlayers)
getvalues_df <- list(values=getvalues_df,dim=getValues.dims)
}
return(getvalues_df)
}
}
|
/R/getValuesBlock_enhanced.R
|
no_license
|
gearslaboratory/spatial.tools
|
R
| false
| false
| 3,805
|
r
|
#' Easier-to-use function for grabbing a block of data out of a Raster*.
#'
#' @param x Raster* Some input Raster* object.
#' @param r1 Numeric. The start row of the chunk.
#' @param r2 Numeric. The end row of the chunk.
#' @param c1 Numeric. The start column of the chunk.
#' @param c2 Numeric. The end column of the chunk.
#' @param lyrs Numeric. Vector of layer IDs. Defaults to all layers (1:nlayers(x)).
#' @param format Character. See Details.
#' @param ... Other parameters.
#'
#' @details This allows for a larger number of output formats to be generated
#' when extracting chunks of data from a Raster* object. If format="array" (default),
#' the chunk will be returned in a 3-d array with dimensions representing column,row,and layer.
#' If "raster", the chunk will be returned as a Raster* object. If "data.frame", it will
#' be returned as a data.frame. If "data.frame.dims", it will return a list, where the first
#' component (named "values") is the same as the data.frame when using format="data.frame", and
#' the second component (named "dim") is the dimensions of the extracted chunk.
#'
#' @return An array or raster object.
#' @author Jonathan A. Greenberg
#' @seealso \code{\link[raster]{getValues}}
#' @examples
#' library("raster")
#' tahoe_highrez <- brick(system.file("external/tahoe_highrez.tif", package="spatial.tools"))
#' mychunk <- getValuesBlock_enhanced(tahoe_highrez,r1=100,r2=110,c1=20,c2=50)
#' class(mychunk)
#' dim(mychunk)
#' mychunk_raster <- getValuesBlock_enhanced(tahoe_highrez,r1=100,r2=110,c1=20,c2=50,format="raster")
#' mychunk_raster
#' @import raster
#' @export
getValuesBlock_enhanced=function(x,r1=1,r2=nrow(x),c1=1,c2=ncol(x),lyrs=seq(nlayers(x)),format="array",...)
{
if(format=="array")
{
layer_names=names(x)
getvalues_raw <- as.numeric(getValuesBlock_stackfix(x,row=r1,nrows=(r2-r1+1),col=c1,ncols=(c2-c1+1),lyrs=lyrs))
getvalues_raw_nrows=r2-r1+1
getvalues_raw_ncols=c2-c1+1
getvalues_raw_nlayers=nlayers(x)
# Test the input file.
if(getvalues_raw_nlayers==1)
{
# Raster
getvalues_array=array(data=getvalues_raw,
dim=c(getvalues_raw_ncols,getvalues_raw_nrows,getvalues_raw_nlayers))
} else
{
# Brick or stack
getvalues_array=array(data=getvalues_raw,
dim=c(getvalues_raw_ncols,getvalues_raw_nrows,getvalues_raw_nlayers))
}
dimnames(getvalues_array) <- list(NULL,NULL,NULL)
if(!is.null(layer_names)) dimnames(getvalues_array)[[3]]=layer_names
return(getvalues_array)
}
if(format=="raster")
{
return(crop(x, extent(x, r1=r1, r2=r2, c1=c1,c2=c2)))
}
if(format=="data.frame" || format=="data.frame.dims")
{
# layer_names=names(x)
getvalues_raw <- getValuesBlock_stackfix(x,row=r1,nrows=(r2-r1+1),col=c1,ncols=(c2-c1+1),lyrs=lyrs)
getvalues_df <- as.data.frame(getvalues_raw)
names(getvalues_df) <- names(x)
# Fix factors:
factor_layers <- is.factor(x)
if(any(factor_layers))
{
factor_levels <- levels(x)
for(i in seq(nlayers(x))[factor_layers])
{
temp_factor_levels <- factor_levels[[i]][[1]]
temp_factor_levels <- data.frame(ID=temp_factor_levels[,1],code=temp_factor_levels[,2])
temp_df_data <- getvalues_df[[i]]
temp_factor_column <- with(temp_factor_levels,code[match(temp_df_data,ID)])
getvalues_df[[i]] <- temp_factor_column
}
}
names(getvalues_df) <- names(x)
if(format=="data.frame.dims")
{
getvalues_raw_nrows=r2-r1+1
getvalues_raw_ncols=c2-c1+1
getvalues_raw_nlayers=nlayers(x)
getValues.dims <- c(getvalues_raw_ncols,getvalues_raw_nrows,getvalues_raw_nlayers)
getvalues_df <- list(values=getvalues_df,dim=getValues.dims)
}
return(getvalues_df)
}
}
|
#' Machine learning made easy
#'
#' @description Prepare data and train machine learning models.
#'
#' @param d A data frame
#' @param ... Columns to be ignored in model training, e.g. ID columns,
#' unquoted.
#' @param outcome Name of the target column, i.e. what you want to predict.
#' Unquoted. Must be named, i.e. you must specify \code{outcome = }
#' @param models Models to be trained, k-nearest neighbors and random forest by
#' default. See \code{\link{supported_models}} for details.
#' @param tune If TRUE (default) models will be tuned via
#' \code{\link{tune_models}}. If FALSE, models will be trained via
#' \code{\link{flash_models}} which is substantially faster but produces
#' less-predictively powerful models.
#' @param positive_class For classification only, which outcome level is the
#' "yes" case, i.e. should be associated with high probabilities? Defaults to
#' "Y" or "yes" if present, otherwise is the first level of the outcome
#' variable (first alphabetically if the training data outcome was not already
#' a factor).
#' @param n_folds How many folds to use to assess out-of-fold accuracy? Default
#' = 5. Models are evaluated on out-of-fold predictions whether tune is TRUE
#' or FALSE.
#' @param tune_depth How many hyperparameter combinations to try? Default = 10.
#' Ignored if tune is FALSE.
#' @param impute Logical, if TRUE (default) missing values will be filled by
#' \code{\link{hcai_impute}}
#'
#' @return model_list object ready to make predictions via
#' \code{\link{predict.model_list}}
#' @export
#'
#' @details This is a high-level wrapper function. For finer control of data
#' cleaning and preparation use \code{\link{prep_data}} or the functions it
#' wraps. For finer control of model tuning use \code{\link{tune_models}}.
#'
#' @examples
#' # Split the data into training and test sets, using just 100 rows for speed
#' d <- split_train_test(d = pima_diabetes[1:100, ],
#' outcome = diabetes,
#' percent_train = .9)
#'
#' ### Classification ###
#'
#' # Clean and prep the training data, specifying that patient_id is an ID column,
#' # and tune algorithms over hyperparameter values to predict diabetes
#' diabetes_models <- machine_learn(d$train, patient_id, outcome = diabetes)
#'
#' # Inspect model specification and performance
#' diabetes_models
#'
#' # Make predictions (predicted probability of diabetes) on test data
#' predict(diabetes_models, d$test)
#'
#' ### Regression ###
#'
#' # If the outcome variable is numeric, regression models will be trained
#' age_model <- machine_learn(d$train, patient_id, outcome = age)
#'
#' # If new data isn't specified, get predictions on training data
#' predict(age_model)
#'
#' ### Faster model training without tuning hyperparameters ###
#'
#' # Train models at set hyperparameter values by setting tune to FALSE. This is
#' # faster (especially on larger datasets), but produces models with less
#' # predictive accuracy.
#' machine_learn(d$train, patient_id, outcome = diabetes, tune = FALSE)
machine_learn <- function(d, ..., outcome, models, tune = TRUE, positive_class,
n_folds = 5, tune_depth = 10, impute = TRUE) {
if (!is.data.frame(d))
stop("\"d\" must be a data frame.")
dots <- rlang::quos(...)
ignored <- map_chr(dots, rlang::quo_name)
if (length(ignored)) {
not_there <- setdiff(ignored, names(d))
if (length(not_there))
stop("The following variable(s) were passed to the ... argument of machine_learn",
" but are not the names of columns in the data frame: ",
paste(not_there, collapse = ", "))
}
outcome <- rlang::enquo(outcome)
if (rlang::quo_is_missing(outcome)) {
mes <- "You must provide an outcome variable to machine_learn."
if (length(ignored))
mes <- paste(mes, "Did you forget to specify `outcome = `?")
stop(mes)
}
outcome_chr <- rlang::quo_name(outcome)
if (!outcome_chr %in% names(d))
stop("You passed ", outcome_chr, " to the outcome argument of machine_learn,",
"but that isn't a column in d.")
if (missing(models))
models <- get_supported_models()
pd <- prep_data(d, !!!dots, outcome = !!outcome, impute = impute)
m <-
if (tune) {
tune_models(pd, outcome = !!outcome, models = models,
positive_class = positive_class,
n_folds = n_folds, tune_depth = tune_depth)
} else {
flash_models(pd, outcome = !!outcome, models = models,
positive_class = positive_class, n_folds = n_folds)
}
return(m)
}
|
/R/machine_learn.R
|
permissive
|
hughvnguyen/healthcareai-r
|
R
| false
| false
| 4,616
|
r
|
#' Machine learning made easy
#'
#' @description Prepare data and train machine learning models.
#'
#' @param d A data frame
#' @param ... Columns to be ignored in model training, e.g. ID columns,
#' unquoted.
#' @param outcome Name of the target column, i.e. what you want to predict.
#' Unquoted. Must be named, i.e. you must specify \code{outcome = }
#' @param models Models to be trained, k-nearest neighbors and random forest by
#' default. See \code{\link{supported_models}} for details.
#' @param tune If TRUE (default) models will be tuned via
#' \code{\link{tune_models}}. If FALSE, models will be trained via
#' \code{\link{flash_models}} which is substantially faster but produces
#' less-predictively powerful models.
#' @param positive_class For classification only, which outcome level is the
#' "yes" case, i.e. should be associated with high probabilities? Defaults to
#' "Y" or "yes" if present, otherwise is the first level of the outcome
#' variable (first alphabetically if the training data outcome was not already
#' a factor).
#' @param n_folds How many folds to use to assess out-of-fold accuracy? Default
#' = 5. Models are evaluated on out-of-fold predictions whether tune is TRUE
#' or FALSE.
#' @param tune_depth How many hyperparameter combinations to try? Default = 10.
#' Ignored if tune is FALSE.
#' @param impute Logical, if TRUE (default) missing values will be filled by
#' \code{\link{hcai_impute}}
#'
#' @return model_list object ready to make predictions via
#' \code{\link{predict.model_list}}
#' @export
#'
#' @details This is a high-level wrapper function. For finer control of data
#' cleaning and preparation use \code{\link{prep_data}} or the functions it
#' wraps. For finer control of model tuning use \code{\link{tune_models}}.
#'
#' @examples
#' # Split the data into training and test sets, using just 100 rows for speed
#' d <- split_train_test(d = pima_diabetes[1:100, ],
#' outcome = diabetes,
#' percent_train = .9)
#'
#' ### Classification ###
#'
#' # Clean and prep the training data, specifying that patient_id is an ID column,
#' # and tune algorithms over hyperparameter values to predict diabetes
#' diabetes_models <- machine_learn(d$train, patient_id, outcome = diabetes)
#'
#' # Inspect model specification and performance
#' diabetes_models
#'
#' # Make predictions (predicted probability of diabetes) on test data
#' predict(diabetes_models, d$test)
#'
#' ### Regression ###
#'
#' # If the outcome variable is numeric, regression models will be trained
#' age_model <- machine_learn(d$train, patient_id, outcome = age)
#'
#' # If new data isn't specified, get predictions on training data
#' predict(age_model)
#'
#' ### Faster model training without tuning hyperparameters ###
#'
#' # Train models at set hyperparameter values by setting tune to FALSE. This is
#' # faster (especially on larger datasets), but produces models with less
#' # predictive accuracy.
#' machine_learn(d$train, patient_id, outcome = diabetes, tune = FALSE)
machine_learn <- function(d, ..., outcome, models, tune = TRUE, positive_class,
n_folds = 5, tune_depth = 10, impute = TRUE) {
if (!is.data.frame(d))
stop("\"d\" must be a data frame.")
dots <- rlang::quos(...)
ignored <- map_chr(dots, rlang::quo_name)
if (length(ignored)) {
not_there <- setdiff(ignored, names(d))
if (length(not_there))
stop("The following variable(s) were passed to the ... argument of machine_learn",
" but are not the names of columns in the data frame: ",
paste(not_there, collapse = ", "))
}
outcome <- rlang::enquo(outcome)
if (rlang::quo_is_missing(outcome)) {
mes <- "You must provide an outcome variable to machine_learn."
if (length(ignored))
mes <- paste(mes, "Did you forget to specify `outcome = `?")
stop(mes)
}
outcome_chr <- rlang::quo_name(outcome)
if (!outcome_chr %in% names(d))
stop("You passed ", outcome_chr, " to the outcome argument of machine_learn,",
"but that isn't a column in d.")
if (missing(models))
models <- get_supported_models()
pd <- prep_data(d, !!!dots, outcome = !!outcome, impute = impute)
m <-
if (tune) {
tune_models(pd, outcome = !!outcome, models = models,
positive_class = positive_class,
n_folds = n_folds, tune_depth = tune_depth)
} else {
flash_models(pd, outcome = !!outcome, models = models,
positive_class = positive_class, n_folds = n_folds)
}
return(m)
}
|
###############################################
# GSERM 2017 Day Five a.m.
#
# File created June 23, 2017
#
# File last updated June 23, 2017
###############################################
# Set working directory as necessary:
#
setwd("~/Dropbox (Personal)/GSERM/Materials 2017/Notes and Slides")
#
require(RCurl)
# Options:
options(scipen = 6) # bias against scientific notation
options(digits = 3) # show fewer decimal places
###########################
# "Toy" example:
set.seed(7222009)
ystar<-rnorm(100)
y<-ifelse(ystar>0,1,0)
x<-ystar+(0.5*rnorm(100))
data<-data.frame(ystar,y,x)
head(data)
pdf("YstarYX-R.pdf",6,5)
par(mar=c(4,4,2,2))
plot(x,ystar,pch=19,ylab="Y* / Y",xlab="X")
points(x,y,pch=4,col="red")
abline(h=0)
legend("topleft",bty="n",pch=c(19,4),col=c("black","red"),
legend=c("Y*","Y"))
dev.off()
# probits and logits...
myprobit<-glm(y~x,family=binomial(link="probit"),
data=data)
summary(myprobit)
mylogit<-glm(y~x,family=binomial(link="logit"),
data=data)
summary(mylogit)
pdf("LogitProbitHats.pdf",6,5)
plot(mylogit$fitted.values,myprobit$fitted.values,
pch=20,xlab="Logit Predictions",
ylab="Probit Predictions")
dev.off()
#################################
# NAFTA example...
temp<-getURL("https://raw.githubusercontent.com/PrisonRodeo/GSERM-2017-git/master/Data/NAFTA.csv")
NAFTA<-read.csv(text=temp, header=TRUE)
rm(temp)
summary(NAFTA)
# Logit:
NAFTA.GLM.fit<-glm(vote~democrat+pcthispc+cope93+DemXCOPE,
NAFTA,family=binomial)
summary(NAFTA.GLM.fit)
# Interactions...
NAFTA.GLM.fit$coeff[4]+NAFTA.GLM.fit$coeff[5]
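# z statistic for the sum of coefficients 4 and 5 (cope93 + DemXCOPE): the sum divided by its standard error, sqrt(Var(b4) + Var(b5) + 2*Cov(b4,b5))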
(NAFTA.GLM.fit$coeff[4]+NAFTA.GLM.fit$coeff[5]) /
(sqrt(vcov(NAFTA.GLM.fit)[4,4] +
(1)^2*vcov(NAFTA.GLM.fit)[5,5] +
2*1*vcov(NAFTA.GLM.fit)[4,5]))
# Same thing, using -car-:
library(car)
linearHypothesis(NAFTA.GLM.fit,"cope93+DemXCOPE=0")
# Predicted values:
preds<-NAFTA.GLM.fit$fitted.values
hats<-predict(NAFTA.GLM.fit,se.fit=TRUE)
# Plotting in-sample predictions:
XBUB<-hats$fit + (1.96*hats$se.fit)
XBLB<-hats$fit - (1.96*hats$se.fit)
plotdata<-cbind(as.data.frame(hats),XBUB,XBLB)
plotdata<-data.frame(lapply(plotdata,binomial(link="logit")$linkinv))
par(mfrow=c(1,2))
library(plotrix)
plotCI(cope93[democrat==1],plotdata$fit[democrat==1],ui=plotdata$XBUB[democrat==1],
li=plotdata$XBLB[democrat==1],pch=20,xlab="COPE Score",ylab="Predicted
Pr(Pro-NAFTA Vote)")
plotCI(cope93[democrat==0],plotdata$fit[democrat==0],ui=plotdata$XBUB[democrat==0],
li=plotdata$XBLB[democrat==0],pch=20,xlab="COPE Score",ylab="Predicted
Pr(Pro-NAFTA Vote)")
# Plotting Out-of-sample Predictions:
sim.data<-data.frame(pcthispc=mean(NAFTA$pcthispc),democrat=rep(0:1,101),
cope93=seq(from=0,to=100,length.out=101))
sim.data$DemXCOPE<-sim.data$democrat*sim.data$cope93
OutHats<-predict(NAFTA.GLM.fit,se.fit=TRUE,newdata=sim.data)
OutHatsUB<-OutHats$fit+(1.96*OutHats$se.fit)
OutHatsLB<-OutHats$fit-(1.96*OutHats$se.fit)
OutHats<-cbind(as.data.frame(OutHats),OutHatsUB,OutHatsLB)
OutHats<-data.frame(lapply(OutHats,binomial(link="logit")$linkinv))
par(mfrow=c(1,2))
both<-cbind(sim.data,OutHats)
both<-both[order(both$cope93,both$democrat),]
plot(both$cope93[democrat==1],both$fit[democrat==1],t="l",lwd=2,ylim=c(0,1),
xlab="COPE Score",ylab="Predicted Pr(Pro-NAFTA Vote)")
lines(both$cope93[democrat==1],both$OutHatsUB[democrat==1],lty=2)
lines(both$cope93[democrat==1],both$OutHatsLB[democrat==1],lty=2)
text(locator(1),label="Democrats")
plot(both$cope93[democrat==0],both$fit[democrat==0],t="l",lwd=2,ylim=c(0,1),
xlab="COPE Score",ylab="Predicted Pr(Pro-NAFTA Vote)")
lines(both$cope93[democrat==0],both$OutHatsUB[democrat==0],lty=2)
lines(both$cope93[democrat==0],both$OutHatsLB[democrat==0],lty=2)
text(locator(1),label="Republicans")
# Odds Ratios:
lreg.or <- function(model)
{
coeffs <- coef(summary(model))
lci <- exp(coeffs[ ,1] - 1.96 * coeffs[ ,2])
or <- exp(coeffs[ ,1])
uci <- exp(coeffs[ ,1] + 1.96 * coeffs[ ,2])
lreg.or <- cbind(lci, or, uci)
lreg.or
}
lreg.or(NAFTA.GLM.fit)
####################
# Goodness of fit:
table(NAFTA.GLM.fit$fitted.values>0.5,NAFTA$vote==1)
chisq.test(NAFTA.GLM.fit$fitted.values>0.5,NAFTA$vote==1)
# ROC curves, plots, etc.:
library(ROCR)
NAFTA.GLM.logithats<-predict(NAFTA.GLM.fit,
type="response")
preds<-prediction(NAFTA.GLM.logithats,NAFTA$vote)
plot(performance(preds,"tpr","fpr"),lwd=2,lty=2,
col="red",xlab="1 - Specificity",ylab="Sensitivity")
abline(a=0,b=1,lwd=3)
###############################
# Event counts...
#
# Various Poisson histograms
set.seed(7222009)
N<-1000
LP05<-rpois(N,0.5)
LP1<-rpois(N,1)
LP5<-rpois(N,5)
LP10<-rpois(N,10)
pdf("PoissonHistogramsR.pdf",7,6)
par(mfrow=c(2,2))
hist(LP05,col="grey",xlim=c(0,25),breaks=seq(0,25,by=1),
ylim=c(0,1000),xlab="Count",main="Lambda = 0.5")
hist(LP1,col="grey",xlim=c(0,25),breaks=seq(0,25,by=1),
ylim=c(0,1000),xlab="Count",main="Lambda = 1.0")
hist(LP5,col="grey",xlim=c(0,25),breaks=seq(0,25,by=1),
ylim=c(0,1000),xlab="Count",main="Lambda = 5")
hist(LP10,col="grey",xlim=c(0,25),breaks=seq(0,25,by=1),
ylim=c(0,1000),xlab="Count",main="Lambda = 10")
dev.off()
# Get SCOTUS nullifications data:
temp<-getURL("https://raw.githubusercontent.com/PrisonRodeo/GSERM-2017-git/master/Data/nulls.csv")
Nulls<-read.csv(text=temp, header=TRUE)
rm(temp)
# Histogram:
pdf("NullsHist.pdf",6,5)
par(mar=c(4,4,2,2))
with(Nulls,
hist(nulls,main="",xlab="Number of Nullifications",
col="grey"))
dev.off()
# Poisson regression:
nulls.poisson<-glm(nulls~tenure+unified,family="poisson",
data=Nulls)
summary(nulls.poisson)
# IRRs:
library(mfx)
nulls.poisson.IRR<-poissonirr(nulls~tenure+unified,
data=Nulls)
nulls.poisson.IRR
# Predictions:
tenure<-seq(0,20,1)
unified<-1
simdata<-as.data.frame(cbind(tenure,unified))
nullhats<-predict(nulls.poisson,newdata=simdata,se.fit=TRUE)
# NOTE: These are XBs, not predicted counts.
# Transforming:
nullhats$Yhat<-exp(nullhats$fit)
nullhats$UB<-exp(nullhats$fit + 1.96*(nullhats$se.fit))
nullhats$LB<-exp(nullhats$fit - 1.96*(nullhats$se.fit))
# Plot...
pdf("NullsOutOfSampleHatsR.pdf",6,5)
plot(simdata$tenure,nullhats$Yhat,t="l",lwd=3,ylim=c(0,5),ylab=
"Predicted Count", xlab="Mean Tenure")
lines(simdata$tenure,nullhats$UB,lwd=2,lty=2)
lines(simdata$tenure,nullhats$LB,lwd=2,lty=2)
dev.off()
# Offsets with dyadic data...Aggregated counts
# of conflicts between the countries in each
# dyad, 1950-1985...
temp<-getURL("https://raw.githubusercontent.com/PrisonRodeo/GSERM-2017-git/master/Data/offsetIR.csv")
IR<-read.csv(text=temp, header=TRUE)
rm(temp)
summary(IR)
cor(IR,use="complete.obs")
IR.fit1<-glm(disputes~allies+openness,data=IR,family="poisson")
summary(IR.fit1)
IR.fit2<-glm(disputes~allies+openness,data=IR,family="poisson",
offset=log(Ndyads))
summary(IR.fit2)
IR.fit3<-glm(disputes~allies+openness+log(Ndyads),data=IR,
family="poisson")
summary(IR.fit3)
# z-test:
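# two-sided test of H0: coefficient on log(Ndyads) = 1; 0.811 and .071 are presumably the estimate and SE of log(Ndyads) from summary(IR.fit3)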
2*pnorm((0.811-1)/.071)
# Wald test:
library(aod)
wald.test(b=coef(IR.fit3),Sigma=vcov(IR.fit3),Terms=4,H0=1)
|
/Code/GSERM-2017-Day-5-am.R
|
no_license
|
anhnguyendepocen/GSERM-2017-git
|
R
| false
| false
| 7,278
|
r
|
###############################################
# GSERM 2017 Day Five a.m.
#
# File created June 23, 2017
#
# File last updated June 23, 2017
###############################################
# Set working directory as necessary:
#
setwd("~/Dropbox (Personal)/GSERM/Materials 2017/Notes and Slides")
#
require(RCurl)
# Options:
options(scipen = 6) # bias against scientific notation
options(digits = 3) # show fewer decimal places
###########################
# "Toy" example:
set.seed(7222009)
ystar<-rnorm(100)
y<-ifelse(ystar>0,1,0)
x<-ystar+(0.5*rnorm(100))
data<-data.frame(ystar,y,x)
head(data)
pdf("YstarYX-R.pdf",6,5)
par(mar=c(4,4,2,2))
plot(x,ystar,pch=19,ylab="Y* / Y",xlab="X")
points(x,y,pch=4,col="red")
abline(h=0)
legend("topleft",bty="n",pch=c(19,4),col=c("black","red"),
legend=c("Y*","Y"))
dev.off()
# probits and logits...
myprobit<-glm(y~x,family=binomial(link="probit"),
data=data)
summary(myprobit)
mylogit<-glm(y~x,family=binomial(link="logit"),
data=data)
summary(mylogit)
pdf("LogitProbitHats.pdf",6,5)
plot(mylogit$fitted.values,myprobit$fitted.values,
pch=20,xlab="Logit Predictions",
ylab="Probit Predictions")
dev.off()
#################################
# NAFTA example...
temp<-getURL("https://raw.githubusercontent.com/PrisonRodeo/GSERM-2017-git/master/Data/NAFTA.csv")
NAFTA<-read.csv(text=temp, header=TRUE)
rm(temp)
summary(NAFTA)
# Logit:
NAFTA.GLM.fit<-glm(vote~democrat+pcthispc+cope93+DemXCOPE,
NAFTA,family=binomial)
summary(NAFTA.GLM.fit)
# Interactions...
NAFTA.GLM.fit$coeff[4]+NAFTA.GLM.fit$coeff[5]
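# z statistic for the sum of coefficients 4 and 5 (cope93 + DemXCOPE): the sum divided by its standard error, sqrt(Var(b4) + Var(b5) + 2*Cov(b4,b5))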
(NAFTA.GLM.fit$coeff[4]+NAFTA.GLM.fit$coeff[5]) /
(sqrt(vcov(NAFTA.GLM.fit)[4,4] +
(1)^2*vcov(NAFTA.GLM.fit)[5,5] +
2*1*vcov(NAFTA.GLM.fit)[4,5]))
# Same thing, using -car-:
library(car)
linearHypothesis(NAFTA.GLM.fit,"cope93+DemXCOPE=0")
# Predicted values:
preds<-NAFTA.GLM.fit$fitted.values
hats<-predict(NAFTA.GLM.fit,se.fit=TRUE)
# Plotting in-sample predictions:
XBUB<-hats$fit + (1.96*hats$se.fit)
XBLB<-hats$fit - (1.96*hats$se.fit)
plotdata<-cbind(as.data.frame(hats),XBUB,XBLB)
plotdata<-data.frame(lapply(plotdata,binomial(link="logit")$linkinv))
par(mfrow=c(1,2))
library(plotrix)
plotCI(cope93[democrat==1],plotdata$fit[democrat==1],ui=plotdata$XBUB[democrat==1],
li=plotdata$XBLB[democrat==1],pch=20,xlab="COPE Score",ylab="Predicted
Pr(Pro-NAFTA Vote)")
plotCI(cope93[democrat==0],plotdata$fit[democrat==0],ui=plotdata$XBUB[democrat==0],
li=plotdata$XBLB[democrat==0],pch=20,xlab="COPE Score",ylab="Predicted
Pr(Pro-NAFTA Vote)")
# Plotting Out-of-sample Predictions:
sim.data<-data.frame(pcthispc=mean(NAFTA$pcthispc),democrat=rep(0:1,101),
cope93=seq(from=0,to=100,length.out=101))
sim.data$DemXCOPE<-sim.data$democrat*sim.data$cope93
OutHats<-predict(NAFTA.GLM.fit,se.fit=TRUE,newdata=sim.data)
OutHatsUB<-OutHats$fit+(1.96*OutHats$se.fit)
OutHatsLB<-OutHats$fit-(1.96*OutHats$se.fit)
OutHats<-cbind(as.data.frame(OutHats),OutHatsUB,OutHatsLB)
OutHats<-data.frame(lapply(OutHats,binomial(link="logit")$linkinv))
par(mfrow=c(1,2))
both<-cbind(sim.data,OutHats)
both<-both[order(both$cope93,both$democrat),]
plot(both$cope93[democrat==1],both$fit[democrat==1],t="l",lwd=2,ylim=c(0,1),
xlab="COPE Score",ylab="Predicted Pr(Pro-NAFTA Vote)")
lines(both$cope93[democrat==1],both$OutHatsUB[democrat==1],lty=2)
lines(both$cope93[democrat==1],both$OutHatsLB[democrat==1],lty=2)
text(locator(1),label="Democrats")
plot(both$cope93[democrat==0],both$fit[democrat==0],t="l",lwd=2,ylim=c(0,1),
xlab="COPE Score",ylab="Predicted Pr(Pro-NAFTA Vote)")
lines(both$cope93[democrat==0],both$OutHatsUB[democrat==0],lty=2)
lines(both$cope93[democrat==0],both$OutHatsLB[democrat==0],lty=2)
text(locator(1),label="Republicans")
# Odds Ratios:
lreg.or <- function(model)
{
coeffs <- coef(summary(model))
lci <- exp(coeffs[ ,1] - 1.96 * coeffs[ ,2])
or <- exp(coeffs[ ,1])
uci <- exp(coeffs[ ,1] + 1.96 * coeffs[ ,2])
lreg.or <- cbind(lci, or, uci)
lreg.or
}
lreg.or(NAFTA.GLM.fit)
####################
# Goodness of fit:
table(NAFTA.GLM.fit$fitted.values>0.5,NAFTA$vote==1)
chisq.test(NAFTA.GLM.fit$fitted.values>0.5,NAFTA$vote==1)
# ROC curves, plots, etc.:
library(ROCR)
NAFTA.GLM.logithats<-predict(NAFTA.GLM.fit,
type="response")
preds<-prediction(NAFTA.GLM.logithats,NAFTA$vote)
plot(performance(preds,"tpr","fpr"),lwd=2,lty=2,
col="red",xlab="1 - Specificity",ylab="Sensitivity")
abline(a=0,b=1,lwd=3)
###############################
# Event counts...
#
# Various Poisson histograms
set.seed(7222009)
N<-1000
LP05<-rpois(N,0.5)
LP1<-rpois(N,1)
LP5<-rpois(N,5)
LP10<-rpois(N,10)
pdf("PoissonHistogramsR.pdf",7,6)
par(mfrow=c(2,2))
hist(LP05,col="grey",xlim=c(0,25),breaks=seq(0,25,by=1),
ylim=c(0,1000),xlab="Count",main="Lambda = 0.5")
hist(LP1,col="grey",xlim=c(0,25),breaks=seq(0,25,by=1),
ylim=c(0,1000),xlab="Count",main="Lambda = 1.0")
hist(LP5,col="grey",xlim=c(0,25),breaks=seq(0,25,by=1),
ylim=c(0,1000),xlab="Count",main="Lambda = 5")
hist(LP10,col="grey",xlim=c(0,25),breaks=seq(0,25,by=1),
ylim=c(0,1000),xlab="Count",main="Lambda = 10")
dev.off()
# Get SCOTUS nullifications data:
temp<-getURL("https://raw.githubusercontent.com/PrisonRodeo/GSERM-2017-git/master/Data/nulls.csv")
Nulls<-read.csv(text=temp, header=TRUE)
rm(temp)
# Histogram:
pdf("NullsHist.pdf",6,5)
par(mar=c(4,4,2,2))
with(Nulls,
hist(nulls,main="",xlab="Number of Nullifications",
col="grey"))
dev.off()
# Poisson regression:
nulls.poisson<-glm(nulls~tenure+unified,family="poisson",
data=Nulls)
summary(nulls.poisson)
# IRRs:
library(mfx)
nulls.poisson.IRR<-poissonirr(nulls~tenure+unified,
data=Nulls)
nulls.poisson.IRR
# Predictions:
tenure<-seq(0,20,1)
unified<-1
simdata<-as.data.frame(cbind(tenure,unified))
nullhats<-predict(nulls.poisson,newdata=simdata,se.fit=TRUE)
# NOTE: These are XBs, not predicted counts.
# Transforming:
nullhats$Yhat<-exp(nullhats$fit)
nullhats$UB<-exp(nullhats$fit + 1.96*(nullhats$se.fit))
nullhats$LB<-exp(nullhats$fit - 1.96*(nullhats$se.fit))
# Plot...
pdf("NullsOutOfSampleHatsR.pdf",6,5)
plot(simdata$tenure,nullhats$Yhat,t="l",lwd=3,ylim=c(0,5),ylab=
"Predicted Count", xlab="Mean Tenure")
lines(simdata$tenure,nullhats$UB,lwd=2,lty=2)
lines(simdata$tenure,nullhats$LB,lwd=2,lty=2)
dev.off()
# Offsets with dyadic data...Aggregated counts
# of conflicts between the countries in each
# dyad, 1950-1985...
temp<-getURL("https://raw.githubusercontent.com/PrisonRodeo/GSERM-2017-git/master/Data/offsetIR.csv")
IR<-read.csv(text=temp, header=TRUE)
rm(temp)
summary(IR)
cor(IR,use="complete.obs")
IR.fit1<-glm(disputes~allies+openness,data=IR,family="poisson")
summary(IR.fit1)
IR.fit2<-glm(disputes~allies+openness,data=IR,family="poisson",
offset=log(Ndyads))
summary(IR.fit2)
IR.fit3<-glm(disputes~allies+openness+log(Ndyads),data=IR,
family="poisson")
summary(IR.fit3)
# z-test:
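# two-sided test of H0: coefficient on log(Ndyads) = 1; 0.811 and .071 are presumably the estimate and SE of log(Ndyads) from summary(IR.fit3)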
2*pnorm((0.811-1)/.071)
# Wald test:
library(aod)
wald.test(b=coef(IR.fit3),Sigma=vcov(IR.fit3),Terms=4,H0=1)
|
context("G20 data frame")
test_that("G20 data frame is present", {
expect_equal(
names(G20),
c(
"region",
"country",
"gdp_mil_usd",
"hdi",
"econ_classification",
"hemisphere"
)
)
expect_equal(nrow(G20), 20)
})
|
/tests/testthat/test_G20.R
|
no_license
|
wilkox/treemapify
|
R
| false
| false
| 265
|
r
|
context("G20 data frame")
test_that("G20 data frame is present", {
expect_equal(
names(G20),
c(
"region",
"country",
"gdp_mil_usd",
"hdi",
"econ_classification",
"hemisphere"
)
)
expect_equal(nrow(G20), 20)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manip.r
\name{slice}
\alias{slice}
\alias{slice_}
\title{Select rows by position.}
\usage{
slice(.data, ...)
slice_(.data, ..., .dots)
}
\arguments{
\item{.data}{A tbl. All main verbs are S3 generics and provide methods
for \code{\link{tbl_df}}, \code{\link[dtplyr]{tbl_dt}} and \code{\link{tbl_sql}}.}
\item{...}{Integer row values}
\item{.dots}{Used to work around non-standard evaluation. See
\code{vignette("nse")} for details.}
}
\description{
Slice does not work with relational databases because they have no
intrinsic notion of row order. If you want to perform the equivalent
operation, use \code{\link{filter}()} and \code{\link{row_number}()}.
}
\examples{
slice(mtcars, 1L)
slice(mtcars, n())
slice(mtcars, 5:n())
by_cyl <- group_by(mtcars, cyl)
slice(by_cyl, 1:2)
# Equivalent code using filter that will also work with databases,
# but won't be as fast for in-memory data. For many databases, you'll
# need to supply an explicit variable to use to compute the row number.
filter(mtcars, row_number() == 1L)
filter(mtcars, row_number() == n())
filter(mtcars, between(row_number(), 5, n()))
}
\seealso{
Other single table verbs: \code{\link{arrange}},
\code{\link{filter}}, \code{\link{mutate}},
\code{\link{select}}, \code{\link{summarise}}
}
|
/man/slice.Rd
|
no_license
|
ravinpoudel/dplyr
|
R
| false
| true
| 1,343
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manip.r
\name{slice}
\alias{slice}
\alias{slice_}
\title{Select rows by position.}
\usage{
slice(.data, ...)
slice_(.data, ..., .dots)
}
\arguments{
\item{.data}{A tbl. All main verbs are S3 generics and provide methods
for \code{\link{tbl_df}}, \code{\link[dtplyr]{tbl_dt}} and \code{\link{tbl_sql}}.}
\item{...}{Integer row values}
\item{.dots}{Used to work around non-standard evaluation. See
\code{vignette("nse")} for details.}
}
\description{
Slice does not work with relational databases because they have no
intrinsic notion of row order. If you want to perform the equivalent
operation, use \code{\link{filter}()} and \code{\link{row_number}()}.
}
\examples{
slice(mtcars, 1L)
slice(mtcars, n())
slice(mtcars, 5:n())
by_cyl <- group_by(mtcars, cyl)
slice(by_cyl, 1:2)
# Equivalent code using filter that will also work with databases,
# but won't be as fast for in-memory data. For many databases, you'll
# need to supply an explicit variable to use to compute the row number.
filter(mtcars, row_number() == 1L)
filter(mtcars, row_number() == n())
filter(mtcars, between(row_number(), 5, n()))
}
\seealso{
Other single table verbs: \code{\link{arrange}},
\code{\link{filter}}, \code{\link{mutate}},
\code{\link{select}}, \code{\link{summarise}}
}
|
# NOTE: predSD() is defined further down in this file; run its definition (or source the whole file) before the prediction calls below
library(raster)
library(ggplot2)
cov_stamp = raster::stack("~/development/aoa_disassembly/tests/testdata/aqb_stamp.grd")
cov_all = raster::stack("data/covariates/predictors.grd")
cov_stamp_v = getValues(cov_all)
m1 = readRDS("data/models/model_allvars_juelichcv.RDS")
p1 = predSD(m1, cov_all)
raster::plot(p1)
writeRaster(p1, "data/predictionSDs/sd_allvars_juelichcv.grd", overwrite = TRUE)
m2 = readRDS("data/models/model_ffs_juelichcv.RDS")
p2 = predSD(m2, cov_all)
raster::plot(p2)
writeRaster(p2, "data/predictionSDs/sd_ffs_juelichcv.grd", overwrite = TRUE)
m3 = readRDS("data/models/model_nolat_juelichcv.RDS")
p3 = predSD(m3, cov_all)
raster::plot(p3)
writeRaster(p3, "data/predictionSDs/sd_nolat_juelichcv.grd", overwrite = TRUE)
m4 = readRDS("data/models/model_ffsnolat_juelichcv.RDS")
p4 = predSD(m4, cov_all)
raster::plot(p4)
writeRaster(p4, "data/predictionSDs/sd_ffsnolat_juelichcv.grd", overwrite = TRUE)
predSD = function(model, cov){
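  # per-cell spread of the individual ranger tree predictions, returned as a raster of coefficients of variation (SD as a percentage of the mean prediction)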
res = cov[[1]]
#p = raster::predict(cov, model)
cov = getValues(cov)
model = model$finalModel
print("predicting now")
psd = predict(model, cov, predict.all = TRUE, num.trees = 100)
print("apply 1")
p = apply(psd$predictions, 1, FUN = mean)
print("apply 2")
psd = apply(psd$predictions, 1, FUN = sd)
res = raster::setValues(x = res, values = round(psd / p, 2)*100)
return(res)
}
df = data.frame(allvars = getValues(p1),
ffs = getValues(p2),
nolat = getValues(p3),
ffsnolat = getValues(p4))
df2 = reshape2::melt(df, value.name = "SD")
ggplot(df2, aes(y = SD, x = variable))+
geom_boxplot()
|
/src/pred_sd.R
|
no_license
|
LOEK-RS/aqbench_ml
|
R
| false
| false
| 1,651
|
r
|
# NOTE: predSD() is defined further down in this file; run its definition (or source the whole file) before the prediction calls below
library(raster)
library(ggplot2)
cov_stamp = raster::stack("~/development/aoa_disassembly/tests/testdata/aqb_stamp.grd")
cov_all = raster::stack("data/covariates/predictors.grd")
cov_stamp_v = getValues(cov_all)
m1 = readRDS("data/models/model_allvars_juelichcv.RDS")
p1 = predSD(m1, cov_all)
raster::plot(p1)
writeRaster(p1, "data/predictionSDs/sd_allvars_juelichcv.grd", overwrite = TRUE)
m2 = readRDS("data/models/model_ffs_juelichcv.RDS")
p2 = predSD(m2, cov_all)
raster::plot(p2)
writeRaster(p2, "data/predictionSDs/sd_ffs_juelichcv.grd", overwrite = TRUE)
m3 = readRDS("data/models/model_nolat_juelichcv.RDS")
p3 = predSD(m3, cov_all)
raster::plot(p3)
writeRaster(p3, "data/predictionSDs/sd_nolat_juelichcv.grd", overwrite = TRUE)
m4 = readRDS("data/models/model_ffsnolat_juelichcv.RDS")
p4 = predSD(m4, cov_all)
raster::plot(p4)
writeRaster(p4, "data/predictionSDs/sd_ffsnolat_juelichcv.grd", overwrite = TRUE)
predSD = function(model, cov){
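  # per-cell spread of the individual ranger tree predictions, returned as a raster of coefficients of variation (SD as a percentage of the mean prediction)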
res = cov[[1]]
#p = raster::predict(cov, model)
cov = getValues(cov)
model = model$finalModel
print("predicting now")
psd = predict(model, cov, predict.all = TRUE, num.trees = 100)
print("apply 1")
p = apply(psd$predictions, 1, FUN = mean)
print("apply 2")
psd = apply(psd$predictions, 1, FUN = sd)
res = raster::setValues(x = res, values = round(psd / p, 2)*100)
return(res)
}
df = data.frame(allvars = getValues(p1),
ffs = getValues(p2),
nolat = getValues(p3),
ffsnolat = getValues(p4))
df2 = reshape2::melt(df, value.name = "SD")
ggplot(df2, aes(y = SD, x = variable))+
geom_boxplot()
|
#' Epidemic Algorithm for detection of multivariate outliers in incomplete survey data
#'
#' In \code{EAdet} an epidemic is started at a center of the data. The epidemic
#' spreads out and infects neighbouring points (probabilistically or deterministically).
#' The last points infected are outliers. After running \code{EAdet} an imputation
#' with \code{EAimp} may be run.
#'
#' The form and parameters of the transmission function should be chosen such that the
#' infection times have at least a range of 10. The default cutting point to decide on
#' outliers is the median infection time plus three times the mad of infection times.
#' A better cutpoint may be chosen by visual inspection of the cdf of infection times.
#' \code{EAdet} calls the function \code{EA.dist}, which passes the counterprobabilities
#' of infection (a \eqn{n * (n - 1) / 2} size vector!) and three parameters (sample
#' spatial median index, maximal distance to nearest neighbor and transmission distance =
#' reach) as arguments to \code{EAdet}. The distances vector may be too large to be passed
#' as arguments. If that happens, the memory size must be increased. Former versions of the
#' code used a global variable to store the distances in order to save memory.
#'
#' @param data a data frame or matrix with data.
#' @param weights a vector of positive sampling weights.
#' @param reach if \code{reach = "max"} the maximal nearest neighbor distance is
#' used as the basis for the transmission function, otherwise the weighted
#' \eqn{(1 - (p + 1) / n)} quantile of the nearest neighbor distances is used.
#' @param transmission.function form of the transmission function of distance d:
#' \code{"step"} is a heaviside function which jumps to \code{1} at \code{d0},
#' \code{"linear"} is linear between \code{0} and \code{d0}, \code{"power"} is
#' \code{(beta*d+1)^(-p)} for \code{p = ncol(data)} and \code{beta <- as.single((0.01^(-1 / power) - 1) / d0)} as default, \code{"root"} is the
#' function \code{1-(1-d/d0)^(1/maxl)}.
#' @param power sets \code{p = power}.
#' @param distance.type distance type in function \code{dist()}.
#' @param maxl maximum number of steps without infection.
#' @param plotting if \code{TRUE}, the cdf of infection times is plotted.
#' @param monitor if \code{TRUE}, verbose output on epidemic.
#' @param prob.quantile if mads fail, take this quantile absolute deviation.
#' @param random.start if \code{TRUE}, take a starting point at random instead of the
#' spatial median.
#' @param fix.start force epidemic to start at a specific observation.
#' @param threshold infect all remaining points with infection probability above
#' the threshold \code{1-0.5^(1/maxl)}.
#' @param deterministic if \code{TRUE}, the number of infections is the expected
#' number and the infected observations are the ones with largest infection probabilities.
#' @param rm.missobs set \code{rm.missobs=TRUE} if completely missing observations
#' should be discarded. This has to be done actively as a safeguard to avoid mismatches
#' when imputing.
#' @param verbose more output with \code{verbose=TRUE}.
#' @return \code{EAdet} returns a list whose first component \code{output} is a sub-list
#' with the following components:
#' \describe{
#' \item{\code{sample.size}}{Number of observations}
#' \item{\code{discarded.observations}}{Indices of discarded observations}
#' \item{\code{missing.observations}}{Indices of completely missing observations}
#' \item{\code{number.of.variables}}{Number of variables}
#' \item{\code{n.complete.records}}{Number of records without missing values}
#' \item{\code{n.usable.records}}{Number of records with less than half of values
#' missing (unusable observations are discarded)}
#' \item{\code{medians}}{Component wise medians}
#' \item{\code{mads}}{Component wise mads}
#' \item{\code{prob.quantile}}{Use this quantile if mads fail, i.e. if one of the mads is 0}
#' \item{\code{quantile.deviations}}{Quantile of absolute deviations}
#' \item{\code{start}}{Starting observation}
#' \item{\code{transmission.function}}{Input parameter}
#' \item{\code{power}}{Input parameter}
#' \item{\code{maxl}}{Maximum number of steps without infection}
#' \item{\code{min.nn.dist}}{Maximal nearest neighbor distance}
#' \item{\code{transmission.distance}}{\code{d0}}
#' \item{\code{threshold}}{Input parameter}
#' \item{\code{distance.type}}{Input parameter}
#' \item{\code{deterministic}}{Input parameter}
#' \item{\code{number.infected}}{Number of infected observations}
#' \item{\code{cutpoint}}{Cutpoint of infection times for outlier definition}
#' \item{\code{number.outliers}}{Number of outliers}
#' \item{\code{outliers}}{Indices of outliers}
#' \item{\code{duration}}{Duration of epidemic}
#' \item{\code{computation.time}}{Elapsed computation time}
#' \item{\code{initialisation.computation.time}}{Elapsed computation time for
#' standardisation and calculation of distance matrix}
#' }
#' The further components returned by \code{EAdet} are:
#' \describe{
#' \item{\code{infected}}{Indicator of infection}
#' \item{\code{infection.time}}{Time of infection}
#' \item{\code{outind}}{Indicator of outliers}
#' }
#' @author Beat Hulliger
#' @references Béguin, C. and Hulliger, B. (2004) Multivariate outlier detection in
#' incomplete survey data: the epidemic algorithm and transformed rank correlations,
#' JRSS-A, 167, Part 2, pp. 275-294.
#' @seealso \code{\link{EAimp}} for imputation with the Epidemic Algorithm.
#' @examples
#' data(bushfirem, bushfire.weights)
#' det.res <- EAdet(bushfirem, bushfire.weights)
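#' # Illustrative follow-up (not run): inspect the proposed cutpoint and the
#' # indices of the flagged outliers in the result object created above.
#' # det.res$cutpoint
#' # det.res$outliers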
#' @export
#' @importFrom stats rbinom
#' @importFrom graphics plot abline
EAdet <- function(data, weights, reach = "max", transmission.function = "root",
power = ncol(data), distance.type = "euclidean", maxl = 5,
plotting = TRUE, monitor = FALSE, prob.quantile = 0.9,
random.start = FALSE, fix.start, threshold = FALSE,
deterministic = TRUE, rm.missobs = FALSE, verbose = FALSE) {
# ------- preparation -------
# transform data to matrix
if (!is.matrix(data)) {data <- data.matrix(data)}
# number of rows
n <- nrow(data)
# number of columns
p <- ncol(data)
# set weights to 1 if missing
if (missing(weights)) {weights <- rep(1, n)}
# finding the unit(s) with all items missing
new.indices <- which(apply(is.na(data), 1, prod) == 0)
miss.indices <- apply(is.na(data), 1, prod) == 1
missobs <- which(miss.indices)
discarded <- NA
nfull <- n
# removing the unit(s) with all items missing
if ((length(new.indices) < n) & rm.missobs) {
discarded <- missobs
cat("Warning: missing observations", discarded, "removed from the data\n")
data <- data[new.indices, ]
weights <- weights[new.indices]
n <- nrow(data)
}
# find complete and usable (valid information for at least half of var.) records
complete.records <- apply(!is.na(data), 1, prod)
usable.records <- apply(!is.na(data), 1, sum) >= p / 2
# print progress to console
if (verbose) {
cat("\n Dimensions (n,p):", n, p)
cat("\n Number of complete records ", sum(complete.records))
cat("\n Number of records with maximum p/2 variables missing ",
sum(usable.records), "\n")
}
  # transform parameter power to single precision
power <- as.single(power)
# standardization of weights to sum to sample size
np <- sum(weights)
weights <- as.single((n * weights) / np)
# start computation time
calc.time <- proc.time()[1]
  # ------- calibration and setup -------
# compute medians
medians <- apply(data, 2, weighted.quantile, w = weights, prob = 0.5)
# sweep median from data
data <- sweep(data, 2, medians, "-")
# compute median absolute deviations
mads <- apply(abs(data), 2, weighted.quantile, w = weights, prob = 0.5)
# compute quantile absolute deviations
qads <- apply(abs(data), 2, weighted.quantile, w = weights, prob = prob.quantile)
# standardization
if(sum(mads == 0) > 0) {
# output to console if some mads are 0
cat("\n Some mads are 0. Standardizing with ", prob.quantile,
" quantile absolute deviations!")
if(sum(qads == 0) > 0) {
# if some qads are 0, no standardization
cat("\n Some quantile absolute deviations are 0. No standardization!")
} else {
# if no qads are 0, standardize with qads
data <- sweep(data, 2, qads, "/")
}
} else {
# if no mads are 0, standardize with mads
data <- sweep(data, 2, mads, "/")
}
# calculation of distances
EA.dist.res <- EA.dist(data, n = n, p = p, weights = weights, reach = reach,
transmission.function = transmission.function,
power = power, distance.type = distance.type,
maxl = maxl)
# print progress to console
if (monitor) {cat("\n\n Distances finished")}
  # The distances calculated by EA.dist are the
  # counterprobabilities in single precision.
if (monitor) {
cat("\n Index of sample spatial median is ", EA.dist.res$sample.spatial.median.index)
cat("\n Maximal distance to nearest neighbor is ", EA.dist.res$max.min.di)
cat("\n Transmission distance is ", EA.dist.res$transmission.distance, "\n")
}
# ------- initialisation -------
# print progress to console
if (verbose) {cat("\n\n Initialisation of epidemic")}
# initialization time
comp.time.init <- proc.time()[1] - calc.time
# print initialization time to console
if(monitor) {cat("\n Initialisation time is ", comp.time.init)}
# define starting point of infection
if(random.start) {
# random starting point
start.point <- sample(1:n, 1, prob = weights)
} else {
if(!missing(fix.start)) {
# if fix.start is defined, then this is the starting point
start.point <- fix.start
} else {
# else start with sample spatial median index
start.point <- EA.dist.res$sample.spatial.median.index
}
}
# set time to 1
time <- 1
# initialize infected vector with FALSE
infected <- rep(FALSE, n)
  # set starting point of infection to TRUE
infected[c(start.point)] <- TRUE
# initialize various things
new.infected <- infected
n.infected <- sum(infected)
hprod <- rep(1, n)
infection.time <- rep(0, n)
infection.time[c(start.point)] <- time
# ------- main loop -------
repeat {
# print progress to console
if (monitor) {cat("\n time = ", time, " , infected = ", n.infected)}
time <- time + 1
old.infected <- infected
if(sum(new.infected) > 1) {
hprod[!infected] <-
hprod[!infected] * apply(sweep(
sweep(matrix(EA.dist.res$counterprobs[apply(
as.matrix(which(!infected)), 1, ind.dijs,
js = which(new.infected), n = n)], sum(new.infected),
n - n.infected), 1, weights[new.infected], "^"), 2,
weights[!infected], "^"), 2, prod)
} else {
if(sum(new.infected) == 1) {
hprod[!infected] <-
hprod[!infected] * EA.dist.res$counterprobs[apply(
as.matrix(which(!infected)), 1, ind.dijs,
js = which(new.infected), n = n)] ^ (weights[new.infected] *
weights[!infected])
}
}
if (deterministic) {
n.to.infect <- sum(1 - hprod[!infected]) # HRK: expected number of infections
# do maxl trials for very small inf. prob.
if (n.to.infect < 0.5) {
n.to.infect <- sum(1 - hprod[!infected] ^ maxl)
}
n.to.infect <- round(n.to.infect)
infected[!infected] <-
rank(1 - hprod[!infected]) >= n - n.infected - n.to.infect
} else {
if (threshold) {
infected[!infected] <- hprod[!infected] <= 0.5 ^ (1 / maxl)
} else {
infected[!infected] <-
as.logical(rbinom(n - n.infected, 1, 1 - hprod[!infected]))
}
}
new.infected <- infected & (!old.infected)
n.infected <- sum(infected)
infection.time[new.infected] <- time
# if all are infected, stop loop
if(n.infected == n) {break}
# if max. infection steps is reached, stop loop
if((time - max(infection.time)) > maxl) {break}
# start next iteration of loop
next
}
# duration of infection
duration <- max(infection.time)
# stop computation time
calc.time <- round(proc.time()[1] - calc.time, 5)
# default cutpoint
med.infection.time <- weighted.quantile(infection.time, weights, 0.5)
mad.infection.time <-
weighted.quantile(abs(infection.time - med.infection.time), weights, 0.5)
# print progress to console
if (verbose) {
cat("\n med and mad of infection times: ", med.infection.time,
" and ", mad.infection.time)
}
if (mad.infection.time == 0) {
mad.infection.time <- med.infection.time
}
cutpoint <- min(med.infection.time + 3 * mad.infection.time, duration)
# print progress to console
if (verbose) {cat("\n Proposed cutpoint is ", min(cutpoint, duration))}
# blowing up to full length
infectedn <- logical(n)
infectedn[infected] <- TRUE
infectednfull <- logical(nfull)
if (nfull > n) {
infectednfull[new.indices] <- infectedn
} else {
infectednfull <- infectedn
}
# initialize empty vector for infection times
inf.time <- rep(NA, nfull)
# get infection times
if (nfull > n) {
inf.time[new.indices] <- infection.time
} else {
inf.time <- infection.time
}
  # set infection time of never-infected observations to NA
inf.time[!infectednfull] <- NA
# outliers full sample
outlier <- (inf.time >= cutpoint)
# get indices of outliers
outlier.ind <- which(outlier)
# ------- plotting -------
  # plot the cdf of infection times and mark the cutpoint
  # (never-infected observations have NA infection times)
if(plotting) plotIT(inf.time, weights, cutpoint)
# ------- results -------
# output to console
message(paste0("EA detection has finished with ", n.infected,
" infected points in ", round(calc.time[1], 2), " seconds."))
# return output
return(
structure(
list(
sample.size = n,
discarded.observations = discarded,
missing.observations = missobs,
number.of.variables = p,
n.complete.records = sum(complete.records),
n.usable.records = sum(usable.records),
medians = medians,
mads = mads,
prob.quantile = prob.quantile,
quantile.deviations = qads,
start = start.point,
transmission.function = transmission.function,
power = power,
maxl = maxl,
max.min.di = EA.dist.res$max.min.di,
transmission.distance = EA.dist.res$transmission.distance,
threshold = threshold,
distance.type = distance.type,
deterministic = deterministic,
number.infected = n.infected,
cutpoint = cutpoint,
number.outliers = sum(outlier),
outliers = outlier.ind,
duration = duration,
computation.time = calc.time,
initialisation.computation.time = comp.time.init,
infected = infectednfull,
infection.time = inf.time,
outind = outlier), class = "EAdet.r"))
}
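# --- Illustrative sketch, not part of the package API ------------------------
# Shows the "root" transmission function and the default cutpoint rule
# described in the documentation above; root.transmission, d0, maxl and
# inf.times are made-up names/values used only for this example (base R only).
root.transmission <- function(d, d0, maxl) 1 - (1 - pmin(d, d0) / d0)^(1 / maxl)
d0 <- 2; maxl <- 5
round(root.transmission(c(0, 0.5, 1, 2, 3), d0, maxl), 3)  # reaches 1 at d = d0
inf.times <- c(1, 1, 2, 2, 2, 3, 3, 9)                     # hypothetical infection times
cutpoint <- min(median(inf.times) + 3 * mad(inf.times, constant = 1), max(inf.times))
which(inf.times >= cutpoint)                               # observation(s) flagged as outliers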
|
/R/EAdet.R
|
no_license
|
cran/modi
|
R
| false
| false
| 15,483
|
r
|
#!/usr/bin/env Rscript
args = commandArgs(trailingOnly=TRUE)
library(rmarkdown)
render(args[1], md_document(variant = 'gfm', preserve_yaml=TRUE))
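# Example invocation (hypothetical post file), run from a shell:
#   Rscript processRmds.R _posts/2020-01-01-my-post.Rmd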
|
/scripts/processRmds.R
|
no_license
|
bartek-blog/bartek-blog.github.io
|
R
| false
| false
| 147
|
r
|
revenue.dea <- function(base = NULL, frontier = NULL,
noutput = 1, output.price = NULL) {
## output.price: c(p1', p2', ..., ps')
if(is.null(frontier))
frontier <- base
if(!is.null(base) & !is.null(frontier)){
base <- as.matrix(base)
frontier <- as.matrix(frontier)
}
if(ncol(base) != ncol(frontier))
stop("Number of columns in base matrix and frontier matrix should be the same!")
if(!is.vector(output.price))
stop("Fixed output price (vector) must be provided by user!!")
s <- noutput
m <- ncol(base) - s
n <- nrow(base)
nf <- nrow(frontier)
front.Y <- t(frontier[, 1:s])
front.X <- t(frontier[, (s+1):(s+m)])
base.Y <- t(base[, 1:s])
base.X <- t(base[, (s+1):(s+m)])
ps <- output.price
each.revenue <- base.Y * output.price
total.revenue <- apply(each.revenue, 2, sum)
re <- data.frame(matrix(0, nrow = n, ncol = s + 6))
names(re) <- c(paste("y.star", 1:s, sep = ""),
"OE.revenue", "TE.revenue", "Revealed.revenue", "OE",
"AE", "TE")
thetas <- dea(base = base, frontier = frontier, noutput = noutput,
orientation = 2, rts = 1)[, 1]
for(i in 1:n){
f.obj <- c(ps, rep(0, nf))
f.dir <- c(rep(">=", s), rep("<=", m))
f.rhs <- c(rep(0, s), base.X[,i])
f.con1 <- cbind(-diag(s), front.Y)
f.con2 <- cbind(matrix(0, m, s), front.X)
f.con <- rbind(f.con1, f.con2)
re.tmp <- lp("max", f.obj, f.con, f.dir, f.rhs)
max.revenue<- re.tmp$objval
y.star <- re.tmp$solution[1:s]
re[i, 1:s] <- y.star
re[i, s+1] <- max.revenue
re[i, s+2] <- thetas[i] * sum(base.Y[,i]*ps) # technical efficiency
re[i, s+3] <- sum(base.Y[,i]*ps)
re[i, s+4] <- re[i, s+3]/re[i, s+1]
re[i, s+5] <- re[i, s+2]/re[i, s+1]
re[i, s+6] <- 1/thetas[i]
}
return(re)
}
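# --- Illustrative usage sketch (hypothetical data) ----------------------------
# revenue.dea() relies on lp() from the 'lpSolve' package and on the dea()
# function of this package, so the call is shown commented out:
# library(lpSolve)
# dat <- data.frame(y  = c(10, 12, 8),  # one output
#                   x1 = c(2, 3, 4),    # first input
#                   x2 = c(5, 4, 6))    # second input
# revenue.dea(dat, noutput = 1, output.price = 3)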
|
/R/revenue_dea.r
|
no_license
|
cran/nonparaeff
|
R
| false
| false
| 1,852
|
r
|
#' Install / Uninstall GluonTS
#'
#' @description
#' `install_gluonts()`: Installs `GluonTS` Probabilistic Deep Learning Time Series Forecasting Software
#' using `reticulate::py_install()`.
#'
#' - A `Python` Environment will be created named `r-gluonts`.
#' - When loaded with `library(modeltime.gluonts)`, the `modeltime.gluonts` R package
#' will connect to the `r-gluonts` Python environment by default. See "Details" for
#' connecting to custom python environments.
#' - If `fresh_install`, will remove any prior installations of the "r-gluonts" python environment
#' - If `include_pytorch`, will install additional dependencies needed for the optional
#' pytorch backend that is available in some algorithms.
#'
#' `uninstall_gluonts()`: Will remove the "r-gluonts" python environment and python packages
#'
#' @param include_pytorch If `TRUE`, will install `torch`. Needed for Torch implementation
#' of `deep_ar()`. Default: `FALSE`.
#' @param fresh_install If `TRUE`, will remove prior installations of the `r-gluonts`
#' conda environment to setup for a fresh installation. This can be useful if
#' errors appear during upgrades. Default: `FALSE`.
#'
#' @details
#'
#' __Options for Connecting to Python__
#'
#' - __Recommended__ _Use Pre-Configured Python Environment:_ Use `install_gluonts()` to
#' install GluonTS Python Libraries into a conda environment named 'r-gluonts'.
#' - __Advanced__ _Use a Custom Python Environment:_ Before running `library(modeltime.gluonts)`,
#' use `Sys.setenv(GLUONTS_PYTHON = 'path/to/python')` to set the path of your
#' python executable in an environment that has 'gluonts', 'mxnet', 'numpy', 'pandas',
#' and 'pathlib' available as dependencies.
#'
#' __Package Manager Support (Python Environment)__
#'
#' - __Conda Environments:__ Currently, `install_gluonts()` supports Conda and Miniconda Environments.
#'
#' - __Virtual Environments:__ are not currently supported with the default installation method, `install_gluonts()`.
#' However, you can connect to virtual environment that you have created using
#'   `Sys.setenv(GLUONTS_PYTHON = 'path/to/python')` prior to running `library(modeltime.gluonts)`.
#'
#' @examples
#' \dontrun{
#' install_gluonts()
#' }
#'
#'
#' @export
install_gluonts <- function(
fresh_install = FALSE,
include_pytorch = FALSE
) {
# Check for Anaconda
if (!check_conda()) {
return()
}
# REMOVE PREVIOUS ENV
if (fresh_install) {
cli::cli_alert_info("Removing conda env `r-gluonts` to setup for fresh install...")
reticulate::conda_remove("r-gluonts")
}
# PYTHON SPEC
python_version <- "3.7.1"
# GLUONTS INSTALLATION
message("\n")
cli::cli_alert_info("Installing gluonts dependencies...")
message("\n")
default_pkgs <- c(
"mxnet~=1.7",
"gluonts==0.8.0",
"numpy",
"pandas==1.0.5",
"pathlib==1.0.1",
"ujson==4.0.2",
"brotlipy"
)
reticulate::py_install(
packages = default_pkgs,
envname = "r-gluonts",
method = "conda",
conda = "auto",
python_version = python_version,
pip = TRUE
)
# TORCH INSTALLATION
if (include_pytorch) {
message("\n")
cli::cli_alert_info("Installing torch dependencies...")
message("\n")
torch_pkgs <- c(
"torch~=1.6",
"pytorch-lightning~=1.1"
)
reticulate::py_install(
packages = torch_pkgs,
envname = "r-gluonts",
method = "conda",
conda = "auto",
python_version = "3.7.1",
pip = TRUE
)
}
# PROCESS CHECKS
env_exists <- !is.null(detect_default_gluonts_env())
gluonts_failure <- FALSE
message("\n")
if (env_exists) {
cli::cli_alert_success("The {.field r-gluonts} conda environment has been created.")
} else {
cli::cli_alert_danger("The {.field r-gluonts} conda environment could not be created.")
gluonts_failure <- TRUE
}
# default_pkgs_exist <- check_gluonts_dependencies()
# if (default_pkgs_exist) {
# cli::cli_alert_success("Installing gluonts dependencies... ...Success.")
# } else {
# cli::cli_alert_danger("Installing gluonts dependencies... ...Failed. One or more of the following packages are not available: gluonts, mxnet, numpy, pandas, pathlib")
# gluonts_failure <- TRUE
# }
#
# pytorch_failure <- FALSE
# if (include_pytorch) {
# pytorch_exists <- check_pytorch_dependencies()
#
# if (pytorch_exists) {
# cli::cli_alert_success("Installing torch dependencies... ...Success.")
# } else {
# cli::cli_alert_danger("Installing torch dependencies... ...Failed. One or more of the following packages are not available: torch, pytorch_lightning")
# pytorch_failure <- TRUE
# }
#
#
# }
if (env_exists) {
cli::cli_alert_info("Please restart your R Session and run {.code library(modeltime.gluonts)} to activate the {.field r-gluonts} environment.")
} else {
cli::cli_alert_info("For installation failure reports, please copy the python error message(s). Search your error(s) using Google. If none are found, create new issues here: https://github.com/business-science/modeltime.gluonts/issues")
}
}
#' @export
#' @rdname install_gluonts
uninstall_gluonts <- function() {
cli::cli_alert_info("Removing conda env `r-gluonts`...")
reticulate::conda_remove("r-gluonts")
message("\n")
cli::cli_alert_success("The `r-gluonts` env has been removed.")
}
check_conda <- function() {
conda_list_nrow <- nrow(reticulate::conda_list())
if (is.null(conda_list_nrow) || conda_list_nrow == 0L) {
# No conda
message("Could not detect Conda or Miniconda Package Managers, one of which is required for 'install_gluonts()'. \nAvailable options:\n",
" - [Preferred] You can install Miniconda (light-weight) using 'reticulate::install_miniconda()'. \n",
" - Or, you can install the full Aniconda distribution (1000+ packages) using 'reticulate::conda_install()'. \n\n",
"Then use 'install_gluonts()' to set up the GluonTS python environment.")
conda_found <- FALSE
} else {
conda_found <- TRUE
}
return(conda_found)
}
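# --- Illustrative sketch (hypothetical path) ----------------------------------
# Connecting to a custom Python environment instead of the default "r-gluonts"
# conda environment, as described in the roxygen details above:
# Sys.setenv(GLUONTS_PYTHON = "/opt/venvs/gluonts/bin/python")
# library(modeltime.gluonts)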
|
/R/core-install.R
|
permissive
|
StatMixedML/modeltime.gluonts
|
R
| false
| false
| 6,548
|
r
|
#!/usr/bin/Rscript
# ©Santiago Sanchez-Ramirez
args <- commandArgs(trailingOnly=TRUE)
if (length(grep("help", args)) != 0){
stop("\n\nTry:\nRscript RplotEBS.R path=PATH/TO/CSV/FILES pattern=.csv trim.x=-2.0 trim.y=0.6 y.eq=TRUE
Defaults: path=. pattern=csv trim.x=NULL trim.y=NULL y.eq=FALSE\n\n")
}
if (length(grep("path=", args)) != 0){
path <- strsplit(args[grep("path=", args)], "=")[[1]][2]
} else {
path = '.'
}
if (length(grep("pattern=", args)) != 0){
pattern <- strsplit(args[grep("pattern=", args)], "=")[[1]][2]
} else {
pattern = "csv"
}
if (length(grep("trim.x=", args)) != 0){
trim.x <- as.numeric(strsplit(args[grep("trim.x=", args)], "=")[[1]][2])
} else {
trim.x = NULL
}
if (length(grep("trim.y=", args)) != 0){
trim.y <- as.numeric(strsplit(args[grep("trim.y=", args)], "=")[[1]][2])
} else {
trim.y = NULL
}
if (length(grep("y.eq=", args)) != 0){
y.eq <- as.logical(strsplit(args[grep("y.eq=", args)], "=")[[1]][2])
    if (y.eq != TRUE)
        y.eq <- NULL
} else {
y.eq = NULL
}
if (length(grep("panel=", args)) != 0){
panel <- strsplit(args[grep("panel=", args)], "=")[[1]][2]
n <- as.numeric(strsplit(panel, ",")[[1]][1])
t <- as.numeric(strsplit(panel, ",")[[1]][2])
} else {
panel = NULL
}
file.numb <- length(dir(paste(path), pattern=pattern))
if (file.numb == 0){
stop('Pattern not found in directory')
}
### main function
RplotEBS <- function(path, pattern, file.numb, trim.x, trim.y, ...){
files = dir(paste(path), pattern=paste(pattern))
if (length(files) == 0)
stop('the path to the csv files is incorrect')
names = sub(".csv", "", files)
data = list("data.frame", file.numb)
min.l = vector()
max.u = vector()
min.t = vector()
for (i in 1:file.numb){
data[[i]] = read.csv(paste(path, "/", names[i], ".csv", sep=""))
time = -data[[i]]$time
data[[i]]$time = time
min.l[i] = min(data[[i]]$hpd.lower.95)
max.u[i] = max(data[[i]]$hpd.upper.95)
min.t[i] = min(time)
}
for (i in 1:file.numb){
if (!is.null(trim.x) && !is.null(y.eq)){
plot(data[[i]]$hpd.lower.95~data[[i]]$time, cex=1, type="n", ylim=c(min(min.l),max(max.u)), xlim=c(trim.x,0), ylab=expression(paste("Population (", theta, ")")), xlab="Time", main=names[i], ...)
} else if (is.null(trim.x) && is.null(y.eq) && is.null(trim.y)) {
plot(data[[i]]$hpd.lower.95~data[[i]]$time, cex=1, type="n", xlim=c(min(min.t),0), ylim=c(min(data[[i]]$hpd.lower.95),max(data[[i]]$hpd.upper.95)), ylab=expression(paste("Population (", theta, ")")), xlab="Time", main=names[i], ...)
} else if (!is.null(trim.x) && is.null(y.eq) && is.null(trim.y)){
plot(data[[i]]$hpd.lower.95~data[[i]]$time, cex=1, type="n", xlim=c(trim.x,0), ylim=c(min(data[[i]]$hpd.lower.95),max(data[[i]]$hpd.upper.95)), ylab=expression(paste("Population (", theta, ")")), xlab="Time", main=names[i], ...)
} else if (is.null(trim.x) && !is.null(y.eq) && is.null(trim.y)){
plot(data[[i]]$hpd.lower.95~data[[i]]$time, cex=1, type="n", ylim=c(min(min.l),max(max.u)), xlim=c(min(min.t),0), ylab=expression(paste("Population (", theta, ")")), xlab="Time", main=names[i], ...)
} else if (is.null(trim.x) && !is.null(trim.y) && is.null(y.eq)){
plot(data[[i]]$hpd.lower.95~data[[i]]$time, cex=1, type="n", ylim=c(0,trim.y), xlim=c(min(min.t),0), ylab=expression(paste("Population (", theta, ")")), xlab="Time", main=names[i], ...)
} else if (!is.null(trim.x) && is.null(trim.y) && is.null(y.eq)){
plot(data[[i]]$hpd.lower.95~data[[i]]$time, cex=1, type="n", xlim=c(trim.x,0), ylim=c(min(data[[i]]$hpd.lower.95),max(data[[i]]$hpd.upper.95)), ylab=expression(paste("Population (", theta, ")")), xlab="Time", main=names[i], ...)
} else if (!is.null(trim.x) && !is.null(trim.y) && is.null(y.eq)){
plot(data[[i]]$hpd.lower.95~data[[i]]$time, cex=1, type="n", xlim=c(trim.x,0), ylim=c(0,trim.y), ylab=expression(paste("Population (", theta, ")")), xlab="Time", main=names[i], ...)
}
x = c(data[[i]]$time, rev(data[[i]]$time))
y = c(data[[i]]$hpd.lower.95, rev(data[[i]]$hpd.upper.95))
polygon(x = x, y= y, col="grey60", border=NA)
lines(data[[i]]$mean~data[[i]]$time)
lines(data[[i]]$median~data[[i]]$time, lty=2)
axis(4, labels=F)
}
}
###
quartz()
if (is.null(panel)){
if (file.numb==2){
#pdf(file=paste(path,'/',"EBPS.pdf", sep=''), height=3, width=3*file.numb)
par(mfrow=c(1,2))
} else if (file.numb==3){
#pdf(file=paste(path,'/',"EBPS.pdf", sep=''), height=7, width=7*file.numb)
par(mfrow=c(1,3))
} else if (file.numb==4){
#pdf(file=paste(path,'/',"EBPS.pdf", sep=''), height=7*(file.numb/2), width=7*(file.numb/2))
par(mfrow=c(2,2))
} else if (file.numb>4){
stop("\n\nWith more than 4 files panel configuration needs to be specified through the argument \"panel\"\ne.g. panel=2,3\n\n")
}
} else {
par(mfrow=c(n,t))
}
RplotEBS(path,pattern,file.numb,trim.x,trim.y,las=1)
cat('Press <control><c> when ready to close', "\n")
Sys.sleep(Inf)
dev.off()
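# Example invocation (hypothetical directory of EBSP csv files), run from a shell:
#   Rscript RplotEBS.R path=./ebsp_csvs pattern=.csv trim.x=-2.0 y.eq=TRUE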
|
/RplotEBS.R
|
no_license
|
santiagosnchez/microsatellites
|
R
| false
| false
| 4,898
|
r
|
################################################################################
# Copyright 2017-2018 Gabriele Valentini, Douglas G. Moore. All rights reserved.
# Use of this source code is governed by a MIT license that can be found in the
# LICENSE file.
################################################################################
library(rinform)
context("Entropy Rate")
test_that("entropy_rate checks parameters", {
xs <- sample(0:1, 10, T)
expect_error(entropy_rate("series", k = 1, local = !T))
expect_error(entropy_rate(NULL, k = 1, local = !T))
expect_error(entropy_rate(NA, k = 1, local = !T))
expect_error(entropy_rate(xs, k = "k", local = !T))
expect_error(entropy_rate(xs, k = NULL, local = !T))
expect_error(entropy_rate(xs, k = NA, local = !T))
expect_error(entropy_rate(xs, k = 0, local = !T))
expect_error(entropy_rate(xs, k = -1, local = !T))
expect_error(entropy_rate(xs, k = 1, local = "TRUE"))
expect_error(entropy_rate(xs, k = 1, local = NULL))
expect_error(entropy_rate(xs, k = 1, local = NA))
})
test_that("entropy_rate on single series", {
expect_equal(entropy_rate(c(1, 1, 0, 0, 1, 0, 0, 1),
k = 2, local = !T), 0.000000, tolerance = 1e-6)
expect_equal(entropy_rate(c(1, 0, 0, 0, 0, 0, 0, 0, 0),
k = 2, local = !T), 0.000000, tolerance = 1e-6)
expect_equal(entropy_rate(c(0, 0, 1, 1, 1, 1, 0, 0, 0),
k = 2, local = !T), 0.679270, tolerance = 1e-6)
expect_equal(entropy_rate(c(1, 0, 0, 0, 0, 0, 0, 1, 1),
k = 2, local = !T), 0.515663, tolerance = 1e-6)
expect_equal(entropy_rate(c(3, 3, 3, 2, 1, 0, 0, 0, 1),
k = 2, local = !T), 0.571428, tolerance = 1e-6)
expect_equal(entropy_rate(c(2, 2, 3, 3, 3, 3, 2, 1, 0),
k = 2, local = !T), 0.393556, tolerance = 1e-6)
expect_equal(entropy_rate(c(2, 2, 2, 2, 2, 2, 1, 1, 1),
k = 2, local = !T), 0.515662, tolerance = 1e-6)
})
test_that("entropy_rate on ensemble of series", {
series <- matrix(0, nrow = 8, ncol = 2)
series[, 1] <- c(1, 1, 0, 0, 1, 0, 0, 1)
series[, 2] <- c(0, 0, 0, 1, 0, 0, 0, 1)
expect_equal(entropy_rate(series, k = 2, local = !T), 0.459148, tolerance = 1e-6)
series <- matrix(0, nrow = 9, ncol = 9)
series[, 1] <- c(1, 0, 0, 0, 0, 0, 0, 0, 0)
series[, 2] <- c(0, 0, 1, 1, 1, 1, 0, 0, 0)
series[, 3] <- c(1, 0, 0, 0, 0, 0, 0, 1, 1)
series[, 4] <- c(1, 0, 0, 0, 0, 0, 0, 1, 1)
series[, 5] <- c(0, 0, 0, 0, 0, 1, 1, 0, 0)
series[, 6] <- c(0, 0, 0, 0, 1, 1, 0, 0, 0)
series[, 7] <- c(1, 1, 1, 0, 0, 0, 0, 1, 1)
series[, 8] <- c(0, 0, 0, 1, 1, 1, 1, 0, 0)
series[, 9] <- c(0, 0, 0, 0, 0, 0, 1, 1, 0)
expect_equal(entropy_rate(series, k = 2, local = !T),
0.610249, tolerance = 1e-6)
series <- matrix(0, nrow = 9, ncol = 4)
series[, 1] <- c(3, 3, 3, 2, 1, 0, 0, 0, 1)
series[, 2] <- c(2, 2, 3, 3, 3, 3, 2, 1, 0)
series[, 3] <- c(0, 0, 0, 0, 1, 1, 0, 0, 0)
series[, 4] <- c(1, 1, 0, 0, 0, 1, 1, 2, 2)
expect_equal(entropy_rate(series, k = 2, local = !T), 0.544468, tolerance = 1e-6)
})
test_that("entropy_rate local on single series", {
expect_equal(mean(entropy_rate(c(1, 0, 0, 0, 0, 0, 0, 0, 0),
k = 2, local = T)), 0.000000, tolerance = 1e-6)
expect_equal(mean(entropy_rate(c(0, 0, 1, 1, 1, 1, 0, 0, 0),
k = 2, local = T)), 0.679270, tolerance = 1e-6)
expect_equal(mean(entropy_rate(c(1, 0, 0, 0, 0, 0, 0, 1, 1),
k = 2, local = T)), 0.515663, tolerance = 1e-6)
expect_equal(mean(entropy_rate(c(3, 3, 3, 2, 1, 0, 0, 0, 1),
k = 2, local = T)), 0.571428, tolerance = 1e-6)
expect_equal(mean(entropy_rate(c(2, 2, 3, 3, 3, 3, 2, 1, 0),
k = 2, local = T)), 0.393556, tolerance = 1e-6)
expect_equal(mean(entropy_rate(c(2, 2, 2, 2, 2, 2, 1, 1, 1),
k = 2, local = T)), 0.515662, tolerance = 1e-6)
})
test_that("entropy_rate local on ensemble of series", {
series <- matrix(0, nrow = 8, ncol = 2)
series[, 1] <- c(1, 1, 0, 0, 1, 0, 0, 1)
series[, 2] <- c(0, 0, 0, 1, 0, 0, 0, 1)
expect_equal(mean(entropy_rate(series, k = 2, local = T)),
0.459148, tolerance = 1e-6)
series <- matrix(0, nrow = 9, ncol = 9)
series[, 1] <- c(1, 0, 0, 0, 0, 0, 0, 0, 0)
series[, 2] <- c(0, 0, 1, 1, 1, 1, 0, 0, 0)
series[, 3] <- c(1, 0, 0, 0, 0, 0, 0, 1, 1)
series[, 4] <- c(1, 0, 0, 0, 0, 0, 0, 1, 1)
series[, 5] <- c(0, 0, 0, 0, 0, 1, 1, 0, 0)
series[, 6] <- c(0, 0, 0, 0, 1, 1, 0, 0, 0)
series[, 7] <- c(1, 1, 1, 0, 0, 0, 0, 1, 1)
series[, 8] <- c(0, 0, 0, 1, 1, 1, 1, 0, 0)
series[, 9] <- c(0, 0, 0, 0, 0, 0, 1, 1, 0)
expect_equal(mean(entropy_rate(series, k = 2, local = T)),
0.610249, tolerance = 1e-6)
series <- matrix(0, nrow = 9, ncol = 4)
series[, 1] <- c(3, 3, 3, 2, 1, 0, 0, 0, 1)
series[, 2] <- c(2, 2, 3, 3, 3, 3, 2, 1, 0)
series[, 3] <- c(0, 0, 0, 0, 1, 1, 0, 0, 0)
series[, 4] <- c(1, 1, 0, 0, 0, 1, 1, 2, 2)
expect_equal(mean(entropy_rate(series, k = 2, local = T)),
0.544468, tolerance = 1e-6)
})
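# Quick illustration of the interface exercised above (requires the rinform package):
# xs <- c(0, 0, 1, 1, 1, 1, 0, 0, 0)
# entropy_rate(xs, k = 2)                # average entropy rate, about 0.679 bits
# entropy_rate(xs, k = 2, local = TRUE)  # per-time-step local values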
|
/tests/testthat/test_entropy_rate.R
|
permissive
|
ELIFE-ASU/rinform
|
R
| false
| false
| 5,298
|
r
|
library(testthat)
library(Dengue)
test_check("Dengue")
|
/dengue/pkgs/Dengue/tests/testthat.R
|
no_license
|
gwenrino/CSX415.1-project
|
R
| false
| false
| 56
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dimtrackdata.R
\name{dim.trackdata}
\alias{dim.trackdata}
\alias{dim}
\title{A method of the generic function dim for objects of class 'trackdata'}
\usage{
\method{dim}{trackdata}(x)
}
\arguments{
\item{x}{a track data object}
}
\description{
The function returns the dimension attributes of a track data object.
}
\details{
The function returns the dimension attributes of a track data object as the
number of segments x number of tracks. c(nrow(x$index), ncol(x$data))
}
\examples{
#isol.fdat is the formant track of the segment list isol
#write out the dimension of the track data object
dim(isol.fdat)
#because there are 13 segments
isol.fdat$ftime
#and there are 4 rows for each segment (see here for the first segment)
isol.fdat$data[1,]
}
\author{
Jonathan Harrington
}
\keyword{methods}
|
/man/dim.trackdata.Rd
|
no_license
|
IPS-LMU/emuR
|
R
| false
| true
| 901
|
rd
|
#' Add tables to a [`dm`]
#'
#' @description
#' `cdm_add_tbl()` adds one or more tables to a [`dm`].
#' It uses [mutate()] semantics.
#'
#' @return The initial `dm` with the additional table(s).
#'
#' @seealso [cdm_rm_tbl()]
#'
#' @param dm A [`dm`] object.
#' @param ... One or more tables to add to the `dm`.
#' If no explicit name is given, the name of the expression is used.
#' @inheritParams vctrs::vec_as_names
#'
#' @export
cdm_add_tbl <- function(dm, ..., repair = "unique", quiet = FALSE) {
check_dm(dm)
new_names <- names(exprs(..., .named = TRUE))
new_tables <- list(...)
check_new_tbls(dm, new_tables)
old_names <- src_tbls(dm)
names_list <- repair_table_names(old_names, new_names, repair, quiet)
# rename old tables in case name repair changed their names
dm <- cdm_select_tbl_impl(dm, names_list$new_old_names)
cdm_add_tbl_impl(dm, new_tables, names_list$new_names)
}
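# Illustrative usage sketch (object names are hypothetical, not from the package
# docs): assuming `my_dm` is an existing dm and `flights_small` is a table on the
# same source, the table is added under the name of the expression.
# my_dm2 <- cdm_add_tbl(my_dm, flights_small)
# src_tbls(my_dm2)   # now includes "flights_small"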
repair_table_names <- function(old_names, new_names, repair = "check_unique", quiet = FALSE) {
all_names <- tryCatch(
vctrs::vec_as_names(c(old_names, new_names), repair = repair, quiet = quiet),
error = function(e) {
if (inherits(e, "vctrs_error_names_must_be_unique")) abort_need_unique_names(intersect(old_names, new_names))
abort(e)
}
)
new_old_names <- set_names(old_names, all_names[seq_along(old_names)])
new_names <-
all_names[seq2(length(old_names) + 1, length(all_names))]
list(new_old_names = new_old_names, new_names = new_names)
}
cdm_add_tbl_impl <- function(dm, tbls, table_name, filters = vctrs::list_of(new_filter())) {
def <- cdm_get_def(dm)
def_0 <- def[rep_along(table_name, NA_integer_), ]
def_0$table <- table_name
def_0$data <- tbls
def_0$pks <- vctrs::list_of(new_pk())
def_0$fks <- vctrs::list_of(new_fk())
def_0$filters <- filters
new_dm3(vctrs::vec_rbind(def, def_0))
}
#' Remove tables from a [`dm`]
#'
#' @description
#' Removes one or more tables from a [`dm`].
#'
#' @return The dm without the removed table(s) that were present in the initial `dm`.
#'
#' @seealso [cdm_add_tbl()], [cdm_select_tbl()]
#'
#' @param dm A [`dm`] object.
#' @param ... One or more unquoted table names to remove from the `dm`.
#'
#' @export
cdm_rm_tbl <- function(dm, ...) {
check_dm(dm)
table_names <-
ensyms(..., .named = FALSE) %>%
map_chr(~ as_name(.))
check_correct_input(dm, table_names)
cdm_select_tbl(dm, -one_of(!!!table_names))
}
check_new_tbls <- function(dm, tbls) {
orig_tbls <- cdm_get_tables(dm)
# are all new tables on the same source as the original ones?
if (has_length(orig_tbls) && !all_same_source(c(orig_tbls[1], tbls))) {
abort_not_same_src()
}
}
|
/R/add-tbl.R
|
permissive
|
bbecane/dm
|
R
| false
| false
| 2,684
|
r
|
##' Subset datasets and extract variables
##'
##' @param x a CrunchDataset
##' @param i As with a \code{data.frame}, there are two cases: (1) if no other
##' arguments are supplied (i.e. \code{x[i]}), \code{i} provides for
##' \code{as.list} extraction: columns of the dataset rather than rows. If
##' character, identifies variables to extract based on their aliases (by
##' default: set \code{options(crunch.namekey.dataset="name")} to use variable
##' names); if numeric or logical,
##' extracts variables accordingly. Alternatively, (2) if \code{j} is specified
##' (as either \code{x[i, j]} or \code{x[i,]}), \code{i} is an object of class
##' \code{CrunchLogicalExpr} that will define a subset of rows.
##' @param j columnar extraction, as described above
##' @param name columnar extraction for \code{$}
##' @param drop logical: automatically simplify a 1-column Dataset to a Variable?
##' Default is FALSE, and the TRUE option is in fact not implemented.
##' @param ... additional arguments
##' @return \code{[} yields a Dataset; \code{[[} and \code{$} return a Variable
##' @name dataset-extract
##' @aliases dataset-extract
NULL
##' @rdname dataset-extract
##' @export
setMethod("[", c("CrunchDataset", "ANY"), function (x, i, ..., drop=FALSE) {
x@variables <- variables(x)[i]
return(x)
})
##' @rdname dataset-extract
##' @export
setMethod("[", c("CrunchDataset", "character"), function (x, i, ..., drop=FALSE) {
allnames <- getIndexSlot(allVariables(x), namekey(x)) ## Include hidden
w <- match(i, allnames)
if (any(is.na(w))) {
halt("Undefined columns selected: ", serialPaste(i[is.na(w)]))
}
x@variables <- allVariables(x)[w]
return(x)
})
##' @rdname dataset-extract
##' @export
setMethod("[", c("CrunchDataset", "missing", "ANY"), function (x, i, j, ..., drop=FALSE) {
x[j]
})
##' @rdname dataset-extract
##' @export
setMethod("[", c("CrunchDataset", "CrunchLogicalExpr", "missing"), function (x, i, j, ..., drop=FALSE) {
f <- activeFilter(x)
if (length(zcl(f))) {
i <- f & i
}
activeFilter(x) <- i
return(x)
})
##' @rdname dataset-extract
##' @export
setMethod("[", c("CrunchDataset", "CrunchLogicalExpr", "ANY"), function (x, i, j, ..., drop=FALSE) {
## Do the filtering of rows, then cols
x <- x[i,]
return(x[j])
})
##' @rdname dataset-extract
##' @export
setMethod("subset", "CrunchDataset", function (x, ...) {
x[..1,]
})
##' @rdname dataset-extract
##' @export
setMethod("[[", c("CrunchDataset", "ANY"), function (x, i, ..., drop=FALSE) {
out <- variables(x)[[i]]
if (!is.null(out)) {
out <- CrunchVariable(out, filter=activeFilter(x))
}
return(out)
})
##' @rdname dataset-extract
##' @export
setMethod("[[", c("CrunchDataset", "character"), function (x, i, ..., drop=FALSE) {
stopifnot(length(i) == 1)
n <- match(i, names(x))
if (is.na(n)) {
## See if the variable in question is hidden
hvars <- hidden(x)
hnames <- getIndexSlot(hvars, namekey(x))
n <- match(i, hnames)
if (is.na(n)) {
return(NULL)
} else {
## If so, return it with a warning
out <- hvars[[n]]
if (!is.null(out)) {
out <- CrunchVariable(out, filter=activeFilter(x))
}
warning("Variable ", i, " is hidden", call.=FALSE)
return(out)
}
} else {
return(callNextMethod(x, n, ..., drop=drop))
}
})
##' @rdname dataset-extract
##' @export
setMethod("$", "CrunchDataset", function (x, name) x[[name]])
## Things that set
.addVariableSetter <- function (x, i, value) {
if (i %in% names(x)) {
## We're not adding, we're updating.
return(.updateValues(x, i, value))
} else {
if (inherits(value, "VariableDefinition")) {
## Just update its alias with the one we're setting
value$alias <- i
## But also check to make sure it has a name, and use `i` if not
value$name <- value$name %||% i
} else {
## Create a VarDef and use `i` as name and alias
value <- VariableDefinition(value, name=i, alias=i)
}
addVariables(x, value)
}
}
.updateValues <- function (x, i, value, filter=NULL) {
if (length(i) != 1) {
halt("Can only update one variable at a time (for the moment)")
}
variable <- x[[i]]
if (is.null(filter)) {
variable[] <- value
} else {
variable[filter] <- value
}
return(x)
}
.updateVariableMetadata <- function (x, i, value) {
## Confirm that x[[i]] has the same URL as value
v <- Filter(function (a) a[[namekey(x)]] == i,
index(allVariables(x)))
if (length(v) == 0) {
## We may have a new variable, and it's not
## yet in our variable catalog. Let's check.
x <- refresh(x)
if (!(self(value) %in% urls(allVariables(x)))) {
halt("This variable does not belong to this dataset")
}
## Update value with `i` if it is
## different. I.e. set the alias based on i if not otherwise
## specified. (setTupleSlot does the checking)
tuple(value) <- setTupleSlot(tuple(value), namekey(x), i)
} else if (!identical(names(v), self(value))) {
## x[[i]] exists but is a different variable than value
halt("Cannot overwrite one Variable with another")
}
allVariables(x)[[self(value)]] <- value
return(x)
}
##' Update a variable or variables in a dataset
##'
##' @param x a CrunchDataset
##' @param i For \code{[}, a \code{CrunchLogicalExpr}, numeric, or logical
##' vector defining a subset of the rows of \code{x}. For \code{[[}, see
##' \code{j} for the as.list column subsetting.
##' @param j if character, identifies variables to extract based on their
##' aliases (by default: set \code{options(crunch.namekey.dataset="name")}
##' to use variable names); if numeric or
##' logical, extracts variables accordingly. Note that this is the as.list
##' extraction, columns of the dataset rather than rows.
##' @param name like \code{j} but for \code{$}
##' @param value replacement values to insert. These can be \code{crunchExpr}s
##' or R vectors of the corresponding type
##' @return \code{x}, modified.
##' @aliases dataset-update
##' @name dataset-update
NULL
##' @rdname dataset-update
##' @export
setMethod("[[<-",
c("CrunchDataset", "character", "missing", "CrunchVariable"),
.updateVariableMetadata)
##' @rdname dataset-update
##' @export
setMethod("[[<-",
c("CrunchDataset", "ANY", "missing", "CrunchVariable"),
function (x, i, value) .updateVariableMetadata(x, names(x)[i], value))
##' @rdname dataset-update
##' @export
setMethod("[[<-",
c("CrunchDataset", "character", "missing", "ANY"),
.addVariableSetter)
##' @rdname dataset-update
##' @export
setMethod("[[<-",
c("CrunchDataset", "character", "missing", "CrunchLogicalExpr"),
function (x, i, value) {
halt("Cannot currently derive a logical variable")
})
##' @rdname dataset-update
##' @export
setMethod("[[<-",
c("CrunchDataset", "ANY"),
function (x, i, value) {
halt("Only character (name) indexing supported for [[<-")
})
##' @rdname dataset-update
##' @export
setMethod("[[<-",
c("CrunchDataset", "character", "missing", "NULL"),
function (x, i, value) {
allnames <- getIndexSlot(allVariables(x), namekey(x)) ## Include hidden
if (!(i %in% allnames)) {
message(dQuote(i), " is not a variable; nothing to delete by assigning NULL")
return(x)
}
return(deleteVariables(x, i))
})
##' @rdname dataset-update
##' @export
setMethod("[[<-",
c("CrunchDataset", "ANY", "missing", "NULL"),
function (x, i, value) deleteVariables(x, names(x)[i]))
##' @rdname dataset-update
##' @export
setMethod("$<-", c("CrunchDataset"), function (x, name, value) {
x[[name]] <- value
return(x)
})
##' @rdname dataset-update
##' @export
setMethod("[<-", c("CrunchDataset", "ANY", "missing", "list"),
function (x, i, j, value) {
## For lapplying over variables to edit metadata
stopifnot(length(i) == length(value),
all(vapply(value, is.variable, logical(1))))
for (z in seq_along(i)) {
x[[i[z]]] <- value[[z]]
}
return(x)
})
## TODO: add similar [<-.CrunchDataset, CrunchDataset/VariableCatalog
##' @rdname dataset-update
##' @export
setMethod("[<-", c("CrunchDataset", "CrunchExpr", "ANY", "ANY"),
function (x, i, j, value) {
if (j %in% names(x)) {
return(.updateValues(x, j, value, filter=i))
} else {
halt("Cannot add variable to dataset with a row index specified")
}
})
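## Illustrative usage sketch (dataset and variable names are hypothetical):
## ds <- loadDataset("Example survey")   # assumes such a dataset exists
## v <- ds$age                           # `$` extracts a CrunchVariable
## ds65 <- ds[ds$age > 65, ]             # `[` with a CrunchLogicalExpr filters rows
## ds[ds$age > 65, "retired"] <- TRUE    # `[<-` updates an existing variable on a subset
## ds$obsolete_var <- NULL               # assigning NULL deletes a variable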
|
/R/dataset-extract.R
|
no_license
|
digideskio/rcrunch
|
R
| false
| false
| 8,788
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interface.R
\name{mean_prob_tox}
\alias{mean_prob_tox}
\title{Mean toxicity rate at each dose.}
\usage{
mean_prob_tox(x, ...)
}
\arguments{
\item{x}{Object of class \code{\link{selector}}}
\item{...}{arguments passed to other methods}
}
\value{
a numerical vector
}
\description{
Get the estimated mean toxicity rate at each dose under investigation. This
is a set of modelled statistics. The underlying models estimate toxicity
probabilities in different ways. If no model-based estimate of the mean is
available, this function will return a vector of NAs.
}
\examples{
# CRM example
skeleton <- c(0.05, 0.1, 0.25, 0.4, 0.6)
target <- 0.25
outcomes <- '1NNN 2NTN'
fit <- get_dfcrm(skeleton = skeleton, target = target) \%>\% fit(outcomes)
fit \%>\% mean_prob_tox()
}
|
/man/mean_prob_tox.Rd
|
no_license
|
brockk/escalation
|
R
| false
| true
| 847
|
rd
|
## The two functions "makeCacheMatrix" and "cacheSolve" work in conjunction.
## The first function "makeCacheMatrix" is used to convert the given matrix into a special object and
## returns a list.
## The second function "cacheSolve", using the special object returned by the "makeCacheMatrix" function, computes
## the inverse of the matrix and caches it in the list created by the first function for later retrieval.
## This function takes the matrix to be inverted and creates a list that stores the matrix along with the functions to set and get the result.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(solve) m <<- solve
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function takes the object (list) returned by the "makeCacheMatrix" function and checks if the inverse is already computed.
## If not, it gets the matrix, computes and returns the inverse, and at the same time caches it in the list created by "makeCacheMatrix" for later retrieval.
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
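## Illustrative usage (not part of the original assignment code):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(cm)   # computes the inverse and caches it
## cacheSolve(cm)   # prints "getting cached data" and returns the cached inverse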
|
/cachematrix.R
|
no_license
|
Rmkkr79/ProgrammingAssignment2
|
R
| false
| false
| 1,351
|
r
|
## The two functions "makeCacheMatrix" and "cacheSolve" work in conjunction.
## The first function "makeCacheMatrix" is used to convert the given matrix into a special object and
## returns a list.
## The second function "cacheSolve", using the special object returned by the "makeCacheMatrix" function, computes
## the inverse of the matrix and caches it in the list created by the first function for later retrievel.
## This function takes the matrix to be inversed and create a list and stores the matrix along with the functions to set and get the result.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(solve) m <<- solve
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function takes the object (list) returned by the "makeCaheMatrix" function and checks if the inverse is already computed.
## If not, gets the matrix, computes and returns the inverse at the same time it caches it in the list created by "makeCacheMatrix" function for later retrivel.
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
library( data.table)
library( raster)
# library( spBayes)
library( disperseR)
library( ggplot2)
library( viridis)
library( lubridate)
library( mgcv)
library( sf) # st_transform() is called unqualified in usa.functioner() below
# library( xgboost)
library( pbmcapply)
`%ni%` <- Negate(`%in%`)
#======================================================================#
## ddm to grid raster
#======================================================================#
ddm_to_zip <- function( ddm_coal_file,
Year,
avg.period = 'year'){
p4s <- "+proj=lcc +lat_1=33 +lat_2=45 +lat_0=40 +lon_0=-97 +a=6370000 +b=6370000"
#read data,
ddm_coal <- fread(ddm_coal_file)
# melt, extract year, month, day
ddm_coal.m <- melt( ddm_coal, id.vars = c( 'X', 'Y'),
variable.name = 'date.in', value.name = 'coal_pm25')
ddm_coal.m[, `:=` ( date.in = as.Date( date.in, format = '%m/%d/%y'))]
ddm_coal.m[, `:=` ( year.in = year( date.in),
month.in = month( date.in))]
# remove blow up values (greater than 30)
ddm_coal.m[ coal_pm25 < 0 | coal_pm25 > 30, coal_pm25 := NA]
#rasterize as brick
names.ddm <- unique( ddm_coal.m$date.in)
ddm_coal.b <- brick( lapply( names.ddm,
function( name, dt.m){
r <- rasterFromXYZ(dt.m[ date.in == name,
.(x = X, y = Y, z = coal_pm25)],
crs = CRS(p4s))
names( r) <- name
return( r)
}, ddm_coal.m))
# fill NA's with linear interpolation across days
ddm_coal.b <- approxNA( ddm_coal.b, rule=2)
names( ddm_coal.b) <- names.ddm
# take monthly averages
if( avg.period == 'month'){
ddm_coal.month <- lapply( 1:12,
function( m, ddm_raster.b){
names.dates <- as.Date( gsub( '\\.', '-', gsub( 'X', '', names( ddm_raster.b))))
id <- which( month( names.dates) == m)
ddm_coal.mon <- mean( subset( ddm_raster.b, id))
return( ddm_coal.mon)
}, ddm_coal.b)
ddm_coal.z <- brick( ddm_coal.month)
names( ddm_coal.z) <- paste( Year, formatC( 1:12, width = 2, flag = '0'), sep = '.')
# take annual averages
} else if( avg.period == 'year'){
# take annual average
ddm_coal.z <- mean( ddm_coal.b)
names( ddm_coal.z) <- Year
}
return( ddm_coal.z)
}
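# Illustrative call (the input path is hypothetical; the CSV is expected to hold
# X, Y coordinates plus one column of daily coal PM2.5 per date):
# ddm2005.ann <- ddm_to_zip( ddm_coal_file = 'coal_impacts_2005.csv',
#                            Year = 2005, avg.period = 'year')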
#======================================================================#
## functions to get meteorology data
# download the necessary met files, 20th century reanalysis
#======================================================================#
downloader.fn <- function( filename,
destination = file.path('~', 'Dropbox', 'Harvard', 'RFMeval_Local',
'Comparisons_Intermodel', 'Global_meteorology'),
dataset = c( '20thC_ReanV2c', 'ncep.reanalysis.derived', 'NARR')){
if( length( dataset) > 1)
dataset <- dataset[1]
fileloc <- file.path( destination, dataset)
# create directory to store in
dir.create( fileloc,
recursive = T,
showWarnings = F)
# name variable, filenames
varname_NOAA <- gsub( "\\..*", "", filename)
file_NOAA <- file.path( fileloc, filename)
# define URL
if( dataset == '20thC_ReanV2c'){
# https://www.esrl.noaa.gov/psd/data/gridded/data.20thC_ReanV2c.monolevel.mm.html
url_NOAA <- paste0( "ftp://ftp.cdc.noaa.gov/Datasets/20thC_ReanV2c/Monthlies/gaussian/monolevel/", filename)
} else if( dataset == 'ncep.reanalysis.derived'){
url_NOAA <- paste0( "ftp://ftp.cdc.noaa.gov/Datasets/ncep.reanalysis.derived/surface/", filename)
}else if( dataset == 'NARR'){
url_NOAA <- paste0( "ftp://ftp.cdc.noaa.gov/Datasets/NARR/Monthlies/monolevel/", filename)
}
if( !file.exists( file_NOAA))
download.file( url = url_NOAA,
destfile = file_NOAA)
hpbl_rasterin <- brick( x = file_NOAA,
varname = varname_NOAA)
return( hpbl_rasterin)
}
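# Illustrative call (the file name follows the NARR monolevel monthly naming
# convention; adjust `filename` and `destination` as needed):
# temp.in <- downloader.fn( filename = 'air.2m.mon.mean.nc', dataset = 'NARR')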
#======================================================================#
## functions to get meteorology data
# extract the year of interest, average by year or return months
#======================================================================#
extract_year.fn <- function( raster.in = list.met[[1]],
year.in = 2005,
avg.period = 'year',
dataset = c( '20thC_ReanV2c', 'ncep.reanalysis.derived', 'NARR')){
# default to 20th cent reanalysis
if( length( dataset) > 1){
dataset <- dataset[1]
print( paste( 'No dataset specified, defaulting to', dataset))
}
# name months 1:12 for extracting from raster
names.months <- paste0( year.in, '-',
formatC( 1:12, width = 2, flag = '0'), '-',
'01')
# extract monthly dates using function from hyspdisp
raster.sub <- brick( subset_nc_date( hpbl_brick = raster.in,
vardate = names.months))
#NARR dataset requires rotating
if( dataset != 'NARR')
raster.sub <- rotate( raster.sub)
# take annual mean
if( avg.period == 'year'){
out <- stackApply( raster.sub, indices = rep( 1, 12), fun = mean)
} else
out <- raster.sub
return( out)
}
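# Illustrative call (raster.in is assumed to be a monthly brick, e.g. from downloader.fn):
# temp2005 <- extract_year.fn( raster.in = temp.in, year.in = 2005,
#                              avg.period = 'year', dataset = 'NARR')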
#======================================================================#
## functions to get meteorology data
# trim data over US, create raster object
#======================================================================#
usa.functioner <- function( year.in = 2005,
list.met,
dataset = c( '20thC_ReanV2c', 'ncep.reanalysis.derived', 'NARR'),
avg.period = 'year',
return.usa.mask = F,
return.usa.sub = T){
# extract year
  mets <- lapply( list.met,
                  extract_year.fn,
                  year.in = year.in,
                  avg.period = avg.period,
                  dataset = dataset)
crs.str <- projection( list.met[[1]])
crs.usa <- crs( crs.str)
  # convert temp from Kelvin to Celsius
mets$temp <- mets$temp - 273.15
# calculate windspeed
# calculate meteorology wind angle (0 is north wind)
# http://weatherclasses.com/uploads/3/6/2/3/36231461/computing_wind_direction_and_speed_from_u_and_v.pdf
if( 'uwnd' %in% names( list.met) & 'vwnd' %in% names( list.met)){
mets$wspd <- sqrt( mets$uwnd ^ 2 + mets$vwnd ^ 2)
mets$phi <- atan2( mets$uwnd, mets$vwnd) * 180 / pi + 180
names( mets$wspd) <- names(mets$vwnd)
names( mets$phi) <- names(mets$vwnd)
}
  # load USA state polygons from the USAboundaries package
us_states.names <- state.abb[!(state.abb %in% c( 'HI', 'AK'))]
us_states <- st_transform( USAboundaries::us_states(), crs.str)
us_states.sp <- sf::as_Spatial(us_states)[ us_states$state_abbr %in% us_states.names,]
if( return.usa.mask){
return( us_states.sp)
}
if( return.usa.sub){
mets_crop <- rasterize( us_states.sp, mets[[1]], getCover=TRUE)
mets_crop[mets_crop==0] <- NA
mets_crop[!is.na( mets_crop)] <- 1
# crop to USA
if( avg.period == 'year'){
mets.out.l <- lapply( mets, function( x){
trim( mask(x, mets_crop, maskvalue = NA),
padding = 1)
})
mets.out <- brick( mets.out.l)
} else{
names.months <- names( mets[[1]])
mets.out.l <- lapply( mets,
function( x){
lapply( names.months,
function( y){
r <- subset( x, y)
trim( mask(r, mets_crop, maskvalue = NA),
padding = 1)
})})
mets.out.b <- lapply( names.months,
function( n){
lapply( mets.out.l, function( l){
b <- brick( l)
subset( b, n)
})
})
# each month is a brick
mets.out <- lapply( mets.out.b, brick)
names( mets.out) <- names.months
}
return( mets.out)
} else{
if( avg.period == 'year'){
mets.out <- brick( mets)
} else{
# reorganize - list of months
names.months <- names( mets[[1]])
mets.out <- lapply( names.months,
function( name.ext, X){
brick( lapply( X, '[[', name.ext))
}, mets)
names( mets.out) <- names.months
}
return( mets.out)
}
}
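# Illustrative call (list.met is assumed to be a named list of met bricks,
# e.g. list( temp = ..., uwnd = ..., vwnd = ..., apcp = ...)):
# mets2005.m <- usa.functioner( year.in = 2005, list.met = list.met,
#                               dataset = 'NARR', avg.period = 'month')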
#======================================================================#
# define the spBayes model function
#======================================================================#
splM.hyads.ddm <- function( dummy.n = 1,
coords = as.matrix( hyads2005.dt[,.( x, y)]),
Y = ddm2005.dt$X2005,
X = data.table( intercept = 1, hyads = hyads2005.dt$X2005),
seed.n = NULL,
...){
set.seed( seed.n)
quants <- function(x){
quantile(x, prob=c(0.5, 0.025, 0.975))
}
# holdout fraction
ho.frac <- .1
# define number of samples
n.samples <- 5000
# define priors
starting <- list("tau.sq"=1, "sigma.sq"=1, "phi"=6)
tuning <- list("tau.sq"=0.01, "sigma.sq"=0.01, "phi"=0.1)
priors <- list("beta.Flat", "tau.sq.IG"=c(2, 1),
"sigma.sq.IG"=c(2, 1), "phi.Unif"=c(3, 30))
# define holdout parameters
ho <- sample( 1:length( Y), ceiling( ho.frac * length( Y)))
#convert X & Y to matrices
X.m <- as.matrix( X)
Y.m <- as.matrix( Y)
# define inputs
coords.ho <- coords[ho,]
coords.tr <- coords[-ho,]
Y.ho <- Y.m[ho]
Y.tr <- Y.m[-ho]
X.ho <- X.m[ho,]
X.tr <- X.m[-ho,]
# define burn in
burn.in <- floor(0.75*n.samples)
# train the model
m.i <- spLM( Y.tr ~ X.tr - 1, coords = coords.tr,
modified.pp = TRUE, ...,
starting = starting, tuning = tuning, priors = priors,
cov.model = "exponential",
n.samples = n.samples, n.report = 2500)
# recover estimates of beta and theta
rt.rec <- system.time( {
m.i.rec <- spRecover(m.i, start=burn.in, thin=5, n.report=100)
})
# return simulated y.hat's
rt.y.hat <- system.time( {
m.i.pred <- spPredict( m.i, start=burn.in, thin=2, pred.covars = X.ho,
pred.coords=coords.ho, verbose=FALSE)
})
# find estimates of beta, theta, w.hat, and y.hat
beta.hat <- round(summary(m.i.rec$p.beta.recover.samples)$quantiles[c(3,1,5)],6)
theta.hat <- round( summary( window( m.i$p.theta.samples,
start = burn.in))$quantiles[, c( 3, 1, 5)], 2)
w.hat <- apply(m.i.rec$p.w.recover.samples, 1, median)
y.hat <- data.table( t( apply(m.i.pred$p.y.predictive.samples, 1, quants)))
# set up evaluation data.table
Y.ho.tr.dt <- data.table( cbind( coords.ho, y.hat[,`50%`], Y.ho))
setnames( Y.ho.tr.dt, 'V3', 'Y.hat')
# rasterize output for plots
w.hat.raster <- rasterFromXYZ( cbind( coords.tr, w.hat))
y.hat.raster <- rasterFromXYZ( Y.ho.tr.dt[, .( x, y, Y.hat)])
Y.tr.raster <- rasterFromXYZ( cbind( coords.tr, Y.tr))
Y.ho.raster <- rasterFromXYZ( Y.ho.tr.dt[, .( x, y, Y.ho)])
# mcmc plots
# plot( m.i$p.theta.samples)
# plot( m.i.rec$p.beta.recover.samples)
# spatial areas of y.hat - Y.ho
par(mfrow=c(1,3))
plot( w.hat.raster, main = "w.hat (spatial adjustment term)")
points( m.i$knot.coords, cex=1)
plot( y.hat.raster - Y.ho.raster, main = "Y.hat - Y.ho")
plot( (y.hat.raster - Y.ho.raster) / Y.ho.raster, main = "(Y.hat - Y.ho) / Y.ho")
par(mfrow=c(1,1))
# check out simpler models - set up inputs
names.covars <- names( X)
XY.tr <- data.table( Y = Y.tr, X.tr)
XY.ho <- data.table( Y = Y.ho, X.ho)
setnames( XY.tr, names(XY.tr)[names(XY.tr) %ni% 'Y'], names.covars)
setnames( XY.ho, names(XY.tr)[names(XY.tr) %ni% 'Y'], names.covars)
# check out simpler models - define them
form <- as.formula( paste( 'Y ~ -1 +', paste( names.covars, collapse = '+')))
m.lm <- lm( form, data = XY.tr)
m.mean <- mean( Y.tr / XY.tr$hyads)
# check out simpler models - get predicted Y.ho
y.hat.lm <- predict( m.lm, newdata = XY.ho)
y.hat.mean <- XY.ho$hyads * m.mean
# calculate evaluation metrics
eval.fn <- function( Yhat, Yact, mod.name){
num.diff <- sum( Yhat - Yact)
abs.diff <- sum( abs( Yhat - Yact))
denom <- sum( Yact)
metrics <- data.table( mod.name = mod.name,
NMB = num.diff / denom,
NME = abs.diff / denom,
MB = num.diff / length( ho),
RMSE = sqrt( sum( ( Yhat - Yact) ^ 2) / length( Y.ho)),
R = cor( Yhat, Yact))
return( metrics)
}
metrics.out <- rbind( eval.fn( Y.ho.tr.dt$Y.hat, Y.ho, 'spLM'),
eval.fn( y.hat.lm, Y.ho, 'lm'),
eval.fn( y.hat.mean, Y.ho, 'mean'))
out <- list( metrics = metrics.out,
runtimes = data.table( ncells.tr = length( Y.tr),
train = round(m.i$run.time[3]/60,3),
recover = round(rt.rec[3]/60,3),
est.y.hat = round(rt.y.hat[3]/60,3)))
return( out)
}
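# Illustrative call (requires the spBayes package to be attached; the defaults
# above assume the 2005 HyADS and CMAQ-DDM objects exist, and `knots` is passed
# through `...` to spLM(); the values here are only a sketch):
# spl.eval <- splM.hyads.ddm( seed.n = 1, knots = c( 6, 6, 0.1))
# spl.eval$metrics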
#======================================================================#
# define the linear model holdout function
#======================================================================#
lm.hyads.ddm.holdout <- function( seed.n = NULL,
dat.stack = dats2005.s.small,
dat.stack.pred = NULL,
y.name = 'cmaq.ddm',
x.name = 'hyads',
name.idwe = 'tot.sum',
covars.names = NULL, #c( 'temp', 'apcp'),
ho.frac = .1,
return.mods = F,
...){
# define eval function
eval.fn <- function( Yhat, Yact, mod.name){
num.diff <- sum( Yhat - Yact, na.rm = T)
abs.diff <- sum( abs( Yhat - Yact), na.rm = T)
denom <- sum( Yact, na.rm = T)
metrics <- data.table( mod.name = mod.name,
NMB = num.diff / denom,
NME = abs.diff / denom,
MB = num.diff / length( Yhat),
RMSE = sqrt( sum( ( Yhat - Yact) ^ 2, na.rm = T) / length( Yhat)),
R = cor( Yhat, Yact, use = 'complete.obs'),
R.s = cor( Yhat, Yact, use = 'complete.obs', method = 'spearman'))
return( metrics)
}
set.seed( seed.n)
# if no covar names provided, use all covariates that aren't x or y
if( is.null( covars.names))
covars.names <- names( dat.stack)[names( dat.stack) %ni% c( x.name, y.name)]
# define holdout parameters
N <- ncell( dat.stack)
ho <- sample( 1:N, ceiling( ho.frac * N))
# extract coordinates
dat.coords <- coordinates( dat.stack)
dat.coords.ho <- data.table( dat.coords[ho,])
dat.coords.tr <- data.table( dat.coords[-ho,])
# define inputs
dat.stack.ho <- data.table( values( dat.stack)[ ho,])
dat.stack.tr <- data.table( values( dat.stack)[-ho,])
# special case for ho is zero
if( length( ho) == 0){
# extract coordinates
dat.coords.ho <- data.table( dat.coords)
dat.coords.tr <- data.table( dat.coords)
# define inputs
dat.stack.ho <- data.table( dat.coords.ho, values( dat.stack))
dat.stack.tr <- data.table( dat.coords.tr, values( dat.stack))
}
if( !is.null( dat.stack.pred)){
# extract coordinates
dat.coords.ho <- data.table( coordinates( dat.stack.pred))
# define inputs
dat.stack.ho <- data.table( dat.coords.ho, values( dat.stack.pred))
}
# create log variables
dat.stack.tr[, y.name.log := log( get( y.name))]
dat.stack.ho[, y.name.log := log( get( y.name))]
# check out linear regression models - define them
form.ncv <- as.formula( paste( 'y.name.log', '~', x.name))
form.cv_single_poly <-
as.formula( paste( 'y.name.log', '~ poly(', x.name, ', 2) +', x.name, ': (',
paste( c( covars.names),
collapse = '+'), ')')) #, '+ s( x, y, k = 20)'
form.cv_single <-
as.formula( paste( 'y.name.log', '~ ', x.name, '+', x.name, ': (',
paste( c( covars.names),
collapse = '+'), ')'))
form.cv_five <-
as.formula( paste( 'y.name.log', '~ ', x.name, '+', x.name, ': (',
paste( c( covars.names),
collapse = '+'), ')^5'))
# train the models
lm.ncv <- lm( form.ncv, data = dat.stack.tr)
lm.cv_single <- lm( form.cv_single, data = dat.stack.tr)
lm.cv_single_poly <- lm( form.cv_single_poly, data = na.omit( dat.stack.tr))
lm.cv_five <- lm( form.cv_five, data = dat.stack.tr)
# check out simpler models - get predicted Y.ho
y.ho <- unlist( dat.stack.ho[,..y.name])
y.hat.lm.ncv <- predict( lm.ncv, type = 'response', newdata = dat.stack.ho, se.fit = T)
y.hat.lm.cv_single <- predict( lm.cv_single, type = 'response', newdata = dat.stack.ho, se.fit = T)
y.hat.lm.cv_single_poly <- predict( lm.cv_single_poly, type = 'response', newdata = dat.stack.ho, se.fit = T)
y.hat.lm.cv_five <- predict( lm.cv_five, type = 'response', newdata = dat.stack.ho, se.fit = T)
# set up evaluation data.table
Y.ho.hat <- data.table( dat.coords.ho, y.ho,
y.hat.lm.ncv = y.hat.lm.ncv$fit,
y.hat.lm.cv_single = y.hat.lm.cv_single$fit,
y.hat.lm.cv_single_poly = y.hat.lm.cv_single_poly$fit,
y.hat.lm.cv_five = y.hat.lm.cv_five$fit)
Y.ho.hat.bias <- data.table( dat.coords.ho, y.ho,
y.hat.lm.ncv = y.hat.lm.ncv$fit - y.ho,
y.hat.lm.cv_single = y.hat.lm.cv_single$fit - y.ho,
y.hat.lm.cv_single_poly = y.hat.lm.cv_single_poly$fit - y.ho,
y.hat.lm.cv_five = y.hat.lm.cv_five$fit - y.ho)
Y.ho.hat.se <- data.table( dat.coords.ho,
y.hat.lm.ncv = y.hat.lm.ncv$se.fit,
y.hat.lm.cv_single = y.hat.lm.cv_single$se.fit,
y.hat.lm.cv_single_poly = y.hat.lm.cv_single_poly$se.fit,
y.hat.lm.cv_five = y.hat.lm.cv_five$se.fit)
# rasterize output for plots
crs.in <- crs( dat.stack)
Y.ho.hat.raster <- projectRaster( rasterFromXYZ( Y.ho.hat, crs = crs.in), dat.stack)
Y.ho.hat.se.raster <- projectRaster( rasterFromXYZ( Y.ho.hat.se, crs = crs.in), dat.stack)
Y.ho.hat.bias.raster <- projectRaster( rasterFromXYZ( Y.ho.hat.bias, crs = crs.in), dat.stack)
# calculate evaluation metrics
metrics.out <- rbind( eval.fn( exp( Y.ho.hat$y.hat.lm.ncv), y.ho, 'lm.ncv'),
eval.fn( exp( Y.ho.hat$y.hat.lm.cv_single), y.ho, 'lm.cv_single'),
eval.fn( exp( Y.ho.hat$y.hat.lm.cv_single_poly), y.ho, 'lm.cv_single_poly'),
eval.fn( exp( Y.ho.hat$y.hat.lm.cv_five), y.ho, 'lm.cv_five'))
# listify the models
if( return.mods)
out <- list( metrics = metrics.out,
model.lm.ncv = lm.ncv,
model.lm.cv_single = lm.cv_single,
model.lm.cv_single_poly = lm.cv_single_poly,
model.lm.cv_five = lm.cv_five,
Y.ho.hat.raster = Y.ho.hat.raster,
Y.ho.hat.se.raster = Y.ho.hat.se.raster,
Y.ho.hat.bias.raster = Y.ho.hat.bias.raster)
if( !return.mods)
out <- list( metrics = metrics.out,
Y.ho.hat.raster = Y.ho.hat.raster,
Y.ho.hat.se.raster = Y.ho.hat.se.raster,
Y.ho.hat.bias.raster = Y.ho.hat.bias.raster)
return( out)
}
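# Illustrative call (dats2005.s.small is assumed to be a RasterStack with layers
# named 'cmaq.ddm', 'hyads', and the meteorology covariates):
# eval05 <- lm.hyads.ddm.holdout( seed.n = 1234, dat.stack = dats2005.s.small,
#                                 ho.frac = 0.1, return.mods = FALSE)
# eval05$metrics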
#======================================================================#
# extract a mean from a list of lists of rasters
#======================================================================#
mean.lol <- function( lol, layer1, layer2, plot.out = TRUE){
first.lol <- lapply( lol, '[[', layer1)
second.lol <- lapply( first.lol, '[[', layer2)
mean.lol <- mean( stack( second.lol), na.rm = T)
if( plot.out)
plot( mean.lol, main = layer2)
return( mean.lol)
}
#======================================================================#
# ggplot a raster
#======================================================================#
ggplot.a.raster <- function( ..., bounds = NULL, facet.names = NULL, mask.raster = NULL,
nrow. = NULL, ncol. = NULL, legend.name = NULL, theme.obj = theme()){
in.x <- list( ...)
if( length( in.x) == 1)
in.x <- in.x[[1]]
if( is.null( facet.names))
facet.names <- lapply( in.x, names)
if( is.null( names( in.x)) & !is.null( facet.names))
names( in.x) <- facet.names
in.x.crop <- in.x
if( !is.null( mask.raster) & is.list( in.x))
in.x.crop <- lapply( in.x, function( X, mask.raster.){
X.mask <- mask( X, mask.raster.)
X.crop <- crop( X.mask, mask.raster.)
return( X.crop)
}, mask.raster)
if( !is.null( mask.raster) & !is.list( in.x)){
in.x.mask <- mask( in.x, mask.raster)
in.x.crop <- crop( in.x.mask, mask.raster)
}
dat.dt <- rbindlist( lapply( facet.names, function( x.name, x.list) {
x <- x.list[[x.name]]
r_points <- rasterToPoints( x)
r_dt <- data.table( r_points)[, name.in := x.name]
setnames( r_dt, names( x), 'z')
return( r_dt)
}, in.x.crop))
if( !is.null( facet.names))
dat.dt[, name.in := factor( name.in, levels = facet.names)]
ggplot( dat.dt) +
geom_tile( aes( x = x, y = y, fill = z)) +
scale_fill_viridis( name = legend.name, limits = bounds, oob = scales::squish) +
facet_wrap( . ~ name.in, nrow = nrow., ncol = ncol.) +
# expand_limits( fill = 0) +
theme_bw() +
theme( axis.text = element_blank(),
axis.title = element_blank(),
axis.ticks = element_blank(),
legend.position = 'bottom',
legend.key.width = unit( ncol( in.x[[1]])/3000, 'npc'),
panel.grid = element_blank(),
strip.background = element_blank()) +
theme.obj
}
#======================================================================#
# do the predictions for many months
#======================================================================#
month.trainer <- function( name.m = names( mets2005.m)[1],
name.p = names( mets2006.m)[1],
name.x,
y.m,
ddm.m = ddm.m.all,
mets.m = mets.m.all,
emiss.m = d_nonegu.r,
idwe.m = idwe.m,
.mask.use = NULL,
cov.names = c( "temp", "rhum", "vwnd", "uwnd", "wspd")){ #, names( d_nonegu.r))){
# create training dataset
ddm.use <- ddm.m[[name.m]]
hyads.use <- y.m[[name.m]]
mets.use <- mets.m[[name.m]]
emiss.use <- emiss.m
idwe.use <- idwe.m[[name.m]]
# create prediction dataset
ddm.use.p <- ddm.m[[name.p]]
hyads.use.p <- y.m[[name.p]]
mets.use.p <- mets.m[[name.p]]
idwe.use.p <- idwe.m[[name.p]]
# fix names
names( ddm.use) <- 'cmaq.ddm'
names( ddm.use.p) <- 'cmaq.ddm'
names( hyads.use) <- name.x
names( hyads.use.p) <- name.x
names( idwe.use) <- 'tot.sum.idwe'
names( idwe.use.p) <- 'tot.sum.idwe'
# combine each dataset as stacks
dat.s <- project_and_stack( hyads.use, ddm.use, idwe.use,
mets.use, emiss.use, mask.use = .mask.use)
dat.p <- project_and_stack( hyads.use.p, ddm.use.p, idwe.use.p,
mets.use.p, emiss.use, mask.use = .mask.use)
# do the modeling
pred <- lm.hyads.ddm.holdout( dat.stack = dat.s, dat.stack.pred = dat.p, x.name = name.x,
ho.frac = 0, covars.names = cov.names,
name.idwe = 'tot.sum.idwe', return.mods = T)
return( pred)
}
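# Illustrative call (assumes monthly layers are named 'X2005.01.01', etc., as in
# the objects referenced by the argument defaults above):
# pred.jan <- month.trainer( name.m = 'X2005.01.01', name.p = 'X2006.01.01',
#                            name.x = 'hyads', y.m = hyads.m.all,
#                            .mask.use = mask.usa)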
#======================================================================#
# project and stack in one command
# ## need to update masking in all functions - cells with centroid not covered are cropped
## https://gis.stackexchange.com/questions/255025/r-raster-masking-a-raster-by-polygon-also-remove-cells-partially-covered
## should probably update usa mask too - need to use USAboundaries for consistency
#======================================================================#
project_and_stack <- function( ..., mask.use = NULL){
list.r <- list( ...)
if( length( list.r) > 1)
for( r in 2: length( list.r)){
list.r[[r]] <- projectRaster( list.r[[r]], list.r[[1]], alignOnly = F)
}
# mask over usa
if( !is.null( mask.use)){
mask.use <- spTransform( mask.use, crs( list.r[[1]]))
mask_crop <- rasterize( mask.use, list.r[[1]][[1]], getCover=TRUE)
mask_crop[mask_crop==0] <- NA
mask_crop[!is.na( mask_crop)] <- 1
list.out <- lapply( list.r, function( x){
x1 <- reclassify( x, c( NA, NA, 0))
# x[is.na(x)] <- 0
trim( mask(x1, mask_crop, maskvalue = NA),
padding = 1)
})
} else
list.out <- list.r
return( stack( list.out))
}
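# Illustrative call (the input rasters and the USA mask polygons are assumed to exist):
# dat.s <- project_and_stack( hyads.use, ddm.use, mets.use, mask.use = mask.usa)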
#======================================================================#
## function for converting individual unit concentrations to ugm3
# inputs - filename of grid w/ unit or summed impacts
# - model
# - covariate raster
# - 'x' name in the raster from model
#
# output - data table of state, pop-wgted impact for all input units
#======================================================================#
state_exposurer <- function(
month.n,
fstart,
year.m = 2006,
model.dataset = preds.mon.idwe06w05,
model.name = 'model.cv', #'model.gam'
name.x = 'idwe',
mask.use = mask.usa,
ddm.m = ddm.m.all,
mets.m = mets.m.all,
emiss.m = d_nonegu.r,
idwe.m. = idwe.m,
hyads.m = hyads.m.all,
grid_pop.r = grid_popwgt.r,
state_pops = copy( us_states.pop.dt),
p4s,
take.diff = F,
xboost = F
){
message( paste( 'Converting', month.name[month.n]))
month.N <- formatC( month.n, width = 2, flag = '0')
name.m <- paste0( 'X', year.m, '.', month.N, '.01')
model.m <- paste0( 'X2005.', month.N, '.01')
popyr.name <- paste0( 'X', year.m)
name.Date <- as.Date( name.m, format = 'X%Y.%m.%d')
if( name.x == 'idwe'){
fname <- paste0( fstart, year.m, '_', month.n, '.csv')
name.dat <- 'idwe'
} else{
fname <- paste0( fstart, year.m, '_', month.N, '.csv')
name.dat <- 'hyads'
}
# create prediction dataset
# ddm.use.p <- ddm.m[[name.m]]
mets.use.p <- mets.m[[name.m]]
idwe.use.p <- idwe.m.[[name.m]]
hyads.use.p <- hyads.m[[name.m]]
# fix names
# names( ddm.use.p) <- 'cmaq.ddm'
names( hyads.use.p) <- 'hyads'
names( idwe.use.p) <- 'idwe'
# rename population raster
grid_pop.r <- grid_pop.r[[popyr.name]]
names( grid_pop.r) <- 'pop'
dat.s <- project_and_stack( #ddm.use.p,
hyads.use.p, idwe.use.p,
mets.use.p, emiss.m, grid_pop.r, mask.use = mask.use)
# assign state names to raster
mask.r <- rasterize( mask.use[,'state_abbr'], dat.s[[1]])
mask.a <- levels( mask.r)[[1]]
dat.s$ID <- mask.r
# read in, remove
x.in1 <- fread( fname, drop = c( 'V1'))
x.in1[is.na( x.in1)] <- 0
suppressWarnings( x.in1[, `:=` ( yearmon = NULL, yearmonth = NULL)])
x.in2 <- x.in1[, colSums(x.in1) != 0, with = F]
suppressWarnings( x.in2[, `:=` ( x = NULL, y = NULL)])
x.in <- cbind( x.in1[, .( x, y)], x.in2)
# rasterize new x file
x.r <- rasterFromXYZ( x.in, crs = p4s)
x.n <- paste0( 'X', names( x.in)[!(names( x.in) %in% c( 'x', 'y'))])
x.n <- gsub( '^XX', 'X', x.n)
names( x.r) <- x.n
x.proj <- project_and_stack( dat.s[[1]], x.r, mask.use = mask.use)
names( x.proj)[2:dim( x.proj)[3]] <- x.n
# pick out the appropriate model
model.use <- model.dataset[ model.name, model.m][[1]]
# predict the base scenario (zero hyads/idwe)
dat.use0<- copy( dat.s)
dat.use0[[name.dat]] <- 0
dat.coords <- coordinates( dat.s)
dat_raw0.dt <- data.table( cbind( dat.coords, values( dat.use0)))
# xboost requires special treatment
if( xboost){
dat_raw0.dt.trim <- dat_raw0.dt[, model.use$feature_names, with = F]
xhold1c <- xgb.DMatrix( as.matrix( dat_raw0.dt.trim))
dat.pred0 <- predict( model.use, newdata = xhold1c)
} else
dat.pred0 <- predict( model.use, newdata = dat_raw0.dt)
dats0.r <- rasterFromXYZ( data.table( dat.coords, dat.pred0), crs = p4s)
# do the predictions
pred_pm.r <- brick( pbmcapply::pbmclapply( x.n, function( n){
gc()
# assign unit to prediction dataset
dat.use <- copy( dat.s)
dat.use[[name.dat]] <- x.proj[[n]]
# if zero impacts, return raster with only zeros
if( sum( values(x.proj[[n]]), na.rm = T) == 0)
return( x.proj[[n]])
# set up the dataset
dat_raw.dt <- data.table( cbind( dat.coords, values( dat.use)))
# do the predictions
if( xboost){
dat_raw.dt.trim <- dat_raw.dt[, model.use$feature_names, with = F]
xhold.pred <- xgb.DMatrix( as.matrix( dat_raw.dt.trim))
dat.pred <- predict( model.use, newdata = xhold.pred)
} else
dat.pred <- predict( model.use, newdata = dat_raw.dt)
# rasterize
dats.r <- rasterFromXYZ( data.table( dat.coords, dat.pred), crs = p4s)
#take difference from base
if( take.diff){
dats.r2 <- dats.r - dats0.r
} else
dats.r2 <- dats.r
names( dats.r2) <- n
return( dats.r2)
}))
# multiply by population
pred_popwgt.r <- pred_pm.r * dat.s[['pop']]
names( pred_popwgt.r) <- names( pred_pm.r)
#calculate raw average for the entire domain
pred_pm.us <- colMeans( data.table( values( pred_pm.r)), na.rm = T)
pred_pm.us.dt <- data.table( uID = names( pred_pm.us),
mean_pm = pred_pm.us,
state_abbr = 'US',
ID = 100)
  # calculate pop-weighted average for the entire domain
pred_pm.pw.us <- colSums( na.omit( data.table( values( pred_popwgt.r)), na.rm = T))
pred_pm.pw.us.dt <- data.table( uID = names( pred_pm.pw.us),
mean_popwgt = pred_pm.pw.us,
state_abbr = 'US',
ID = 100)
# calculate raw average by state
pred_pm.r$ID <- mask.r
pred_pm.dt <- data.table( values( pred_pm.r))
pred_pm.dt.m <- na.omit( melt( pred_pm.dt, id.vars = 'ID', variable.name = 'uID'))
pred_pm.dt.s <- pred_pm.dt.m[, .( mean_pm = mean( value)), by = .( ID, uID)]
pred_pm.dt.s <- merge( pred_pm.dt.s, mask.a, by = 'ID')
# calculate population-weighted by state
pred_popwgt.r$ID <- mask.r
pred_popwgt.dt <- data.table( values( pred_popwgt.r))
pred_popwgt.dt.m <- na.omit( melt( pred_popwgt.dt, id.vars = 'ID', variable.name = 'uID'))
pred_popwgt.dt.s <- pred_popwgt.dt.m[, .( mean_popwgt = sum( value)), by = .( ID, uID)]
pred_popwgt.dt.s <- merge( pred_popwgt.dt.s, mask.a, by = 'ID')
# bind with united states pops
pred_pm <- rbind( pred_pm.dt.s, pred_pm.us.dt)
pred_pm.pw <- rbind( pred_popwgt.dt.s, pred_pm.pw.us.dt)
# now just divide by each state's total population
setnames( state_pops, popyr.name, 'pop_amnt')
state_pops_lite <- state_pops[, .( state_abbr, pop_amnt)]
pop.tot <- data.table( state_abbr = 'US', pop_amnt = sum( state_pops$pop_amnt))
state_pops_lite <- rbind( state_pops_lite, pop.tot)
pred_popwgt.out <- merge( pred_pm.pw, state_pops_lite, by = 'state_abbr')
# divide by total population
pred_popwgt.out[, `:=` ( popwgt = mean_popwgt / pop_amnt,
month = name.Date)]
# merge pop-weighted and raw dataset
out <- merge( pred_popwgt.out, pred_pm, by = c( 'state_abbr', 'ID', 'uID'))
return( list( popwgt_states = out, pred_pm.r = pred_pm.r, zero_out.r = dats0.r))
}
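# Illustrative call (the file prefix and the `p4s.in` projection string are
# hypothetical; the model, meteorology, and population defaults above are
# assumed to exist in the workspace):
# exp.jan06 <- state_exposurer( month.n = 1, fstart = 'unit_impacts_',
#                               year.m = 2006, p4s = p4s.in)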
#======================================================================#
## same as above, but for a year
#======================================================================#
state_exposurer.year <- function(
fname,
year.m = 2006,
model.use = preds.ann.hyads06w05$model.gam,
name.x = 'idwe',
mask.use = mask.usa,
dat.a = dats2006.a,
grid_pop.r = grid_popwgt.r,
state_pops = copy( us_states.pop.dt),
p4s,
take.diff = F,
xboost = F,
raw = F
){
message( paste( 'Converting', year.m))
# rename population raster
popyr.name <- paste0( 'X', year.m)
grid_pop.r <- grid_pop.r[[popyr.name]]
names( grid_pop.r) <- 'pop'
# assign state names to raster
mask.r <- rasterize( mask.use[,'state_abbr'], dat.a[[1]])
mask.a <- levels( mask.r)[[1]]
dat.a$ID <- mask.r
dat.a <- project_and_stack( dat.a, grid_pop.r)
# read in, rasterize new x file
if( name.x == 'hyads'){
x.in <- fread( fname, drop = c( 'V1', 'year.E', 'year.H'))
x.cast <- dcast( x.in, x + y ~ uID, value.var = 'hyads')
name.dat <- 'hyads'
}
if( name.x == 'idwe'){
x.cast <- fread( fname, drop = c( 'V1'))
name.dat <- 'idwe'
}
# cast and rasterize
x.r <- rasterFromXYZ( x.cast, crs = p4s)
x.n <- paste0( 'X', names( x.cast)[!(names( x.cast) %in% c( 'x', 'y'))])
x.n <- gsub( '^XX', 'X', x.n)
names( x.r) <- x.n
x.proj <- project_and_stack( dat.a[[1]], x.r, mask.use = mask.use)
# predict the base scenario (zero hyads/idwe)
dat.use0<- copy( dat.a)
dat.use0[[name.dat]] <- 0
dat.coords <- coordinates( dat.a)
dat_raw0.dt <- data.table( cbind( dat.coords, values( dat.use0)))
# do the prediction if not taking raw values
if( !raw){
# xboost requires special treatment
if( xboost){
dat_raw0.dt.trim <- dat_raw0.dt[, model.use$feature_names, with = F]
xhold1c <- xgb.DMatrix( as.matrix( dat_raw0.dt.trim))
dat.pred0 <- predict( model.use, newdata = xhold1c)
} else
dat.pred0 <- predict( model.use, newdata = dat_raw0.dt)
dats0.r <- rasterFromXYZ( data.table( dat.coords, dat.pred0), crs = p4s)
} else{
dats0.r <- rasterFromXYZ( dat_raw0.dt[, c( 'x', 'y', name.dat), with = F], crs = p4s)
}
# do the predictions
pred_pm.r <- brick( pbmcapply::pbmclapply( x.n, function( n){
gc()
# if zero impacts, return raster with only zeros
if( sum( values(x.proj[[n]]), na.rm = T) == 0 | raw)
return( x.proj[[n]])
# assign unit to prediction dataset
dat.use <- copy( dat.a)
dat.use[[name.dat]] <- x.proj[[n]]
# set up the dataset
dat_raw.dt <- data.table( cbind( dat.coords, values( dat.use)))
# do the predictions
if( xboost){
dat_raw.dt.trim <- dat_raw.dt[, model.use$feature_names, with = F]
xhold.pred <- xgb.DMatrix( as.matrix( dat_raw.dt.trim))
dat.pred <- predict( model.use, newdata = xhold.pred)
} else
dat.pred <- predict( model.use, newdata = dat_raw.dt)
# rasterize
dats.r <- rasterFromXYZ( data.table( dat.coords, dat.pred), crs = p4s)
#take difference from base
if( take.diff){
dats.r2 <- dats.r - dats0.r
} else
dats.r2 <- dats.r
names( dats.r2) <- n
return( dats.r2)
}))
# multiply by population
pred_popwgt.r <- pred_pm.r * dat.a[['pop']]
names( pred_popwgt.r) <- names( pred_pm.r)
#calculate raw average for the entire domain
pred_pm.us <- colMeans( data.table( values( pred_pm.r)), na.rm = T)
pred_pm.us.dt <- data.table( uID = names( pred_pm.us),
mean_pm = pred_pm.us,
state_abbr = 'US',
ID = 100)
  # calculate pop-weighted average for the entire domain
pred_pm.pw.us <- colSums( na.omit( data.table( values( pred_popwgt.r)), na.rm = T))
pred_pm.pw.us.dt <- data.table( uID = names( pred_pm.pw.us),
mean_popwgt = pred_pm.pw.us,
state_abbr = 'US',
ID = 100)
# calculate raw average by state
pred_pm.r$ID <- mask.r
pred_pm.dt <- data.table( values( pred_pm.r))
pred_pm.dt.m <- na.omit( melt( pred_pm.dt, id.vars = 'ID', variable.name = 'uID'))
pred_pm.dt.s <- pred_pm.dt.m[, .( mean_pm = mean( value)), by = .( ID, uID)]
pred_pm.dt.s <- merge( pred_pm.dt.s, mask.a, by = 'ID')
# calculate population-weighted by state
pred_popwgt.r$ID <- mask.r
pred_popwgt.dt <- data.table( values( pred_popwgt.r))
pred_popwgt.dt.m <- na.omit( melt( pred_popwgt.dt, id.vars = 'ID', variable.name = 'uID'))
pred_popwgt.dt.s <- pred_popwgt.dt.m[, .( mean_popwgt = sum( value)), by = .( ID, uID)]
pred_popwgt.dt.s <- merge( pred_popwgt.dt.s, mask.a, by = 'ID')
# bind with united states pops
pred_pm <- rbind( pred_pm.dt.s, pred_pm.us.dt)
pred_pm.pw <- rbind( pred_popwgt.dt.s, pred_pm.pw.us.dt)
# now just divide by each state's total population
setnames( state_pops, popyr.name, 'pop_amnt')
state_pops_lite <- state_pops[, .( state_abbr, pop_amnt)]
pop.tot <- data.table( state_abbr = 'US', pop_amnt = sum( state_pops$pop_amnt))
state_pops_lite <- rbind( state_pops_lite, pop.tot)
pred_popwgt.out <- merge( pred_pm.pw, state_pops_lite, by = 'state_abbr')
# divide by total population
pred_popwgt.out[, `:=` (popwgt = mean_popwgt / pop_amnt,
year = year.m)]
# merge pop-weighted and raw dataset
out <- merge( pred_popwgt.out, pred_pm, by = c( 'state_abbr', 'ID', 'uID'))
return( list( popwgt_states = out, pred_pm.r = pred_pm.r, zero_out.r = dats0.r))
}
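# Annual analogue of the sketch above, again guarded so it never runs on source.
# fname points at a hypothetical CSV of annual unit-level impacts; the model,
# covariate stack (dats2006.a), and population raster come from the defaults.
if( FALSE){
  exp.2006 <- state_exposurer.year( fname = '~/scratch/hyads_unit_impacts_2006.csv', # hypothetical
                                    year.m = 2006,
                                    name.x = 'hyads',
                                    p4s = p4s) # LCC projection string as in ddm_to_zip()
}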
#======================================================================#
## calculate evaluation metrics
#======================================================================#
evals.fn <- function( Yhat, Yact){
num.diff <- sum( Yhat - Yact)
abs.diff <- sum( abs( Yhat - Yact))
denom <- sum( Yact)
metrics <- data.table( NMB = num.diff / denom,
NME = abs.diff / denom,
MB = num.diff / length( Yhat),
ME = abs.diff / length( Yhat),
RMSE = sqrt( sum( ( Yhat - Yact) ^ 2) / length( Yhat)),
R.p = cor( Yhat, Yact, method = 'pearson'),
R.s = cor( Yhat, Yact, method = 'spearman'))
return( metrics)
}
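# Self-contained sanity check of evals.fn(), guarded from running at source time:
# a prediction biased high by 10% should return NMB = 0.1 and R.p = 1.
if( FALSE){
  y.obs <- c( 1, 2, 3, 4, 5)
  y.mod <- 1.1 * y.obs
  evals.fn( Yhat = y.mod, Yact = y.obs)
}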
#======================================================================#
## function for converting individual unit concentrations to ugm3
# inputs - filename of grid w/ unit or summed impacts
# - model
# - covariate raster
# - 'x' name in the raster from model
#
# output - data table of state, pop-wgted impact for all input units
#======================================================================#
hyads_to_pm25 <- function(
  month.n = NULL,
  fstart,
  year.m = 2006,
  model.dataset = preds.mon.idwe06w05,
  model.name = 'model.cv', #'model.gam'
  name.x = 'idwe',
  mask.use = mask.usa,
  ddm.m = ddm.m.all,
  mets.m = mets.m.all,
  emiss.m = d_nonegu.r,
  idwe.m. = idwe.m,
  hyads.m = hyads.m.all,
  # the next three arguments are used in the body below and previously had to
  # exist in the global environment; defaults mirror state_exposurer()
  grid_pop.r = grid_popwgt.r,
  state_pops = copy( us_states.pop.dt),
  xboost = F,
  take.diff = T,
  p4s
){
message( paste( 'Converting', month.name[month.n]))
month.N <- formatC( month.n, width = 2, flag = '0')
name.m <- paste0( 'X', year.m, '.', month.N, '.01')
model.m <- paste0( 'X2005.', month.N, '.01')
popyr.name <- paste0( 'X', year.m)
name.Date <- as.Date( name.m, format = 'X%Y.%m.%d')
if( name.x == 'idwe'){
fname <- paste0( fstart, year.m, '_', month.n, '.csv')
name.dat <- 'idwe'
} else{
fname <- paste0( fstart, year.m, '_', month.N, '.csv')
name.dat <- 'hyads'
}
# create prediction dataset
# ddm.use.p <- ddm.m[[name.m]]
mets.use.p <- mets.m[[name.m]]
idwe.use.p <- idwe.m.[[name.m]]
hyads.use.p <- hyads.m[[name.m]]
# fix names
# names( ddm.use.p) <- 'cmaq.ddm'
names( hyads.use.p) <- 'hyads'
names( idwe.use.p) <- 'idwe'
# rename population raster
grid_pop.r <- grid_pop.r[[popyr.name]]
names( grid_pop.r) <- 'pop'
dat.s <- project_and_stack( #ddm.use.p,
hyads.use.p, idwe.use.p,
mets.use.p, emiss.m, grid_pop.r, mask.use = mask.use)
# assign state names to raster
mask.r <- rasterize( mask.use[,'state_abbr'], dat.s[[1]])
mask.a <- levels( mask.r)[[1]]
dat.s$ID <- mask.r
  # read in unit impacts; zero-fill NAs and drop all-zero columns
x.in1 <- fread( fname, drop = c( 'V1'))
x.in1[is.na( x.in1)] <- 0
suppressWarnings( x.in1[, `:=` ( yearmon = NULL, yearmonth = NULL)])
x.in2 <- x.in1[, colSums(x.in1) != 0, with = F]
suppressWarnings( x.in2[, `:=` ( x = NULL, y = NULL)])
x.in <- cbind( x.in1[, .( x, y)], x.in2)
# rasterize new x file
x.r <- rasterFromXYZ( x.in, crs = p4s)
x.n <- paste0( 'X', names( x.in)[!(names( x.in) %in% c( 'x', 'y'))])
x.n <- gsub( '^XX', 'X', x.n)
names( x.r) <- x.n
x.proj <- project_and_stack( dat.s[[1]], x.r, mask.use = mask.use)
names( x.proj)[2:dim( x.proj)[3]] <- x.n
# pick out the appropriate model
model.use <- model.dataset[ model.name, model.m][[1]]
# predict the base scenario (zero hyads/idwe)
dat.use0<- copy( dat.s)
dat.use0[[name.dat]] <- 0
dat.coords <- coordinates( dat.s)
dat_raw0.dt <- data.table( cbind( dat.coords, values( dat.use0)))
# xboost requires special treatment
if( xboost){
dat_raw0.dt.trim <- dat_raw0.dt[, model.use$feature_names, with = F]
xhold1c <- xgb.DMatrix( as.matrix( dat_raw0.dt.trim))
dat.pred0 <- predict( model.use, newdata = xhold1c)
} else
dat.pred0 <- predict( model.use, newdata = dat_raw0.dt)
dats0.r <- rasterFromXYZ( data.table( dat.coords, dat.pred0), crs = p4s)
# do the predictions
pred_pm.r <- brick( pbmcapply::pbmclapply( x.n, function( n){
gc()
# assign unit to prediction dataset
dat.use <- copy( dat.s)
dat.use[[name.dat]] <- x.proj[[n]]
# if zero impacts, return raster with only zeros
if( sum( values(x.proj[[n]]), na.rm = T) == 0)
return( x.proj[[n]])
# set up the dataset
dat_raw.dt <- data.table( cbind( dat.coords, values( dat.use)))
# do the predictions
if( xboost){
dat_raw.dt.trim <- dat_raw.dt[, model.use$feature_names, with = F]
xhold.pred <- xgb.DMatrix( as.matrix( dat_raw.dt.trim))
dat.pred <- predict( model.use, newdata = xhold.pred)
} else
dat.pred <- predict( model.use, newdata = dat_raw.dt)
# rasterize
dats.r <- rasterFromXYZ( data.table( dat.coords, dat.pred), crs = p4s)
#take difference from base
if( take.diff){
dats.r2 <- dats.r - dats0.r
} else
dats.r2 <- dats.r
names( dats.r2) <- n
return( dats.r2)
}))
# multiply by population
pred_popwgt.r <- pred_pm.r * dat.s[['pop']]
names( pred_popwgt.r) <- names( pred_pm.r)
#calculate raw average for the entire domain
pred_pm.us <- colMeans( data.table( values( pred_pm.r)), na.rm = T)
pred_pm.us.dt <- data.table( uID = names( pred_pm.us),
mean_pm = pred_pm.us,
state_abbr = 'US',
ID = 100)
  # calculate pop-weighted average for the entire domain
pred_pm.pw.us <- colSums( na.omit( data.table( values( pred_popwgt.r)), na.rm = T))
pred_pm.pw.us.dt <- data.table( uID = names( pred_pm.pw.us),
mean_popwgt = pred_pm.pw.us,
state_abbr = 'US',
ID = 100)
# calculate raw average by state
pred_pm.r$ID <- mask.r
pred_pm.dt <- data.table( values( pred_pm.r))
pred_pm.dt.m <- na.omit( melt( pred_pm.dt, id.vars = 'ID', variable.name = 'uID'))
pred_pm.dt.s <- pred_pm.dt.m[, .( mean_pm = mean( value)), by = .( ID, uID)]
pred_pm.dt.s <- merge( pred_pm.dt.s, mask.a, by = 'ID')
# calculate population-weighted by state
pred_popwgt.r$ID <- mask.r
pred_popwgt.dt <- data.table( values( pred_popwgt.r))
pred_popwgt.dt.m <- na.omit( melt( pred_popwgt.dt, id.vars = 'ID', variable.name = 'uID'))
pred_popwgt.dt.s <- pred_popwgt.dt.m[, .( mean_popwgt = sum( value)), by = .( ID, uID)]
pred_popwgt.dt.s <- merge( pred_popwgt.dt.s, mask.a, by = 'ID')
# bind with united states pops
pred_pm <- rbind( pred_pm.dt.s, pred_pm.us.dt)
pred_pm.pw <- rbind( pred_popwgt.dt.s, pred_pm.pw.us.dt)
# now just divide by each state's total population
setnames( state_pops, popyr.name, 'pop_amnt')
state_pops_lite <- state_pops[, .( state_abbr, pop_amnt)]
pop.tot <- data.table( state_abbr = 'US', pop_amnt = sum( state_pops$pop_amnt))
state_pops_lite <- rbind( state_pops_lite, pop.tot)
pred_popwgt.out <- merge( pred_pm.pw, state_pops_lite, by = 'state_abbr')
# divide by total population
pred_popwgt.out[, `:=` ( popwgt = mean_popwgt / pop_amnt,
month = name.Date)]
# merge pop-weighted and raw dataset
out <- merge( pred_popwgt.out, pred_pm, by = c( 'state_abbr', 'ID', 'uID'))
return( list( popwgt_states = out, pred_pm.r = pred_pm.r, zero_out.r = dats0.r))
}
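# Hedged sketch (not run) of converting all twelve months of a year with
# hyads_to_pm25(); fstart is a hypothetical prefix for the monthly unit-impact
# CSVs, and the remaining inputs come from the defaults in the signature.
if( FALSE){
  pm.by.month <- lapply( 1:12, hyads_to_pm25,
                         fstart = '~/scratch/grids_pm25_byunit_', # hypothetical prefix
                         year.m = 2006,
                         name.x = 'hyads',
                         p4s = p4s)
  # stack the state-level summaries across months
  popwgt.monthly <- rbindlist( lapply( pm.by.month, `[[`, 'popwgt_states'))
}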
#======================================================================#
## function for converting individual unit concentrations to ugm3
# inputs - filename of grid w/ unit or summed impacts
# - model
# - covariate raster
# - 'x' name in the raster from model
#
# output - data table of ugm3 impacts for all input units
#======================================================================#
hyads_to_pm25_unit <- function(
year.m = 2006,
month.n = NULL,
fstart = NULL,
fstart.total,
fstart_out,
model.dataset = preds.mon.idwe06w05,
model.name = 'model.cv', #'model.gam'
name.x = 'hyads',
mask.use = mask.usa,
met.dest = '/projects/HAQ_LAB/lhennem/data/disperseR/HyADS_to_pm25/met',
total = F,
p4s
){
message( paste( 'Converting', month.name[month.n], year.m))
#define the met layer names, do the actual downloading
Sys.setenv(TZ='UTC')
layer.names <- c( "air.2m.mon.mean.nc",
"apcp.mon.mean.nc",
"rhum.2m.mon.mean.nc",
"vwnd.10m.mon.mean.nc",
"uwnd.10m.mon.mean.nc")
names( layer.names) <- c( "temp", "apcp", "rhum", "vwnd", "uwnd")
# do the data downloading
# set destination parameter to where you want the data downloaded,
# for example, destination = '~/Desktop'
list.met <- lapply( layer.names,
downloader.fn,
destination = met.dest,
dataset = 'NARR')
# annual or month-specific actions
if( is.null( month.n)){
    # define file names
fname <- paste0( fstart, year.m, '.fst')
fname.total <- paste0( fstart.total, year.m, '.fst')
fname_out <- paste0( fstart_out, year.m, '.fst')
# download met data
mets.use.p <- suppressWarnings(
usa.functioner( year.m, list.met, dataset = 'NARR',
avg.period = 'year', return.usa.sub = F)
)
# pick out the appropriate model
model.use <- model.dataset[[ model.name]]
# get the prediction crs
model.rast <- model.dataset$Y.ho.hat.raster
model.csr <- crs( model.rast)
} else {
# define date/month names
month.N <- formatC( month.n, width = 2, flag = '0')
name.m <- paste0( 'X', year.m, '.', month.N, '.01')
model.m <- paste0( 'X2005.', month.N, '.01')
name.Date <- as.Date( name.m, format = 'X%Y.%m.%d')
    # define file names
fname <- paste0( fstart, year.m, '_', month.N, '.fst')
fname.total <- paste0( fstart.total, year.m, '_', month.N, '.fst')
fname_out <- paste0( fstart_out, year.m, '_', month.N, '.fst')
# download met data
mets.m <- suppressWarnings(
usa.functioner( year.m, list.met, dataset = 'NARR',
avg.period = 'month', return.usa.sub = F)
)
mets.use.p <- mets.m[[name.m]]
# pick out the appropriate model
model.use <- model.dataset[ model.name, model.m][[1]]
# get the prediction crs
model.rast <- model.dataset['Y.ho.hat.raster',][[model.m]]
model.csr <- crs( model.rast)
}
# create prediction dataset
mets.use.p <- projectRaster( mets.use.p, crs = model.csr)
dat.s <- project_and_stack( model.rast, mets.use.p, mask.use = mask.use)
# read in total hyads
hyads.total.dt <- read.fst( fname.total, columns = c( 'x', 'y', 'hyads'), as.data.table = T)
hyads.total.r <- rasterFromXYZ( hyads.total.dt, crs = p4s)
# predict with total hyads
dat.use.s<- copy( dat.s)
hyads.total.proj <- project_and_stack( dat.use.s, hyads.total.r, mask.use = mask.use)
# predict the base scenario
dat.coords <- coordinates( hyads.total.proj)
dat_total.dt <- data.table( cbind( dat.coords, values( hyads.total.proj)))
dat.total.pred <- predict( model.use, newdata = dat_total.dt) %>%
exp()
# predict a 0-hyads scenario
dat_0.dt <- dat_total.dt[, hyads := 0]
dat.0.pred <- predict( model.use, newdata = dat_0.dt, type = 'response') %>%
exp()
# predict pm into a raster
pred_pm.r <- rasterFromXYZ( data.table( dat.coords, dat.total.pred), crs = p4s) %>%
projectRaster( dat.use.s)
pred_0.r <- rasterFromXYZ( data.table( dat.coords, dat.0.pred), crs = p4s) %>%
projectRaster( dat.use.s)
# remove mean
pred_nomean.r <- pred_pm.r - pred_0.r
#read in, project hyads
if( total){
# collect coordinates and values
coords.out <- coordinates( pred_nomean.r)
vals.out <- values( pred_nomean.r)
# write out the data.table as fst
pred_pm.dt <- data.table( cbind( coords.out, vals.out))
# print( summary( pred_pm.dt[, 6:10]))
write_fst( pred_pm.dt, fname_out)
note <- paste( 'Unit conversions saved to', fname_out)
message( note)
return( note)
} else {
hyads.dt <- read.fst( fname, columns = c( 'x', 'y', 'uID', 'hyads'), as.data.table = T)
hyads.dt <- hyads.dt[!is.na( x) & !is.na( y)]
hyads.dt.c <- dcast( hyads.dt, x + y ~ uID, value.var = 'hyads')
hyads.use.p <- rasterFromXYZ( hyads.dt.c, crs = p4s)
hyads.use.p[is.na( hyads.use.p)] <- 0
hyads.proj <- project_and_stack( dat.s[[1]], hyads.use.p, mask.use = mask.use)
hyads.proj <- dropLayer( hyads.proj, 1)
hyads.n <- paste0( 'X', names( hyads.dt.c)[!(names( hyads.dt.c) %in% c( 'x', 'y'))])
names( hyads.proj) <- hyads.n
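    # apportion the total coal PM2.5 signal (total prediction minus the zero-HyADS
    # prediction) to each unit in proportion to its share of total HyADS:
    #   pm_unit = ( hyads_unit / hyads_total) * ( pm_total - pm_zero)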
# take fraction of total hyads
hyads.frac.total <- hyads.proj / hyads.total.proj[[name.x]]
hyads.frac.total.pm <- hyads.frac.total * pred_nomean.r
names( hyads.frac.total.pm) <- hyads.n
coords.out <- coordinates( hyads.frac.total.pm)
vals.out <- values( hyads.frac.total.pm)
# write out the data.table as fst
pred_pm.dt <- data.table( cbind( coords.out, vals.out))
# print( summary( pred_pm.dt[, 6:10]))
write_fst( pred_pm.dt, fname_out)
note <- paste( 'Unit conversions saved to', fname_out)
message( note)
return( note)
}
}
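#======================================================================#
## hedged usage sketch (not run) for hyads_to_pm25_unit()
#======================================================================#
# Single-month, unit-level conversion, guarded so sourcing stays side-effect free.
# Assumes library( fst) and a pipe provider (magrittr or dplyr) are attached for
# read.fst/write_fst and %>%, that the default model object and mask exist, and
# that the fstart* prefixes below (hypothetical) point at fst files on disk.
if( FALSE){
  library( fst)
  library( magrittr)
  hyads_to_pm25_unit( year.m = 2006,
                      month.n = 1,
                      fstart = '~/scratch/hyads_unit_',        # hypothetical prefix
                      fstart.total = '~/scratch/hyads_total_', # hypothetical prefix
                      fstart_out = '~/scratch/pm25_unit_',     # hypothetical prefix
                      name.x = 'hyads',
                      p4s = p4s)
}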
|
/RCode/hyads_to_pm25_functions.R
|
no_license
|
lhenneman/HyADS_to_pm25
|
R
| false
| false
| 52,533
|
r
|
library( data.table)
library( raster)
# library( spBayes)
library( disperseR)
library( ggplot2)
library( viridis)
library( lubridate)
library( mgcv)
# library( xgboost)
library( pbmcapply)
`%ni%` <- Negate(`%in%`)
#======================================================================#
## ddm to grid raster
#======================================================================#
ddm_to_zip <- function( ddm_coal_file,
Year,
avg.period = 'year'){
p4s <- "+proj=lcc +lat_1=33 +lat_2=45 +lat_0=40 +lon_0=-97 +a=6370000 +b=6370000"
#read data,
ddm_coal <- fread(ddm_coal_file)
# melt, extract year, month, day
ddm_coal.m <- melt( ddm_coal, id.vars = c( 'X', 'Y'),
variable.name = 'date.in', value.name = 'coal_pm25')
ddm_coal.m[, `:=` ( date.in = as.Date( date.in, format = '%m/%d/%y'))]
ddm_coal.m[, `:=` ( year.in = year( date.in),
month.in = month( date.in))]
# remove blow up values (greater than 30)
ddm_coal.m[ coal_pm25 < 0 | coal_pm25 > 30, coal_pm25 := NA]
#rasterize as brick
names.ddm <- unique( ddm_coal.m$date.in)
ddm_coal.b <- brick( lapply( names.ddm,
function( name, dt.m){
r <- rasterFromXYZ(dt.m[ date.in == name,
.(x = X, y = Y, z = coal_pm25)],
crs = CRS(p4s))
names( r) <- name
return( r)
}, ddm_coal.m))
# fill NA's with linear interpolation across days
ddm_coal.b <- approxNA( ddm_coal.b, rule=2)
names( ddm_coal.b) <- names.ddm
# take monthly averages
if( avg.period == 'month'){
ddm_coal.month <- lapply( 1:12,
function( m, ddm_raster.b){
names.dates <- as.Date( gsub( '\\.', '-', gsub( 'X', '', names( ddm_raster.b))))
id <- which( month( names.dates) == m)
ddm_coal.mon <- mean( subset( ddm_raster.b, id))
return( ddm_coal.mon)
}, ddm_coal.b)
ddm_coal.z <- brick( ddm_coal.month)
names( ddm_coal.z) <- paste( Year, formatC( 1:12, width = 2, flag = '0'), sep = '.')
# take annual averages
} else if( avg.period == 'year'){
# take annual average
ddm_coal.z <- mean( ddm_coal.b)
names( ddm_coal.z) <- Year
}
return( ddm_coal.z)
}
#======================================================================#
## functions to get meteorology data
# download the necessary met files, 20th century reanalysis
#======================================================================#
downloader.fn <- function( filename,
destination = file.path('~', 'Dropbox', 'Harvard', 'RFMeval_Local',
'Comparisons_Intermodel', 'Global_meteorology'),
dataset = c( '20thC_ReanV2c', 'ncep.reanalysis.derived', 'NARR')){
if( length( dataset) > 1)
dataset <- dataset[1]
fileloc <- file.path( destination, dataset)
# create directory to store in
dir.create( fileloc,
recursive = T,
showWarnings = F)
# name variable, filenames
varname_NOAA <- gsub( "\\..*", "", filename)
file_NOAA <- file.path( fileloc, filename)
# define URL
if( dataset == '20thC_ReanV2c'){
# https://www.esrl.noaa.gov/psd/data/gridded/data.20thC_ReanV2c.monolevel.mm.html
url_NOAA <- paste0( "ftp://ftp.cdc.noaa.gov/Datasets/20thC_ReanV2c/Monthlies/gaussian/monolevel/", filename)
} else if( dataset == 'ncep.reanalysis.derived'){
url_NOAA <- paste0( "ftp://ftp.cdc.noaa.gov/Datasets/ncep.reanalysis.derived/surface/", filename)
}else if( dataset == 'NARR'){
url_NOAA <- paste0( "ftp://ftp.cdc.noaa.gov/Datasets/NARR/Monthlies/monolevel/", filename)
}
if( !file.exists( file_NOAA))
download.file( url = url_NOAA,
destfile = file_NOAA)
hpbl_rasterin <- brick( x = file_NOAA,
varname = varname_NOAA)
return( hpbl_rasterin)
}
#======================================================================#
## functions to get meteorology data
# extract the year of interest, average by year or return months
#======================================================================#
extract_year.fn <- function( raster.in = list.met[[1]],
year.in = 2005,
avg.period = 'year',
dataset = c( '20thC_ReanV2c', 'ncep.reanalysis.derived', 'NARR')){
# default to 20th cent reanalysis
if( length( dataset) > 1){
dataset <- dataset[1]
print( paste( 'No dataset specified, defaulting to', dataset))
}
# name months 1:12 for extracting from raster
names.months <- paste0( year.in, '-',
formatC( 1:12, width = 2, flag = '0'), '-',
'01')
# extract monthly dates using function from hyspdisp
raster.sub <- brick( subset_nc_date( hpbl_brick = raster.in,
vardate = names.months))
#NARR dataset requires rotating
if( dataset != 'NARR')
raster.sub <- rotate( raster.sub)
# take annual mean
if( avg.period == 'year'){
out <- stackApply( raster.sub, indices = rep( 1, 12), fun = mean)
} else
out <- raster.sub
return( out)
}
#======================================================================#
## functions to get meteorology data
# trim data over US, create raster object
#======================================================================#
usa.functioner <- function( year.in = 2005,
list.met,
dataset = c( '20thC_ReanV2c', 'ncep.reanalysis.derived', 'NARR'),
avg.period = 'year',
return.usa.mask = F,
return.usa.sub = T){
# extract year
if( avg.period == 'year'){
mets <- lapply( list.met,
extract_year.fn,
year.in = year.in,
avg.period = avg.period,
dataset = dataset)
} else
mets <- lapply( list.met,
extract_year.fn,
year.in = year.in,
avg.period = avg.period,
dataset = dataset)
crs.str <- projection( list.met[[1]])
crs.usa <- crs( crs.str)
# convert temp to celcius
mets$temp <- mets$temp - 273.15
# calculate windspeed
# calculate meteorology wind angle (0 is north wind)
# http://weatherclasses.com/uploads/3/6/2/3/36231461/computing_wind_direction_and_speed_from_u_and_v.pdf
if( 'uwnd' %in% names( list.met) & 'vwnd' %in% names( list.met)){
mets$wspd <- sqrt( mets$uwnd ^ 2 + mets$vwnd ^ 2)
mets$phi <- atan2( mets$uwnd, mets$vwnd) * 180 / pi + 180
names( mets$wspd) <- names(mets$vwnd)
names( mets$phi) <- names(mets$vwnd)
}
# download USA polygon from rnaturalearth
us_states.names <- state.abb[!(state.abb %in% c( 'HI', 'AK'))]
us_states <- st_transform( USAboundaries::us_states(), crs.str)
us_states.sp <- sf::as_Spatial(us_states)[ us_states$state_abbr %in% us_states.names,]
if( return.usa.mask){
return( us_states.sp)
}
if( return.usa.sub){
mets_crop <- rasterize( us_states.sp, mets[[1]], getCover=TRUE)
mets_crop[mets_crop==0] <- NA
mets_crop[!is.na( mets_crop)] <- 1
# crop to USA
if( avg.period == 'year'){
mets.out.l <- lapply( mets, function( x){
trim( mask(x, mets_crop, maskvalue = NA),
padding = 1)
})
mets.out <- brick( mets.out.l)
} else{
names.months <- names( mets[[1]])
mets.out.l <- lapply( mets,
function( x){
lapply( names.months,
function( y){
r <- subset( x, y)
trim( mask(r, mets_crop, maskvalue = NA),
padding = 1)
})})
mets.out.b <- lapply( names.months,
function( n){
lapply( mets.out.l, function( l){
b <- brick( l)
subset( b, n)
})
})
# each month is a brick
mets.out <- lapply( mets.out.b, brick)
names( mets.out) <- names.months
}
return( mets.out)
} else{
if( avg.period == 'year'){
mets.out <- brick( mets)
} else{
# reorganize - list of months
names.months <- names( mets[[1]])
mets.out <- lapply( names.months,
function( name.ext, X){
brick( lapply( X, '[[', name.ext))
}, mets)
names( mets.out) <- names.months
}
return( mets.out)
}
}
#======================================================================#
# define the spBayes model function
#======================================================================#
splM.hyads.ddm <- function( dummy.n = 1,
coords = as.matrix( hyads2005.dt[,.( x, y)]),
Y = ddm2005.dt$X2005,
X = data.table( intercept = 1, hyads = hyads2005.dt$X2005),
seed.n = NULL,
...){
set.seed( seed.n)
quants <- function(x){
quantile(x, prob=c(0.5, 0.025, 0.975))
}
# holdout fraction
ho.frac <- .1
# define number of samples
n.samples <- 5000
# define priors
starting <- list("tau.sq"=1, "sigma.sq"=1, "phi"=6)
tuning <- list("tau.sq"=0.01, "sigma.sq"=0.01, "phi"=0.1)
priors <- list("beta.Flat", "tau.sq.IG"=c(2, 1),
"sigma.sq.IG"=c(2, 1), "phi.Unif"=c(3, 30))
# define holdout parameters
ho <- sample( 1:length( Y), ceiling( ho.frac * length( Y)))
#convert X & Y to matrices
X.m <- as.matrix( X)
Y.m <- as.matrix( Y)
# define inputs
coords.ho <- coords[ho,]
coords.tr <- coords[-ho,]
Y.ho <- Y.m[ho]
Y.tr <- Y.m[-ho]
X.ho <- X.m[ho,]
X.tr <- X.m[-ho,]
# define burn in
burn.in <- floor(0.75*n.samples)
# train the model
m.i <- spLM( Y.tr ~ X.tr - 1, coords = coords.tr,
modified.pp = TRUE, ...,
starting = starting, tuning = tuning, priors = priors,
cov.model = "exponential",
n.samples = n.samples, n.report = 2500)
# recover estimates of beta and theta
rt.rec <- system.time( {
m.i.rec <- spRecover(m.i, start=burn.in, thin=5, n.report=100)
})
# return simulated y.hat's
rt.y.hat <- system.time( {
m.i.pred <- spPredict( m.i, start=burn.in, thin=2, pred.covars = X.ho,
pred.coords=coords.ho, verbose=FALSE)
})
# find estimates of beta, theta, w.hat, and y.hat
beta.hat <- round(summary(m.i.rec$p.beta.recover.samples)$quantiles[c(3,1,5)],6)
theta.hat <- round( summary( window( m.i$p.theta.samples,
start = burn.in))$quantiles[, c( 3, 1, 5)], 2)
w.hat <- apply(m.i.rec$p.w.recover.samples, 1, median)
y.hat <- data.table( t( apply(m.i.pred$p.y.predictive.samples, 1, quants)))
# set up evaluation data.table
Y.ho.tr.dt <- data.table( cbind( coords.ho, y.hat[,`50%`], Y.ho))
setnames( Y.ho.tr.dt, 'V3', 'Y.hat')
# rasterize output for plots
w.hat.raster <- rasterFromXYZ( cbind( coords.tr, w.hat))
y.hat.raster <- rasterFromXYZ( Y.ho.tr.dt[, .( x, y, Y.hat)])
Y.tr.raster <- rasterFromXYZ( cbind( coords.tr, Y.tr))
Y.ho.raster <- rasterFromXYZ( Y.ho.tr.dt[, .( x, y, Y.ho)])
# mcmc plots
# plot( m.i$p.theta.samples)
# plot( m.i.rec$p.beta.recover.samples)
# spatial areas of y.hat - Y.ho
par(mfrow=c(1,3))
plot( w.hat.raster, main = "w.hat (spatial adjustment term)")
points( m.i$knot.coords, cex=1)
plot( y.hat.raster - Y.ho.raster, main = "Y.hat - Y.ho")
plot( (y.hat.raster - Y.ho.raster) / Y.ho.raster, main = "(Y.hat - Y.ho) / Y.ho")
par(mfrow=c(1,1))
# check out simpler models - set up inputs
names.covars <- names( X)
XY.tr <- data.table( Y = Y.tr, X.tr)
XY.ho <- data.table( Y = Y.ho, X.ho)
setnames( XY.tr, names(XY.tr)[names(XY.tr) %ni% 'Y'], names.covars)
setnames( XY.ho, names(XY.tr)[names(XY.tr) %ni% 'Y'], names.covars)
# check out simpler models - define them
form <- as.formula( paste( 'Y ~ -1 +', paste( names.covars, collapse = '+')))
m.lm <- lm( form, data = XY.tr)
m.mean <- mean( Y.tr / XY.tr$hyads)
# check out simpler models - get predicted Y.ho
y.hat.lm <- predict( m.lm, newdata = XY.ho)
y.hat.mean <- XY.ho$hyads * m.mean
# calculate evaluation metrics
eval.fn <- function( Yhat, Yact, mod.name){
num.diff <- sum( Yhat - Yact)
abs.diff <- sum( abs( Yhat - Yact))
denom <- sum( Yact)
metrics <- data.table( mod.name = mod.name,
NMB = num.diff / denom,
NME = abs.diff / denom,
MB = num.diff / length( ho),
RMSE = sqrt( sum( ( Yhat - Yact) ^ 2) / length( Y.ho)),
R = cor( Yhat, Yact))
return( metrics)
}
metrics.out <- rbind( eval.fn( Y.ho.tr.dt$Y.hat, Y.ho, 'spLM'),
eval.fn( y.hat.lm, Y.ho, 'lm'),
eval.fn( y.hat.mean, Y.ho, 'mean'))
out <- list( metrics = metrics.out,
runtimes = data.table( ncells.tr = length( Y.tr),
train = round(m.i$run.time[3]/60,3),
recover = round(rt.rec[3]/60,3),
est.y.hat = round(rt.y.hat[3]/60,3)))
return( out)
}
#======================================================================#
# define the linear model holdout function
#======================================================================#
lm.hyads.ddm.holdout <- function( seed.n = NULL,
dat.stack = dats2005.s.small,
dat.stack.pred = NULL,
y.name = 'cmaq.ddm',
x.name = 'hyads',
name.idwe = 'tot.sum',
covars.names = NULL, #c( 'temp', 'apcp'),
ho.frac = .1,
return.mods = F,
...){
# define eval function
eval.fn <- function( Yhat, Yact, mod.name){
num.diff <- sum( Yhat - Yact, na.rm = T)
abs.diff <- sum( abs( Yhat - Yact), na.rm = T)
denom <- sum( Yact, na.rm = T)
metrics <- data.table( mod.name = mod.name,
NMB = num.diff / denom,
NME = abs.diff / denom,
MB = num.diff / length( Yhat),
RMSE = sqrt( sum( ( Yhat - Yact) ^ 2, na.rm = T) / length( Yhat)),
R = cor( Yhat, Yact, use = 'complete.obs'),
R.s = cor( Yhat, Yact, use = 'complete.obs', method = 'spearman'))
return( metrics)
}
set.seed( seed.n)
# if no covar names provided, use all covariates that aren't x or y
if( is.null( covars.names))
covars.names <- names( dat.stack)[names( dat.stack) %ni% c( x.name, y.name)]
# define holdout parameters
N <- ncell( dat.stack)
ho <- sample( 1:N, ceiling( ho.frac * N))
# extract coordinates
dat.coords <- coordinates( dat.stack)
dat.coords.ho <- data.table( dat.coords[ho,])
dat.coords.tr <- data.table( dat.coords[-ho,])
# define inputs
dat.stack.ho <- data.table( values( dat.stack)[ ho,])
dat.stack.tr <- data.table( values( dat.stack)[-ho,])
# special case for ho is zero
if( length( ho) == 0){
# extract coordinates
dat.coords.ho <- data.table( dat.coords)
dat.coords.tr <- data.table( dat.coords)
# define inputs
dat.stack.ho <- data.table( dat.coords.ho, values( dat.stack))
dat.stack.tr <- data.table( dat.coords.tr, values( dat.stack))
}
if( !is.null( dat.stack.pred)){
# extract coordinates
dat.coords.ho <- data.table( coordinates( dat.stack.pred))
# define inputs
dat.stack.ho <- data.table( dat.coords.ho, values( dat.stack.pred))
}
# create log variables
dat.stack.tr[, y.name.log := log( get( y.name))]
dat.stack.ho[, y.name.log := log( get( y.name))]
# check out linear regression models - define them
form.ncv <- as.formula( paste( 'y.name.log', '~', x.name))
form.cv_single_poly <-
as.formula( paste( 'y.name.log', '~ poly(', x.name, ', 2) +', x.name, ': (',
paste( c( covars.names),
collapse = '+'), ')')) #, '+ s( x, y, k = 20)'
form.cv_single <-
as.formula( paste( 'y.name.log', '~ ', x.name, '+', x.name, ': (',
paste( c( covars.names),
collapse = '+'), ')'))
form.cv_five <-
as.formula( paste( 'y.name.log', '~ ', x.name, '+', x.name, ': (',
paste( c( covars.names),
collapse = '+'), ')^5'))
# train the models
lm.ncv <- lm( form.ncv, data = dat.stack.tr)
lm.cv_single <- lm( form.cv_single, data = dat.stack.tr)
lm.cv_single_poly <- lm( form.cv_single_poly, data = na.omit( dat.stack.tr))
lm.cv_five <- lm( form.cv_five, data = dat.stack.tr)
# check out simpler models - get predicted Y.ho
y.ho <- unlist( dat.stack.ho[,..y.name])
y.hat.lm.ncv <- predict( lm.ncv, type = 'response', newdata = dat.stack.ho, se.fit = T)
y.hat.lm.cv_single <- predict( lm.cv_single, type = 'response', newdata = dat.stack.ho, se.fit = T)
y.hat.lm.cv_single_poly <- predict( lm.cv_single_poly, type = 'response', newdata = dat.stack.ho, se.fit = T)
y.hat.lm.cv_five <- predict( lm.cv_five, type = 'response', newdata = dat.stack.ho, se.fit = T)
# set up evaluation data.table
Y.ho.hat <- data.table( dat.coords.ho, y.ho,
y.hat.lm.ncv = y.hat.lm.ncv$fit,
y.hat.lm.cv_single = y.hat.lm.cv_single$fit,
y.hat.lm.cv_single_poly = y.hat.lm.cv_single_poly$fit,
y.hat.lm.cv_five = y.hat.lm.cv_five$fit)
Y.ho.hat.bias <- data.table( dat.coords.ho, y.ho,
y.hat.lm.ncv = y.hat.lm.ncv$fit - y.ho,
y.hat.lm.cv_single = y.hat.lm.cv_single$fit - y.ho,
y.hat.lm.cv_single_poly = y.hat.lm.cv_single_poly$fit - y.ho,
y.hat.lm.cv_five = y.hat.lm.cv_five$fit - y.ho)
Y.ho.hat.se <- data.table( dat.coords.ho,
y.hat.lm.ncv = y.hat.lm.ncv$se.fit,
y.hat.lm.cv_single = y.hat.lm.cv_single$se.fit,
y.hat.lm.cv_single_poly = y.hat.lm.cv_single_poly$se.fit,
y.hat.lm.cv_five = y.hat.lm.cv_five$se.fit)
# rasterize output for plots
crs.in <- crs( dat.stack)
Y.ho.hat.raster <- projectRaster( rasterFromXYZ( Y.ho.hat, crs = crs.in), dat.stack)
Y.ho.hat.se.raster <- projectRaster( rasterFromXYZ( Y.ho.hat.se, crs = crs.in), dat.stack)
Y.ho.hat.bias.raster <- projectRaster( rasterFromXYZ( Y.ho.hat.bias, crs = crs.in), dat.stack)
# calculate evaluation metrics
metrics.out <- rbind( eval.fn( exp( Y.ho.hat$y.hat.lm.ncv), y.ho, 'lm.ncv'),
eval.fn( exp( Y.ho.hat$y.hat.lm.cv_single), y.ho, 'lm.cv_single'),
eval.fn( exp( Y.ho.hat$y.hat.lm.cv_single_poly), y.ho, 'lm.cv_single_poly'),
eval.fn( exp( Y.ho.hat$y.hat.lm.cv_five), y.ho, 'lm.cv_five'))
# listify the models
if( return.mods)
out <- list( metrics = metrics.out,
model.lm.ncv = lm.ncv,
model.lm.cv_single = lm.cv_single,
model.lm.cv_single_poly = lm.cv_single_poly,
model.lm.cv_five = lm.cv_five,
Y.ho.hat.raster = Y.ho.hat.raster,
Y.ho.hat.se.raster = Y.ho.hat.se.raster,
Y.ho.hat.bias.raster = Y.ho.hat.bias.raster)
if( !return.mods)
out <- list( metrics = metrics.out,
Y.ho.hat.raster = Y.ho.hat.raster,
Y.ho.hat.se.raster = Y.ho.hat.se.raster,
Y.ho.hat.bias.raster = Y.ho.hat.bias.raster)
return( out)
}
#======================================================================#
# extract a mean from a list of lists of rasters
#======================================================================#
mean.lol <- function( lol, layer1, layer2, plot.out = TRUE){
first.lol <- lapply( lol, '[[', layer1)
second.lol <- lapply( first.lol, '[[', layer2)
mean.lol <- mean( stack( second.lol), na.rm = T)
if( plot.out)
plot( mean.lol, main = layer2)
return( mean.lol)
}
#======================================================================#
# ggplot a raster
#======================================================================#
ggplot.a.raster <- function( ..., bounds = NULL, facet.names = NULL, mask.raster = NULL,
nrow. = NULL, ncol. = NULL, legend.name = NULL, theme.obj = theme()){
in.x <- list( ...)
if( length( in.x) == 1)
in.x <- in.x[[1]]
if( is.null( facet.names))
facet.names <- lapply( in.x, names)
if( is.null( names( in.x)) & !is.null( facet.names))
names( in.x) <- facet.names
in.x.crop <- in.x
if( !is.null( mask.raster) & is.list( in.x))
in.x.crop <- lapply( in.x, function( X, mask.raster.){
X.mask <- mask( X, mask.raster.)
X.crop <- crop( X.mask, mask.raster.)
return( X.crop)
}, mask.raster)
if( !is.null( mask.raster) & !is.list( in.x)){
in.x.mask <- mask( in.x, mask.raster)
in.x.crop <- crop( in.x.mask, mask.raster)
}
dat.dt <- rbindlist( lapply( facet.names, function( x.name, x.list) {
x <- x.list[[x.name]]
r_points <- rasterToPoints( x)
r_dt <- data.table( r_points)[, name.in := x.name]
setnames( r_dt, names( x), 'z')
return( r_dt)
}, in.x.crop))
if( !is.null( facet.names))
dat.dt[, name.in := factor( name.in, levels = facet.names)]
ggplot( dat.dt) +
geom_tile( aes( x = x, y = y, fill = z)) +
scale_fill_viridis( name = legend.name, limits = bounds, oob = scales::squish) +
facet_wrap( . ~ name.in, nrow = nrow., ncol = ncol.) +
# expand_limits( fill = 0) +
theme_bw() +
theme( axis.text = element_blank(),
axis.title = element_blank(),
axis.ticks = element_blank(),
legend.position = 'bottom',
legend.key.width = unit( ncol( in.x[[1]])/3000, 'npc'),
panel.grid = element_blank(),
strip.background = element_blank()) +
theme.obj
}
#======================================================================#
# do the predictions for many months
#======================================================================#
month.trainer <- function( name.m = names( mets2005.m)[1],
name.p = names( mets2006.m)[1],
name.x,
y.m,
ddm.m = ddm.m.all,
mets.m = mets.m.all,
emiss.m = d_nonegu.r,
idwe.m = idwe.m,
.mask.use = NULL,
cov.names = c( "temp", "rhum", "vwnd", "uwnd", "wspd")){ #, names( d_nonegu.r))){
# create training dataset
ddm.use <- ddm.m[[name.m]]
hyads.use <- y.m[[name.m]]
mets.use <- mets.m[[name.m]]
emiss.use <- emiss.m
idwe.use <- idwe.m[[name.m]]
# create prediction dataset
ddm.use.p <- ddm.m[[name.p]]
hyads.use.p <- y.m[[name.p]]
mets.use.p <- mets.m[[name.p]]
idwe.use.p <- idwe.m[[name.p]]
# fix names
names( ddm.use) <- 'cmaq.ddm'
names( ddm.use.p) <- 'cmaq.ddm'
names( hyads.use) <- name.x
names( hyads.use.p) <- name.x
names( idwe.use) <- 'tot.sum.idwe'
names( idwe.use.p) <- 'tot.sum.idwe'
# combine each dataset as stacks
dat.s <- project_and_stack( hyads.use, ddm.use, idwe.use,
mets.use, emiss.use, mask.use = .mask.use)
dat.p <- project_and_stack( hyads.use.p, ddm.use.p, idwe.use.p,
mets.use.p, emiss.use, mask.use = .mask.use)
# do the modeling
pred <- lm.hyads.ddm.holdout( dat.stack = dat.s, dat.stack.pred = dat.p, x.name = name.x,
ho.frac = 0, covars.names = cov.names,
name.idwe = 'tot.sum.idwe', return.mods = T)
return( pred)
}
#======================================================================#
# project and stack in one command
# ## need to update masking in all functions - cells with centroid not covered are cropped
## https://gis.stackexchange.com/questions/255025/r-raster-masking-a-raster-by-polygon-also-remove-cells-partially-covered
## should probably update usa mask too - need to use USAboundaries for consistency
#======================================================================#
project_and_stack <- function( ..., mask.use = NULL){
list.r <- list( ...)
if( length( list.r) > 1)
for( r in 2: length( list.r)){
list.r[[r]] <- projectRaster( list.r[[r]], list.r[[1]], alignOnly = F)
}
# mask over usa
if( !is.null( mask.use)){
mask.use <- spTransform( mask.use, crs( list.r[[1]]))
mask_crop <- rasterize( mask.use, list.r[[1]][[1]], getCover=TRUE)
mask_crop[mask_crop==0] <- NA
mask_crop[!is.na( mask_crop)] <- 1
list.out <- lapply( list.r, function( x){
x1 <- reclassify( x, c( NA, NA, 0))
# x[is.na(x)] <- 0
trim( mask(x1, mask_crop, maskvalue = NA),
padding = 1)
})
} else
list.out <- list.r
return( stack( list.out))
}
#======================================================================#
## function for converting individual unit concentrations to ugm3
# inputs - filename of grid w/ unit or summed impacts
# - model
# - covariate raster
# - 'x' name in the raster from model
#
# output - data table of state, pop-wgted impact for all input units
#======================================================================#
state_exposurer <- function(
month.n,
fstart,
year.m = 2006,
model.dataset = preds.mon.idwe06w05,
model.name = 'model.cv', #'model.gam'
name.x = 'idwe',
mask.use = mask.usa,
ddm.m = ddm.m.all,
mets.m = mets.m.all,
emiss.m = d_nonegu.r,
idwe.m. = idwe.m,
hyads.m = hyads.m.all,
grid_pop.r = grid_popwgt.r,
state_pops = copy( us_states.pop.dt),
p4s,
take.diff = F,
xboost = F
){
message( paste( 'Converting', month.name[month.n]))
month.N <- formatC( month.n, width = 2, flag = '0')
name.m <- paste0( 'X', year.m, '.', month.N, '.01')
model.m <- paste0( 'X2005.', month.N, '.01')
popyr.name <- paste0( 'X', year.m)
name.Date <- as.Date( name.m, format = 'X%Y.%m.%d')
if( name.x == 'idwe'){
fname <- paste0( fstart, year.m, '_', month.n, '.csv')
name.dat <- 'idwe'
} else{
fname <- paste0( fstart, year.m, '_', month.N, '.csv')
name.dat <- 'hyads'
}
# create prediction dataset
# ddm.use.p <- ddm.m[[name.m]]
mets.use.p <- mets.m[[name.m]]
idwe.use.p <- idwe.m.[[name.m]]
hyads.use.p <- hyads.m[[name.m]]
# fix names
# names( ddm.use.p) <- 'cmaq.ddm'
names( hyads.use.p) <- 'hyads'
names( idwe.use.p) <- 'idwe'
# rename population raster
grid_pop.r <- grid_pop.r[[popyr.name]]
names( grid_pop.r) <- 'pop'
dat.s <- project_and_stack( #ddm.use.p,
hyads.use.p, idwe.use.p,
mets.use.p, emiss.m, grid_pop.r, mask.use = mask.use)
# assign state names to raster
mask.r <- rasterize( mask.use[,'state_abbr'], dat.s[[1]])
mask.a <- levels( mask.r)[[1]]
dat.s$ID <- mask.r
# read in, remove
x.in1 <- fread( fname, drop = c( 'V1'))
x.in1[is.na( x.in1)] <- 0
suppressWarnings( x.in1[, `:=` ( yearmon = NULL, yearmonth = NULL)])
x.in2 <- x.in1[, colSums(x.in1) != 0, with = F]
suppressWarnings( x.in2[, `:=` ( x = NULL, y = NULL)])
x.in <- cbind( x.in1[, .( x, y)], x.in2)
# rasterize new x file
x.r <- rasterFromXYZ( x.in, crs = p4s)
x.n <- paste0( 'X', names( x.in)[!(names( x.in) %in% c( 'x', 'y'))])
x.n <- gsub( '^XX', 'X', x.n)
names( x.r) <- x.n
x.proj <- project_and_stack( dat.s[[1]], x.r, mask.use = mask.use)
names( x.proj)[2:dim( x.proj)[3]] <- x.n
# pick out the appropriate model
model.use <- model.dataset[ model.name, model.m][[1]]
# predict the base scenario (zero hyads/idwe)
dat.use0<- copy( dat.s)
dat.use0[[name.dat]] <- 0
dat.coords <- coordinates( dat.s)
dat_raw0.dt <- data.table( cbind( dat.coords, values( dat.use0)))
# xboost requires special treatment
if( xboost){
dat_raw0.dt.trim <- dat_raw0.dt[, model.use$feature_names, with = F]
xhold1c <- xgb.DMatrix( as.matrix( dat_raw0.dt.trim))
dat.pred0 <- predict( model.use, newdata = xhold1c)
} else
dat.pred0 <- predict( model.use, newdata = dat_raw0.dt)
dats0.r <- rasterFromXYZ( data.table( dat.coords, dat.pred0), crs = p4s)
# do the predictions
pred_pm.r <- brick( pbmcapply::pbmclapply( x.n, function( n){
gc()
# assign unit to prediction dataset
dat.use <- copy( dat.s)
dat.use[[name.dat]] <- x.proj[[n]]
# if zero impacts, return raster with only zeros
if( sum( values(x.proj[[n]]), na.rm = T) == 0)
return( x.proj[[n]])
# set up the dataset
dat_raw.dt <- data.table( cbind( dat.coords, values( dat.use)))
# do the predictions
if( xboost){
dat_raw.dt.trim <- dat_raw.dt[, model.use$feature_names, with = F]
xhold.pred <- xgb.DMatrix( as.matrix( dat_raw.dt.trim))
dat.pred <- predict( model.use, newdata = xhold.pred)
} else
dat.pred <- predict( model.use, newdata = dat_raw.dt)
# rasterize
dats.r <- rasterFromXYZ( data.table( dat.coords, dat.pred), crs = p4s)
#take difference from base
if( take.diff){
dats.r2 <- dats.r - dats0.r
} else
dats.r2 <- dats.r
names( dats.r2) <- n
return( dats.r2)
}))
# multiply by population
pred_popwgt.r <- pred_pm.r * dat.s[['pop']]
names( pred_popwgt.r) <- names( pred_pm.r)
#calculate raw average for the entire domain
pred_pm.us <- colMeans( data.table( values( pred_pm.r)), na.rm = T)
pred_pm.us.dt <- data.table( uID = names( pred_pm.us),
mean_pm = pred_pm.us,
state_abbr = 'US',
ID = 100)
#calculate pop-weightedverage for the entire domain
pred_pm.pw.us <- colSums( na.omit( data.table( values( pred_popwgt.r)), na.rm = T))
pred_pm.pw.us.dt <- data.table( uID = names( pred_pm.pw.us),
mean_popwgt = pred_pm.pw.us,
state_abbr = 'US',
ID = 100)
# calculate raw average by state
pred_pm.r$ID <- mask.r
pred_pm.dt <- data.table( values( pred_pm.r))
pred_pm.dt.m <- na.omit( melt( pred_pm.dt, id.vars = 'ID', variable.name = 'uID'))
pred_pm.dt.s <- pred_pm.dt.m[, .( mean_pm = mean( value)), by = .( ID, uID)]
pred_pm.dt.s <- merge( pred_pm.dt.s, mask.a, by = 'ID')
# calculate population-weighted by state
pred_popwgt.r$ID <- mask.r
pred_popwgt.dt <- data.table( values( pred_popwgt.r))
pred_popwgt.dt.m <- na.omit( melt( pred_popwgt.dt, id.vars = 'ID', variable.name = 'uID'))
pred_popwgt.dt.s <- pred_popwgt.dt.m[, .( mean_popwgt = sum( value)), by = .( ID, uID)]
pred_popwgt.dt.s <- merge( pred_popwgt.dt.s, mask.a, by = 'ID')
# bind with united states pops
pred_pm <- rbind( pred_pm.dt.s, pred_pm.us.dt)
pred_pm.pw <- rbind( pred_popwgt.dt.s, pred_pm.pw.us.dt)
# now just divide by each state's total population
setnames( state_pops, popyr.name, 'pop_amnt')
state_pops_lite <- state_pops[, .( state_abbr, pop_amnt)]
pop.tot <- data.table( state_abbr = 'US', pop_amnt = sum( state_pops$pop_amnt))
state_pops_lite <- rbind( state_pops_lite, pop.tot)
pred_popwgt.out <- merge( pred_pm.pw, state_pops_lite, by = 'state_abbr')
# divide by total population
pred_popwgt.out[, `:=` ( popwgt = mean_popwgt / pop_amnt,
month = name.Date)]
# merge pop-weighted and raw dataset
out <- merge( pred_popwgt.out, pred_pm, by = c( 'state_abbr', 'ID', 'uID'))
return( list( popwgt_states = out, pred_pm.r = pred_pm.r, zero_out.r = dats0.r))
}
#======================================================================#
## same as above, but for a year
#======================================================================#
state_exposurer.year <- function(
fname,
year.m = 2006,
model.use = preds.ann.hyads06w05$model.gam,
name.x = 'idwe',
mask.use = mask.usa,
dat.a = dats2006.a,
grid_pop.r = grid_popwgt.r,
state_pops = copy( us_states.pop.dt),
p4s,
take.diff = F,
xboost = F,
raw = F
){
message( paste( 'Converting', year.m))
# rename population raster
popyr.name <- paste0( 'X', year.m)
grid_pop.r <- grid_pop.r[[popyr.name]]
names( grid_pop.r) <- 'pop'
# assign state names to raster
mask.r <- rasterize( mask.use[,'state_abbr'], dat.a[[1]])
mask.a <- levels( mask.r)[[1]]
dat.a$ID <- mask.r
dat.a <- project_and_stack( dat.a, grid_pop.r)
# read in, rasterize new x file
if( name.x == 'hyads'){
x.in <- fread( fname, drop = c( 'V1', 'year.E', 'year.H'))
x.cast <- dcast( x.in, x + y ~ uID, value.var = 'hyads')
name.dat <- 'hyads'
}
if( name.x == 'idwe'){
x.cast <- fread( fname, drop = c( 'V1'))
name.dat <- 'idwe'
}
# cast and rasterize
x.r <- rasterFromXYZ( x.cast, crs = p4s)
x.n <- paste0( 'X', names( x.cast)[!(names( x.cast) %in% c( 'x', 'y'))])
x.n <- gsub( '^XX', 'X', x.n)
names( x.r) <- x.n
x.proj <- project_and_stack( dat.a[[1]], x.r, mask.use = mask.use)
# predict the base scenario (zero hyads/idwe)
dat.use0<- copy( dat.a)
dat.use0[[name.dat]] <- 0
dat.coords <- coordinates( dat.a)
dat_raw0.dt <- data.table( cbind( dat.coords, values( dat.use0)))
# do the prediction if not taking raw values
if( !raw){
# xboost requires special treatment
if( xboost){
dat_raw0.dt.trim <- dat_raw0.dt[, model.use$feature_names, with = F]
xhold1c <- xgb.DMatrix( as.matrix( dat_raw0.dt.trim))
dat.pred0 <- predict( model.use, newdata = xhold1c)
} else
dat.pred0 <- predict( model.use, newdata = dat_raw0.dt)
dats0.r <- rasterFromXYZ( data.table( dat.coords, dat.pred0), crs = p4s)
} else{
dats0.r <- rasterFromXYZ( dat_raw0.dt[, c( 'x', 'y', name.dat), with = F], crs = p4s)
}
# do the predictions
pred_pm.r <- brick( pbmcapply::pbmclapply( x.n, function( n){
gc()
# if zero impacts, return raster with only zeros
if( sum( values(x.proj[[n]]), na.rm = T) == 0 | raw)
return( x.proj[[n]])
# assign unit to prediction dataset
dat.use <- copy( dat.a)
dat.use[[name.dat]] <- x.proj[[n]]
# set up the dataset
dat_raw.dt <- data.table( cbind( dat.coords, values( dat.use)))
# do the predictions
if( xboost){
dat_raw.dt.trim <- dat_raw.dt[, model.use$feature_names, with = F]
xhold.pred <- xgb.DMatrix( as.matrix( dat_raw.dt.trim))
dat.pred <- predict( model.use, newdata = xhold.pred)
} else
dat.pred <- predict( model.use, newdata = dat_raw.dt)
# rasterize
dats.r <- rasterFromXYZ( data.table( dat.coords, dat.pred), crs = p4s)
#take difference from base
if( take.diff){
dats.r2 <- dats.r - dats0.r
} else
dats.r2 <- dats.r
names( dats.r2) <- n
return( dats.r2)
}))
# multiply by population
pred_popwgt.r <- pred_pm.r * dat.a[['pop']]
names( pred_popwgt.r) <- names( pred_pm.r)
#calculate raw average for the entire domain
pred_pm.us <- colMeans( data.table( values( pred_pm.r)), na.rm = T)
pred_pm.us.dt <- data.table( uID = names( pred_pm.us),
mean_pm = pred_pm.us,
state_abbr = 'US',
ID = 100)
#calculate pop-weightedverage for the entire domain
pred_pm.pw.us <- colSums( na.omit( data.table( values( pred_popwgt.r)), na.rm = T))
pred_pm.pw.us.dt <- data.table( uID = names( pred_pm.pw.us),
mean_popwgt = pred_pm.pw.us,
state_abbr = 'US',
ID = 100)
# calculate raw average by state
pred_pm.r$ID <- mask.r
pred_pm.dt <- data.table( values( pred_pm.r))
pred_pm.dt.m <- na.omit( melt( pred_pm.dt, id.vars = 'ID', variable.name = 'uID'))
pred_pm.dt.s <- pred_pm.dt.m[, .( mean_pm = mean( value)), by = .( ID, uID)]
pred_pm.dt.s <- merge( pred_pm.dt.s, mask.a, by = 'ID')
# calculate population-weighted by state
pred_popwgt.r$ID <- mask.r
pred_popwgt.dt <- data.table( values( pred_popwgt.r))
pred_popwgt.dt.m <- na.omit( melt( pred_popwgt.dt, id.vars = 'ID', variable.name = 'uID'))
pred_popwgt.dt.s <- pred_popwgt.dt.m[, .( mean_popwgt = sum( value)), by = .( ID, uID)]
pred_popwgt.dt.s <- merge( pred_popwgt.dt.s, mask.a, by = 'ID')
# bind with united states pops
pred_pm <- rbind( pred_pm.dt.s, pred_pm.us.dt)
pred_pm.pw <- rbind( pred_popwgt.dt.s, pred_pm.pw.us.dt)
# now just divide by each state's total population
setnames( state_pops, popyr.name, 'pop_amnt')
state_pops_lite <- state_pops[, .( state_abbr, pop_amnt)]
pop.tot <- data.table( state_abbr = 'US', pop_amnt = sum( state_pops$pop_amnt))
state_pops_lite <- rbind( state_pops_lite, pop.tot)
pred_popwgt.out <- merge( pred_pm.pw, state_pops_lite, by = 'state_abbr')
# divide by total population
pred_popwgt.out[, `:=` (popwgt = mean_popwgt / pop_amnt,
year = year.m)]
# merge pop-weighted and raw dataset
out <- merge( pred_popwgt.out, pred_pm, by = c( 'state_abbr', 'ID', 'uID'))
return( list( popwgt_states = out, pred_pm.r = pred_pm.r, zero_out.r = dats0.r))
}
#======================================================================#
## calculate evaluation metrics
#======================================================================#
evals.fn <- function( Yhat, Yact){
num.diff <- sum( Yhat - Yact)
abs.diff <- sum( abs( Yhat - Yact))
denom <- sum( Yact)
metrics <- data.table( NMB = num.diff / denom,
NME = abs.diff / denom,
MB = num.diff / length( Yhat),
ME = abs.diff / length( Yhat),
RMSE = sqrt( sum( ( Yhat - Yact) ^ 2) / length( Yhat)),
R.p = cor( Yhat, Yact, method = 'pearson'),
R.s = cor( Yhat, Yact, method = 'spearman'))
return( metrics)
}
#======================================================================#
## function for converting individual unit concentrations to ugm3
# inputs - filename of grid w/ unit or summed impacts
# - model
# - covariate raster
# - 'x' name in the raster from model
#
# output - data table of state, pop-wgted impact for all input units
#======================================================================#
hyads_to_pm25 <- function(
month.n = NULL,
fstart,
year.m = 2006,
model.dataset = preds.mon.idwe06w05,
model.name = 'model.cv', #'model.gam'
name.x = 'idwe',
mask.use = mask.usa,
ddm.m = ddm.m.all,
mets.m = mets.m.all,
emiss.m = d_nonegu.r,
idwe.m. = idwe.m,
hyads.m = hyads.m.all,
take.diff = T,
p4s
){
message( paste( 'Converting', month.name[month.n]))
month.N <- formatC( month.n, width = 2, flag = '0')
name.m <- paste0( 'X', year.m, '.', month.N, '.01')
model.m <- paste0( 'X2005.', month.N, '.01')
popyr.name <- paste0( 'X', year.m)
name.Date <- as.Date( name.m, format = 'X%Y.%m.%d')
if( name.x == 'idwe'){
fname <- paste0( fstart, year.m, '_', month.n, '.csv')
name.dat <- 'idwe'
} else{
fname <- paste0( fstart, year.m, '_', month.N, '.csv')
name.dat <- 'hyads'
}
# create prediction dataset
# ddm.use.p <- ddm.m[[name.m]]
mets.use.p <- mets.m[[name.m]]
idwe.use.p <- idwe.m.[[name.m]]
hyads.use.p <- hyads.m[[name.m]]
# fix names
# names( ddm.use.p) <- 'cmaq.ddm'
names( hyads.use.p) <- 'hyads'
names( idwe.use.p) <- 'idwe'
# rename population raster
grid_pop.r <- grid_pop.r[[popyr.name]]
names( grid_pop.r) <- 'pop'
dat.s <- project_and_stack( #ddm.use.p,
hyads.use.p, idwe.use.p,
mets.use.p, emiss.m, grid_pop.r, mask.use = mask.use)
# assign state names to raster
mask.r <- rasterize( mask.use[,'state_abbr'], dat.s[[1]])
mask.a <- levels( mask.r)[[1]]
dat.s$ID <- mask.r
# read in, remove
x.in1 <- fread( fname, drop = c( 'V1'))
x.in1[is.na( x.in1)] <- 0
suppressWarnings( x.in1[, `:=` ( yearmon = NULL, yearmonth = NULL)])
x.in2 <- x.in1[, colSums(x.in1) != 0, with = F]
suppressWarnings( x.in2[, `:=` ( x = NULL, y = NULL)])
x.in <- cbind( x.in1[, .( x, y)], x.in2)
# rasterize new x file
x.r <- rasterFromXYZ( x.in, crs = p4s)
x.n <- paste0( 'X', names( x.in)[!(names( x.in) %in% c( 'x', 'y'))])
x.n <- gsub( '^XX', 'X', x.n)
names( x.r) <- x.n
x.proj <- project_and_stack( dat.s[[1]], x.r, mask.use = mask.use)
names( x.proj)[2:dim( x.proj)[3]] <- x.n
# pick out the appropriate model
model.use <- model.dataset[ model.name, model.m][[1]]
# predict the base scenario (zero hyads/idwe)
dat.use0<- copy( dat.s)
dat.use0[[name.dat]] <- 0
dat.coords <- coordinates( dat.s)
dat_raw0.dt <- data.table( cbind( dat.coords, values( dat.use0)))
# xboost requires special treatment
if( xboost){
dat_raw0.dt.trim <- dat_raw0.dt[, model.use$feature_names, with = F]
xhold1c <- xgb.DMatrix( as.matrix( dat_raw0.dt.trim))
dat.pred0 <- predict( model.use, newdata = xhold1c)
} else
dat.pred0 <- predict( model.use, newdata = dat_raw0.dt)
dats0.r <- rasterFromXYZ( data.table( dat.coords, dat.pred0), crs = p4s)
# do the predictions
pred_pm.r <- brick( pbmcapply::pbmclapply( x.n, function( n){
gc()
# assign unit to prediction dataset
dat.use <- copy( dat.s)
dat.use[[name.dat]] <- x.proj[[n]]
# if zero impacts, return raster with only zeros
if( sum( values(x.proj[[n]]), na.rm = T) == 0)
return( x.proj[[n]])
# set up the dataset
dat_raw.dt <- data.table( cbind( dat.coords, values( dat.use)))
# do the predictions
if( xboost){
dat_raw.dt.trim <- dat_raw.dt[, model.use$feature_names, with = F]
xhold.pred <- xgb.DMatrix( as.matrix( dat_raw.dt.trim))
dat.pred <- predict( model.use, newdata = xhold.pred)
} else
dat.pred <- predict( model.use, newdata = dat_raw.dt)
# rasterize
dats.r <- rasterFromXYZ( data.table( dat.coords, dat.pred), crs = p4s)
#take difference from base
if( take.diff){
dats.r2 <- dats.r - dats0.r
} else
dats.r2 <- dats.r
names( dats.r2) <- n
return( dats.r2)
}))
# multiply by population
pred_popwgt.r <- pred_pm.r * dat.s[['pop']]
names( pred_popwgt.r) <- names( pred_pm.r)
#calculate raw average for the entire domain
pred_pm.us <- colMeans( data.table( values( pred_pm.r)), na.rm = T)
pred_pm.us.dt <- data.table( uID = names( pred_pm.us),
mean_pm = pred_pm.us,
state_abbr = 'US',
ID = 100)
#calculate pop-weightedverage for the entire domain
pred_pm.pw.us <- colSums( na.omit( data.table( values( pred_popwgt.r)), na.rm = T))
pred_pm.pw.us.dt <- data.table( uID = names( pred_pm.pw.us),
mean_popwgt = pred_pm.pw.us,
state_abbr = 'US',
ID = 100)
# calculate raw average by state
pred_pm.r$ID <- mask.r
pred_pm.dt <- data.table( values( pred_pm.r))
pred_pm.dt.m <- na.omit( melt( pred_pm.dt, id.vars = 'ID', variable.name = 'uID'))
pred_pm.dt.s <- pred_pm.dt.m[, .( mean_pm = mean( value)), by = .( ID, uID)]
pred_pm.dt.s <- merge( pred_pm.dt.s, mask.a, by = 'ID')
# calculate population-weighted by state
pred_popwgt.r$ID <- mask.r
pred_popwgt.dt <- data.table( values( pred_popwgt.r))
pred_popwgt.dt.m <- na.omit( melt( pred_popwgt.dt, id.vars = 'ID', variable.name = 'uID'))
pred_popwgt.dt.s <- pred_popwgt.dt.m[, .( mean_popwgt = sum( value)), by = .( ID, uID)]
pred_popwgt.dt.s <- merge( pred_popwgt.dt.s, mask.a, by = 'ID')
# bind with united states pops
pred_pm <- rbind( pred_pm.dt.s, pred_pm.us.dt)
pred_pm.pw <- rbind( pred_popwgt.dt.s, pred_pm.pw.us.dt)
# now just divide by each state's total population
setnames( state_pops, popyr.name, 'pop_amnt')
state_pops_lite <- state_pops[, .( state_abbr, pop_amnt)]
pop.tot <- data.table( state_abbr = 'US', pop_amnt = sum( state_pops$pop_amnt))
state_pops_lite <- rbind( state_pops_lite, pop.tot)
pred_popwgt.out <- merge( pred_pm.pw, state_pops_lite, by = 'state_abbr')
# divide by total population
pred_popwgt.out[, `:=` ( popwgt = mean_popwgt / pop_amnt,
month = name.Date)]
# merge pop-weighted and raw dataset
out <- merge( pred_popwgt.out, pred_pm, by = c( 'state_abbr', 'ID', 'uID'))
return( list( popwgt_states = out, pred_pm.r = pred_pm.r, zero_out.r = dats0.r))
}
#======================================================================#
## function for converting individual unit concentrations to ugm3
# inputs - filename of grid w/ unit or summed impacts
# - model
# - covariate raster
# - 'x' name in the raster from model
#
# output - data table of ugm3 impacts for all input units
#======================================================================#
hyads_to_pm25_unit <- function(
year.m = 2006,
month.n = NULL,
fstart = NULL,
fstart.total,
fstart_out,
model.dataset = preds.mon.idwe06w05,
model.name = 'model.cv', #'model.gam'
name.x = 'hyads',
mask.use = mask.usa,
met.dest = '/projects/HAQ_LAB/lhennem/data/disperseR/HyADS_to_pm25/met',
total = F,
p4s
){
message( paste( 'Converting', month.name[month.n], year.m))
#define the met layer names, do the actual downloading
Sys.setenv(TZ='UTC')
layer.names <- c( "air.2m.mon.mean.nc",
"apcp.mon.mean.nc",
"rhum.2m.mon.mean.nc",
"vwnd.10m.mon.mean.nc",
"uwnd.10m.mon.mean.nc")
names( layer.names) <- c( "temp", "apcp", "rhum", "vwnd", "uwnd")
# do the data downloading
# set destination parameter to where you want the data downloaded,
# for example, destination = '~/Desktop'
list.met <- lapply( layer.names,
downloader.fn,
destination = met.dest,
dataset = 'NARR')
# annual or month-specific actions
if( is.null( month.n)){
# define file nanmes
fname <- paste0( fstart, year.m, '.fst')
fname.total <- paste0( fstart.total, year.m, '.fst')
fname_out <- paste0( fstart_out, year.m, '.fst')
# download met data
mets.use.p <- suppressWarnings(
usa.functioner( year.m, list.met, dataset = 'NARR',
avg.period = 'year', return.usa.sub = F)
)
# pick out the appropriate model
model.use <- model.dataset[[ model.name]]
# get the prediction crs
model.rast <- model.dataset$Y.ho.hat.raster
model.csr <- crs( model.rast)
} else {
# define date/month names
month.N <- formatC( month.n, width = 2, flag = '0')
name.m <- paste0( 'X', year.m, '.', month.N, '.01')
model.m <- paste0( 'X2005.', month.N, '.01')
name.Date <- as.Date( name.m, format = 'X%Y.%m.%d')
# define file nanmes
fname <- paste0( fstart, year.m, '_', month.N, '.fst')
fname.total <- paste0( fstart.total, year.m, '_', month.N, '.fst')
fname_out <- paste0( fstart_out, year.m, '_', month.N, '.fst')
# download met data
mets.m <- suppressWarnings(
usa.functioner( year.m, list.met, dataset = 'NARR',
avg.period = 'month', return.usa.sub = F)
)
mets.use.p <- mets.m[[name.m]]
# pick out the appropriate model
model.use <- model.dataset[ model.name, model.m][[1]]
# get the prediction crs
model.rast <- model.dataset['Y.ho.hat.raster',][[model.m]]
model.csr <- crs( model.rast)
}
# create prediction dataset
mets.use.p <- projectRaster( mets.use.p, crs = model.csr)
dat.s <- project_and_stack( model.rast, mets.use.p, mask.use = mask.use)
# read in total hyads
hyads.total.dt <- read.fst( fname.total, columns = c( 'x', 'y', 'hyads'), as.data.table = T)
hyads.total.r <- rasterFromXYZ( hyads.total.dt, crs = p4s)
# predict with total hyads
dat.use.s<- copy( dat.s)
hyads.total.proj <- project_and_stack( dat.use.s, hyads.total.r, mask.use = mask.use)
# predict the base scenario
dat.coords <- coordinates( hyads.total.proj)
dat_total.dt <- data.table( cbind( dat.coords, values( hyads.total.proj)))
dat.total.pred <- predict( model.use, newdata = dat_total.dt) %>%
exp()
# predict a 0-hyads scenario
dat_0.dt <- dat_total.dt[, hyads := 0]
dat.0.pred <- predict( model.use, newdata = dat_0.dt, type = 'response') %>%
exp()
# predict pm into a raster
pred_pm.r <- rasterFromXYZ( data.table( dat.coords, dat.total.pred), crs = p4s) %>%
projectRaster( dat.use.s)
pred_0.r <- rasterFromXYZ( data.table( dat.coords, dat.0.pred), crs = p4s) %>%
projectRaster( dat.use.s)
# remove mean
pred_nomean.r <- pred_pm.r - pred_0.r
#read in, project hyads
if( total){
# collect coordinates and values
coords.out <- coordinates( pred_nomean.r)
vals.out <- values( pred_nomean.r)
# write out the data.table as fst
pred_pm.dt <- data.table( cbind( coords.out, vals.out))
# print( summary( pred_pm.dt[, 6:10]))
write_fst( pred_pm.dt, fname_out)
note <- paste( 'Unit conversions saved to', fname_out)
message( note)
return( note)
} else {
hyads.dt <- read.fst( fname, columns = c( 'x', 'y', 'uID', 'hyads'), as.data.table = T)
hyads.dt <- hyads.dt[!is.na( x) & !is.na( y)]
hyads.dt.c <- dcast( hyads.dt, x + y ~ uID, value.var = 'hyads')
hyads.use.p <- rasterFromXYZ( hyads.dt.c, crs = p4s)
hyads.use.p[is.na( hyads.use.p)] <- 0
hyads.proj <- project_and_stack( dat.s[[1]], hyads.use.p, mask.use = mask.use)
hyads.proj <- dropLayer( hyads.proj, 1)
hyads.n <- paste0( 'X', names( hyads.dt.c)[!(names( hyads.dt.c) %in% c( 'x', 'y'))])
names( hyads.proj) <- hyads.n
# take fraction of total hyads
hyads.frac.total <- hyads.proj / hyads.total.proj[[name.x]]
hyads.frac.total.pm <- hyads.frac.total * pred_nomean.r
names( hyads.frac.total.pm) <- hyads.n
coords.out <- coordinates( hyads.frac.total.pm)
vals.out <- values( hyads.frac.total.pm)
# write out the data.table as fst
pred_pm.dt <- data.table( cbind( coords.out, vals.out))
# print( summary( pred_pm.dt[, 6:10]))
write_fst( pred_pm.dt, fname_out)
note <- paste( 'Unit conversions saved to', fname_out)
message( note)
return( note)
}
}
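# Illustrative call (added; not part of the original workflow). The file prefixes,
# model object, mask, and projection below are placeholders and must match the
# objects created earlier in this pipeline.
if (FALSE) {
  hyads_to_pm25_unit( year.m = 2006,
                      month.n = 7,
                      fstart = 'hyads_unit_grid_',
                      fstart.total = 'hyads_total_grid_',
                      fstart_out = 'hyads_unit_pm25_',
                      model.dataset = preds.mon.idwe06w05,
                      model.name = 'model.cv',
                      name.x = 'hyads',
                      mask.use = mask.usa,
                      total = FALSE,
                      p4s = p4s.use)
}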
|
#Functions for retrieving data from NEMWEB
#install.packages('tidyverse')
#install.packages('openxlsx')
#install.packages('sqldf')
library(tidyverse)
library(openxlsx)
library(sqldf)
library(data.table)
library(lubridate) # needed for ymd_hms() below; not attached by the packages above
setwd("C:/Users/Matthew/Google Drive/Uni/19/Thesis/Analysis/Mispricing")
external_data_location <- "D:/Thesis/Data" #for big data
###RRP
#input: yearmonth for file download, int for 0/1 for intervention pricing
rrp_fun <- function(yearmonth, int = 0){
external_data_location <- "D:/Thesis/Data/RRP" #for big data
year <- substr(yearmonth, 1, 4)
month <- substr(yearmonth, 5, 6)
url <- 0
csv_name <- paste0(external_data_location, "/PUBLIC_DVD_DISPATCHPRICE_", yearmonth, "010000.CSV")
if(!file.exists(csv_name)){
url <- paste0("http://nemweb.com.au/Data_Archive/Wholesale_Electricity/MMSDM/", year,"/MMSDM_",
year, "_", month,
"/MMSDM_Historical_Data_SQLLoader/DATA/PUBLIC_DVD_DISPATCHPRICE_",yearmonth,
"010000.zip")
temp <- tempfile()
download.file(url, temp, mode="wb", method = "curl")
unzip(temp, paste0("PUBLIC_DVD_DISPATCHPRICE_", yearmonth, "010000.CSV"),
exdir = external_data_location)
}
rrp <- fread(csv_name, sep=",", stringsAsFactors = FALSE) %>%
filter(INTERVENTION == int) %>% #intervention
select(SETTLEMENTDATE, REGIONID, INTERVENTION, RRP) %>%
mutate(SETTLEMENTDATE = ymd_hms(SETTLEMENTDATE))
if(url != 0){
unlink(temp) #delete zip
}
return(rrp)
}
###DISPATCH
dispatch_fun <- function(yearmonth){
external_data_location <- "D:/Thesis/Data/DISPATCH" #for big data
year <- substr(yearmonth, 1, 4)
month <- substr(yearmonth, 5, 6)
url <- 0
csv_name <- paste0(external_data_location, "/PUBLIC_DVD_DISPATCHLOAD_", yearmonth, "010000.CSV")
if(!file.exists(csv_name)){
url <- paste0("http://nemweb.com.au/Data_Archive/Wholesale_Electricity/MMSDM/", year,"/MMSDM_",
year, "_", month,
"/MMSDM_Historical_Data_SQLLoader/DATA/PUBLIC_DVD_DISPATCHLOAD_",yearmonth,
"010000.zip")
temp <- tempfile()
download.file(url, temp, mode="wb", method = "curl")
unzip(temp, paste0("PUBLIC_DVD_DISPATCHLOAD_", yearmonth, "010000.CSV"),
exdir = external_data_location)
}
dispatch <- fread(csv_name, sep=",", skip=1, stringsAsFactors = FALSE)
dispatch <- dispatch %>%
    filter(INTERVENTION == 0) %>% #chooses whether intervention or non-intervention prices are used
select(DUID, SETTLEMENTDATE, INTERVENTION, INITIALMW) %>%
mutate(SETTLEMENTDATE = ymd_hms(SETTLEMENTDATE))
if(url != 0){
unlink(temp) #delete zip
}
return(dispatch)
}
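#Illustrative usage (added; not part of the original script). "201901" is a
#placeholder yearmonth; both functions download the monthly MMSDM archive on
#first use and then re-read the cached CSV from the external data folder.
if (FALSE) {
  rrp_201901 <- rrp_fun("201901", int = 0)
  dispatch_201901 <- dispatch_fun("201901")
  head(rrp_201901)
}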
|
/R/Old/Functions - USED.R
|
no_license
|
MatthewKatzen/NEM_LMP
|
R
| false
| false
| 2,805
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/asymptoticTimings.R
\name{asymptoticTimings}
\alias{asymptoticTimings}
\title{Asymptotic Timings Quantifying function}
\usage{
asymptoticTimings(e, data.sizes, max.seconds)
}
\arguments{
\item{e}{An expression in the form of a function operating on 'N' (the data size the algorithm is tested against in a given run), where 'N' takes its values from the user-supplied parameter data.sizes.}
\item{data.sizes}{A vector/set of data sizes, which should preferably be a sequence in powers of ten, with mid-values included.
Example: data.sizes = 10^seq(1, 4, by = 0.5)}
\item{max.seconds}{The maximum number of seconds a single iteration is allowed to take. (once the limit has been exceeded, further computations on incrementally larger dataset sizes won't be done)
Optional, with the default value set to 1 second.}
}
\value{
A data frame comprising the timings computed by microbenchmark and the corresponding dataset sizes.
}
\description{
Function to compute benchmarked timings with different data sizes for an R expression
}
\details{
For more information regarding its implementation or functionality/usage, please check https://anirban166.github.io//Timings-function/
}
\examples{
# Quantifying the runtimes for the quick sort algorithm (with sampling performed)
# against a set of increasing input data sizes:
input.sizes = 10^seq(1, 3, by = 0.5)
asymptoticTimings(sort(sample(1:100, N, replace = TRUE), method = "quick"), input.sizes)
}
|
/man/asymptoticTimings.Rd
|
no_license
|
cran/testComplexity
|
R
| false
| true
| 1,569
|
rd
|
## The following function calculates the inverse of the special matrix created
## with the first function. However, it first checks to see if the inverse matrix has
## already been calculated. If so, it gets the inverse matrix from the cache and skips
## the computation. Otherwise, it calculates the inverse matrix and stores that matrix
## in the cache via the setinverse function.
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(solve) m <<- solve
getinverse <- function() m
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve
## the inverse from the cache.
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
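## Example usage (added for illustration; not part of the assignment file).
## The 2x2 matrix is arbitrary; the second cacheSolve() call prints
## "getting cached data" because the inverse is retrieved from the cache.
if (FALSE) {
  cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
  cacheSolve(cm)   # computes the inverse and caches it
  cacheSolve(cm)   # returns the cached inverse
}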
|
/cachematrix.R
|
no_license
|
RaykSchumann/ProgrammingAssignment2
|
R
| false
| false
| 1,224
|
r
|
# This file split from "test-occSSx.R" 2015-02-20
# test_that code for occSStime functions
# library(testthat)
context("Single-season occupancy, time covars")
test_that("occSStime with logit link", {
# Data set (Blue Ridge Salamanders)
require(wiqid)
data(salamanders)
BRS <- salamanders
# Check dots passed to nlm
expect_warning(occSStime(BRS, plot=FALSE, iterlim=4),
"Convergence may not have been reached")
res <- occSStime(BRS, p~.time, plot=FALSE)
expect_that(class(res), equals(c("wiqid", "list")))
expect_that(names(res), equals(c("call", "link", "beta", "beta.vcv", "real", "logLik")))
expect_true(is.call(res$call))
expect_that(colnames(res$real), equals(c("est", "lowCI", "uppCI")))
expect_that(rownames(res$real),
equals(c("psi", "p1", "p2", "p3", "p4", "p5")))
expect_that(round(as.vector(res$real[, 1]), 4), # estimates
equals(c(0.5799, 0.1769, 0.1327, 0.3980, 0.3537, 0.2653)))
expect_that(round(as.vector(res$real[, -1]), 4), # CIs
equals(c(0.3490, 0.0644, 0.0415, 0.1998, 0.1712, 0.1156,
0.7804, 0.4013, 0.3506, 0.6364, 0.5920, 0.4993)))
expect_that(round(AIC(res), 4), equals(167.7144))
# These are the values returned by PRESENCE
res <- occSStime(BRS, p~.time, ci=0.85, plot=FALSE)
expect_that(round(as.vector(res$real), 4),
equals(c(0.5799, 0.1769, 0.1327, 0.3980, 0.3537, 0.2653,
0.4079, 0.0852, 0.0571, 0.2443, 0.2111, 0.1462,
0.7344, 0.3314, 0.2786, 0.5747, 0.5283, 0.4323)))
# Put in some NAs
BRS[c(6,167,130,123,89,154,32,120,127,147)] <- NA
res <- occSStime(BRS, p~.time, plot=FALSE)
expect_that(round(as.vector(res$real), 4),
equals(c(0.5637, 0.1930, 0.1365, 0.3812, 0.3450, 0.2383, 0.3223,
0.0690, 0.0421, 0.1773, 0.1424, 0.0938, 0.7783, 0.4354, 0.3621, 0.6378,
0.6257, 0.4861)))
expect_that(round(AIC(res), 4), equals(153.1581))
# Put in a row of NAs
BRS[3,] <- NA
expect_error(occSStime(BRS, p~.time, plot=FALSE),
"Detection history has a row with all NAs")
res <- occSStime(BRS, p~.time, plot=FALSE, verify=FALSE)
expect_that(round(as.vector(res$real), 4),
equals(c(0.5316, 0.2107, 0.0990, 0.4166, 0.3596, 0.2604, 0.3067,
0.0758, 0.0238, 0.1952, 0.1514, 0.1031, 0.7444, 0.4650, 0.3308, 0.6778,
0.6387, 0.5188)))
expect_that(round(AIC(res), 4), equals(145.6360))
# Put in a column of NAs
BRS[, 3] <- NA
res <- occSStime(BRS, p~.time, plot=FALSE, verify=FALSE)
expect_that(round(res$real[, 1], 4),
is_equivalent_to(c(0.3579, 0.3017, 0.1471,0.3017, 0.5434, 0.3969)))
expect_that(as.vector(res$real[, 2:3]),
is_equivalent_to(rep(NA_real_, 12)))
expect_that(round(AIC(res), 4), equals(NA_real_))
# All ones:
tst <- matrix(1, 39, 5)
res <- occSStime(tst, p~.time, plot=FALSE)
expect_that(round(as.vector(res$real[,1]), 4),
is_equivalent_to(rep(1, 6)))
expect_that(as.vector(res$real[, 2:3]),
is_equivalent_to(rep(NA_real_, 12)))
expect_that(round(AIC(res), 4), equals(NA_real_))
# All zeros:
tst <- matrix(0, 39, 5)
res <- occSStime(tst, p~.time, plot=FALSE)
expect_that(as.vector(res$real),
is_equivalent_to(rep(NA_real_, 18)))
expect_that(AIC(res), equals(NA_real_))
# All NAs:
tst <- matrix(NA, 39, 5)
res <- occSStime(tst, p~.time, plot=FALSE, verify=FALSE)
expect_that(as.vector(res$real),
is_equivalent_to(rep(NA_real_, 18)))
expect_that(AIC(res),
equals(NA_real_))
# Linear trend
BRS <- salamanders
res <- occSStime(BRS, p~.Time, plot=FALSE)
# Values returned by PRESENCE:
expect_that(round(as.vector(t(res$real)), 4),
equals(c( 0.5899, 0.3505, 0.7931,
0.1865, 0.0881, 0.3523,
0.2197, 0.1251, 0.3566,
0.2569, 0.1604, 0.3849,
0.2981, 0.1811, 0.4493,
0.3428, 0.1860, 0.5436)))
expect_that(round(AIC(res), 4), equals(165.9228))
# Quadratic trend
res <- occSStime(BRS, p~.Time + I(.Time^2), plot=FALSE)
# Values returned by PRESENCE to within 0.0001
expect_that(round(as.vector(t(res$real)), 4),
equals(c( 0.5870, 0.3502, 0.7894,
0.1321, 0.0461, 0.3242,
0.2404, 0.1335, 0.3940,
0.3210, 0.1801, 0.5043,
0.3364, 0.1995, 0.5075,
0.2807, 0.1304, 0.5039)))
expect_that(round(AIC(res), 4), equals(166.3525))
} )
# ......................................................................
test_that("occSStime with probit link", {
# Data set (Blue Ridge Salamanders)
require(wiqid)
data(salamanders)
BRS <- salamanders
res <- occSStime(BRS, p~.time, plot=FALSE, link="probit")
expect_that(class(res), equals(c("wiqid", "list")))
expect_that(names(res), equals(c("call", "link", "beta", "beta.vcv", "real", "logLik")))
expect_true(is.call(res$call))
expect_that(colnames(res$real), equals(c("est", "lowCI", "uppCI")))
expect_that(rownames(res$real),
equals(c("psi", "p1", "p2", "p3", "p4", "p5")))
expect_that(round(as.vector(res$real[, 1]), 4), # estimates, same as logit
equals(c(0.5799, 0.1769, 0.1327, 0.3980, 0.3537, 0.2653)))
expect_that(round(as.vector(res$real[, -1]), 4), # CIs differ
equals(c(0.3490, 0.0587, 0.0367, 0.1940, 0.1649, 0.1091,
0.7856, 0.3863, 0.3309, 0.6353, 0.5887, 0.4908)))
expect_that(round(AIC(res), 4), equals(167.7144)) # same
# Linear trend
BRS <- salamanders
res <- occSStime(BRS, p~.Time, plot=FALSE, link="probit")
expect_that(round(as.vector(res$real[, 1]), 4),
equals(c(0.5900, 0.1841, 0.2190, 0.2574, 0.2991, 0.3435)))
expect_that(round(AIC(res), 4), equals(165.8844))
# These are NOT the same as the logit link results
# Quadratic trend
res <- occSStime(BRS, p~.Time + I(.Time^2), plot=FALSE, link="probit")
expect_that(round(as.vector(res$real[, 1]), 4),
equals(c(0.5869, 0.1345, 0.2431, 0.3185, 0.3330, 0.2825)))
expect_that(round(AIC(res), 4), equals(166.4418))
} )
|
/inst/tests/testthat/test-occSStime.R
|
no_license
|
mikemeredith/wiqid
|
R
| false
| false
| 5,998
|
r
|
# This file split from "test-occSSx.R" 2015-02-20
# test_that code for occSStime functions
# library(testthat)
context("Single-season occupancy, time covars")
test_that("occSStime with logit link", {
# Data set (Blue Ridge Salamanders)
require(wiqid)
data(salamanders)
BRS <- salamanders
# Check dots passed to nlm
expect_warning(occSStime(BRS, plot=FALSE, iterlim=4),
"Convergence may not have been reached")
res <- occSStime(BRS, p~.time, plot=FALSE)
expect_that(class(res), equals(c("wiqid", "list")))
expect_that(names(res), equals(c("call", "link", "beta", "beta.vcv", "real", "logLik")))
expect_true(is.call(res$call))
expect_that(colnames(res$real), equals(c("est", "lowCI", "uppCI")))
expect_that(rownames(res$real),
equals(c("psi", "p1", "p2", "p3", "p4", "p5")))
expect_that(round(as.vector(res$real[, 1]), 4), # estimates
equals(c(0.5799, 0.1769, 0.1327, 0.3980, 0.3537, 0.2653)))
expect_that(round(as.vector(res$real[, -1]), 4), # CIs
equals(c(0.3490, 0.0644, 0.0415, 0.1998, 0.1712, 0.1156,
0.7804, 0.4013, 0.3506, 0.6364, 0.5920, 0.4993)))
expect_that(round(AIC(res), 4), equals(167.7144))
# These are the values returned by PRESENCE
res <- occSStime(BRS, p~.time, ci=0.85, plot=FALSE)
expect_that(round(as.vector(res$real), 4),
equals(c(0.5799, 0.1769, 0.1327, 0.3980, 0.3537, 0.2653,
0.4079, 0.0852, 0.0571, 0.2443, 0.2111, 0.1462,
0.7344, 0.3314, 0.2786, 0.5747, 0.5283, 0.4323)))
# Put in some NAs
BRS[c(6,167,130,123,89,154,32,120,127,147)] <- NA
res <- occSStime(BRS, p~.time, plot=FALSE)
expect_that(round(as.vector(res$real), 4),
equals(c(0.5637, 0.1930, 0.1365, 0.3812, 0.3450, 0.2383, 0.3223,
0.0690, 0.0421, 0.1773, 0.1424, 0.0938, 0.7783, 0.4354, 0.3621, 0.6378,
0.6257, 0.4861)))
expect_that(round(AIC(res), 4), equals(153.1581))
# Put in a row of NAs
BRS[3,] <- NA
expect_error(occSStime(BRS, p~.time, plot=FALSE),
"Detection history has a row with all NAs")
res <- occSStime(BRS, p~.time, plot=FALSE, verify=FALSE)
expect_that(round(as.vector(res$real), 4),
equals(c(0.5316, 0.2107, 0.0990, 0.4166, 0.3596, 0.2604, 0.3067,
0.0758, 0.0238, 0.1952, 0.1514, 0.1031, 0.7444, 0.4650, 0.3308, 0.6778,
0.6387, 0.5188)))
expect_that(round(AIC(res), 4), equals(145.6360))
# Put in a column of NAs
BRS[, 3] <- NA
res <- occSStime(BRS, p~.time, plot=FALSE, verify=FALSE)
expect_that(round(res$real[, 1], 4),
is_equivalent_to(c(0.3579, 0.3017, 0.1471,0.3017, 0.5434, 0.3969)))
expect_that(as.vector(res$real[, 2:3]),
is_equivalent_to(rep(NA_real_, 12)))
expect_that(round(AIC(res), 4), equals(NA_real_))
# All ones:
tst <- matrix(1, 39, 5)
res <- occSStime(tst, p~.time, plot=FALSE)
expect_that(round(as.vector(res$real[,1]), 4),
is_equivalent_to(rep(1, 6)))
expect_that(as.vector(res$real[, 2:3]),
is_equivalent_to(rep(NA_real_, 12)))
expect_that(round(AIC(res), 4), equals(NA_real_))
# All zeros:
tst <- matrix(0, 39, 5)
res <- occSStime(tst, p~.time, plot=FALSE)
expect_that(as.vector(res$real),
is_equivalent_to(rep(NA_real_, 18)))
expect_that(AIC(res), equals(NA_real_))
# All NAs:
tst <- matrix(NA, 39, 5)
res <- occSStime(tst, p~.time, plot=FALSE, verify=FALSE)
expect_that(as.vector(res$real),
is_equivalent_to(rep(NA_real_, 18)))
expect_that(AIC(res),
equals(NA_real_))
# Linear trend
BRS <- salamanders
res <- occSStime(BRS, p~.Time, plot=FALSE)
# Values returned by PRESENCE:
expect_that(round(as.vector(t(res$real)), 4),
equals(c( 0.5899, 0.3505, 0.7931,
0.1865, 0.0881, 0.3523,
0.2197, 0.1251, 0.3566,
0.2569, 0.1604, 0.3849,
0.2981, 0.1811, 0.4493,
0.3428, 0.1860, 0.5436)))
expect_that(round(AIC(res), 4), equals(165.9228))
# Quadratic trend
res <- occSStime(BRS, p~.Time + I(.Time^2), plot=FALSE)
# Values returned by PRESENCE to within 0.0001
expect_that(round(as.vector(t(res$real)), 4),
equals(c( 0.5870, 0.3502, 0.7894,
0.1321, 0.0461, 0.3242,
0.2404, 0.1335, 0.3940,
0.3210, 0.1801, 0.5043,
0.3364, 0.1995, 0.5075,
0.2807, 0.1304, 0.5039)))
expect_that(round(AIC(res), 4), equals(166.3525))
} )
# ......................................................................
test_that("occSStime with probit link", {
# Data set (Blue Ridge Salamanders)
require(wiqid)
data(salamanders)
BRS <- salamanders
res <- occSStime(BRS, p~.time, plot=FALSE, link="probit")
expect_that(class(res), equals(c("wiqid", "list")))
expect_that(names(res), equals(c("call", "link", "beta", "beta.vcv", "real", "logLik")))
expect_true(is.call(res$call))
expect_that(colnames(res$real), equals(c("est", "lowCI", "uppCI")))
expect_that(rownames(res$real),
equals(c("psi", "p1", "p2", "p3", "p4", "p5")))
expect_that(round(as.vector(res$real[, 1]), 4), # estimates, same as logit
equals(c(0.5799, 0.1769, 0.1327, 0.3980, 0.3537, 0.2653)))
expect_that(round(as.vector(res$real[, -1]), 4), # CIs differ
equals(c(0.3490, 0.0587, 0.0367, 0.1940, 0.1649, 0.1091,
0.7856, 0.3863, 0.3309, 0.6353, 0.5887, 0.4908)))
expect_that(round(AIC(res), 4), equals(167.7144)) # same
# Linear trend
BRS <- salamanders
res <- occSStime(BRS, p~.Time, plot=FALSE, link="probit")
expect_that(round(as.vector(res$real[, 1]), 4),
equals(c(0.5900, 0.1841, 0.2190, 0.2574, 0.2991, 0.3435)))
expect_that(round(AIC(res), 4), equals(165.8844))
# These are NOT the same as the logit link results
# Quadratic trend
res <- occSStime(BRS, p~.Time + I(.Time^2), plot=FALSE, link="probit")
expect_that(round(as.vector(res$real[, 1]), 4),
equals(c(0.5869, 0.1345, 0.2431, 0.3185, 0.3330, 0.2825)))
expect_that(round(AIC(res), 4), equals(166.4418))
} )
|
library(gapminder)
install.packages('tidyverse')
library(dplyr)
library(magrittr)
library(nycflights13)
#The filter function
filter(gapminder, lifeExp < 29)
filter(gapminder, country == "Afghanistan", year > 1981)
filter(gapminder, continent %in% c("Asia", "Africa"))
#The same thing for vectors
gapminder[gapminder$lifeExp < 29, ]
subset(gapminder, country == "Rwanda")
head(gapminder)
gapminder %>% head(3)
head(select(gapminder, year, lifeExp),4)
#Below is the same thing, but with a pipe
gapminder %>%
select(year, lifeExp) %>%
head(4)
gapminder %>%
filter(country == "Cambodia") %>%
select(year, lifeExp)
#Below is the same thing
gapminder[gapminder$country == "Cambodia", c("year", "lifeExp")]
#To demonstrate the following functions, load a different dataset
msleep <- read.csv("https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/msleep_ggplot2.csv")
head(msleep)
msleep
#Order by a single column
msleep %>% arrange(order) %>% head
#By several columns
msleep %>%
select(name, order, sleep_total) %>%
arrange(order, sleep_total) %>%
head
#Filter and sort in descending order
msleep %>%
select(name, order, sleep_total) %>%
arrange(order, sleep_total) %>%
filter(sleep_total >= 16)
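#Added illustration (not in the original classwork): arrange() sorts ascending by
#default, so a truly descending sort needs desc()
msleep %>%
  select(name, order, sleep_total) %>%
  arrange(order, desc(sleep_total)) %>%
  filter(sleep_total >= 16)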
#Adding columns
msleep %>%
select(name, sleep_rem, sleep_total) %>%
mutate(rem_proportion = sleep_rem / sleep_total) %>%
head
#Getting summaries
msleep %>%
summarise(avg_sleep = mean(sleep_total),
min_sleep = min(sleep_total),
max_sleep = max(sleep_total),
total = n())
msleep %>%
group_by(order) %>%
summarise(avg_sleep = mean(sleep_total),
min_sleep = min(sleep_total),
max_sleep = max(sleep_total),
total = n())
msleep %>%
rename(Name = name, Genus = genus, Vore = vore) %>%
head
tbl_df(msleep)
glimpse(msleep)
msleep %>%
group_by(order, sleep_total) %>%
tally
msleep %>% ungroup
|
/classwork6/classwork6.R
|
no_license
|
DimaLokshteyn/MD-DA-2018
|
R
| false
| false
| 2,227
|
r
|
## XGBoost model
# runs the models
# libraries and functions ---------------------------------------------------
source("src/funciones.R")
source("src/load_librerias.R")
# bases ------------------------------------------------------------------
### TRAIN
base_tv <- readRDS(file="data/final/base_train_validation.rds")
# train predictors
x_train <- base_tv %>% dplyr::select(-gender) %>% dplyr::mutate_if(is.character, as.factor)
# train response variable
y_train <- base_tv$gender
### TEST
base_test <- readRDS(file="data/final/base_test.rds")
# test predictors
x_test <- base_test %>% dplyr::select(-gender) %>% dplyr::mutate_if(is.character, as.factor)
# test response variable
y_test <- base_test$gender
# train-test sets for trying models with only the word variables
text_train <- x_train %>% dplyr::select(dplyr::starts_with("t_"),
dplyr::starts_with("d_"))
text_test <- x_test %>% dplyr::select(dplyr::starts_with("t_"),
dplyr::starts_with("d_"))
###############
# validation methods ------------------------------------------------------
# repeated CV and grid search (check exactly what this does)
train_rcv <- caret::trainControl(method = "repeatedcv",
number = 10,
repeats = 3,
search = "grid",
                                 # included so ROC can be computed (see the caret docs)
classProbs=T)
# k-fold CV
train_cv <- caret::trainControl(method="cv",
number=5,
classProbs=T)
# user-defined parameters, without validation:
train_simple <- caret::trainControl(method="none",
classProbs=T)
## Numeric-only dataset
x_train_xg <- x_train %>% dplyr::select(-c(user_timezone,color_link, color_side))
x_test_xg <- x_test %>% dplyr::select(-c(user_timezone,color_link, color_side))
## XGBOOST model
xg_param <- expand.grid("nrounds" = c(10,15),
"lambda" = c(0,1,2,3),
"alpha" = c(0,1),
"eta" = c(0.01))
# model
xg_mod <- caret::train(x=x_train_xg,
y=as.factor(y_train),
method="xgbLinear",
trControl=train_cv,
tuneGrid=xg_param)
xg_mod
xg_mod$results
# confusion matrix and accuracy
xg_pred <- predict(xg_mod, newdata=x_test_xg)
xg_cm <- caret::confusionMatrix(xg_pred, as.factor(y_test))
xg_cm$table
xg_cm$overall[1]
# With more observations, test accuracy improves
# Reaches 0.61 on test, fairly balanced across the classes it predicts
# With nrounds = 15, lambda = 0, alpha = 1, eta = 0.01 it reaches 0.63
# Better at Brand and Female than Male, as with all the other models...
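# Added illustration (not in the original script): the objects behind the comments above
xg_mod$bestTune   # hyperparameter combination selected by caret
xg_cm$byClass     # per-class sensitivity/specificity etc.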
|
/src/modelo_xg.R
|
no_license
|
fbetteo/dm-TwitterGender
|
R
| false
| false
| 2,831
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_manip.R
\name{data_manip}
\alias{data_manip}
\title{Interactively manipulate master files.}
\usage{
data_manip(use_afs = TRUE, update = FALSE, data = NULL)
}
\arguments{
\item{use_afs}{Use master files from AFS}
\item{update}{Update AFS files before grabbing.}
\item{data}{Data to use (default NULL, and use AFS data)}
}
\description{
Mess around with data interactively (clean/reshape etc.)
}
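\examples{
# Added illustration, not taken from the package documentation; `my_data`
# is a placeholder data frame passed in place of the AFS master files.
\dontrun{
data_manip(use_afs = FALSE, update = FALSE, data = my_data)
}
}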
|
/man/data_manip.Rd
|
no_license
|
nverno/iclean
|
R
| false
| true
| 480
|
rd
|
labbcat.url <- "https://labbcat.canterbury.ac.nz/demo"
test_that("getTranscriptIdsWithParticipant works", {
skip_on_cran() # don't run tests that depend on external resource on CRAN
if (!is.null(labbcatCredentials(labbcat.url, "demo", "demo"))) skip("Server not available")
ids <- getTranscriptIdsWithParticipant(labbcat.url, "UC427_ViktoriaPapp_A_ENG")
expect_equal(length(ids), 1)
expect_false("QB247_Jacqui.eaf" %in% ids)
expect_true("UC427_ViktoriaPapp_A_ENG.eaf" %in% ids)
})
test_that("getTranscriptIdsWithParticipant empty result is correct type", {
skip_on_cran() # don't run tests that depend on external resource on CRAN
if (!is.null(labbcatCredentials(labbcat.url, "demo", "demo"))) skip("Server not available")
ids <- getTranscriptIdsWithParticipant(labbcat.url, "nonexistent")
expect_equal(length(ids), 0)
})
|
/tests/testthat/test-getTranscriptIdsWithParticipant.R
|
no_license
|
cran/nzilbb.labbcat
|
R
| false
| false
| 866
|
r
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{methods}
\name{arrangeViewports}
\alias{arrangeViewports}
\alias{arrangeViewports,list-method}
\title{Determine optimal plotting arrangement of RasterStack}
\usage{
arrangeViewports(extents, name = NULL)
\S4method{arrangeViewports}{list}(extents, name = NULL)
}
\arguments{
\item{toPlot}{Raster* object}
\item{axes}{passed from Plot}
}
\description{
Hidden function.
}
\details{
This assesses the device geometry, the map geometry, and the number of rasters
to plot and builds an object that will be used by the Plot functions to plot
them efficiently
}
|
/SpaDES-master/man/arrangeViewports.Rd
|
no_license
|
B-Ron12/RCodeSK
|
R
| false
| false
| 619
|
rd
|
library(alr4)
### Name: florida
### Title: Florida presidential election
### Aliases: florida
### Keywords: datasets
### ** Examples
head(florida)
## maybe str(florida) ; plot(florida) ...
|
/data/genthat_extracted_code/alr4/examples/florida.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 196
|
r
|
plot3 <- function(){
##For transforming Dates
library(lubridate)
library(ggplot2)
library(tidyr)
library(dplyr)
##Reading data
file <- file.path(".","household_power_consumption.txt")
data <- read.table(file,header=T,na.strings=c("?","NA"),sep=";")
##Converting Dates
data$Date <- dmy(data$Date)
##Subsetting
data <- subset(data, Date %in% c(ymd("2007/2/1"),ymd("2007/2/2")))
  ##Creating DateTime column
data$datetime <- paste(data[,"Date"],data[,"Time"]) %>% ymd_hms()
##For plotting
data <- dplyr::select(data,Global_active_power,datetime,Sub_metering_1,Sub_metering_2,Sub_metering_3)
data <- tidyr::gather(data,key="Sub_Metering",value="Value",Sub_metering_1,Sub_metering_2,Sub_metering_3)
##Plotting
par(mar=c(4,4,4,1))
plot3 <- ggplot(data, aes(x=datetime,y=Value,color=Sub_Metering)) +
geom_line(aes(color=Sub_Metering)) +
scale_color_brewer(palette="Dark2") +
labs(title="Plot 3",x="", y="Global Active Power") +
theme_minimal() +
theme(legend.position=c(1,0.8),legend.justification=c(1,0))
plot3
##ggsave("plot3.png",plot3,height=480,width=480,units="mm")
}
|
/plot3.R
|
no_license
|
abhi584/ExData_Plotting1
|
R
| false
| false
| 1,297
|
r
|
# Create data for the graph.
d<- c(1,2,2,3,3,3,4,4,4,4)
# Create the histogram.
hist(d,xlab = "data",col = "yellow",border = "blue")
|
/Chapter-2/Ch2-2-histogram-Chart.r
|
no_license
|
mohzary/5562-Statistical-learning
|
R
| false
| false
| 135
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tbl.R
\name{print.whippr}
\alias{print.whippr}
\title{Whippr print method}
\usage{
\method{print}{whippr}(x, ...)
}
\arguments{
\item{x}{A tibble with class 'whippr'}
\item{...}{Extra arguments, not used.}
}
\description{
Whippr print method
}
|
/man/print.whippr.Rd
|
permissive
|
fmmattioni/whippr
|
R
| false
| true
| 323
|
rd
|
library(testthat)
context("test z.DranchukPurvisRobinson")
# test only one point at Ppr=0.5 and Tpr = 1.3
# print(z.DranchukPurvisRobinson(0.5, 1.3))
test_that("DPR matches z at Ppr=0.5 and Tpr=1.3", {
expect_equal(z.DranchukPurvisRobinson(0.5, 1.3), 0.9197157, tolerance = 1E-7)
})
test_that("DPR corr matches solution of 4x7 Ppr, Tpr matrix", {
ppr <- c(0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5)
tpr <- c(1.3, 1.5, 1.7, 2)
# dpr <- z.DranchukPurvisRobinson(ppr, tpr); save(dpr, file = "dpr_4x7.rda")
load(file = "dpr_4x7.rda");
expect_equal(z.DranchukPurvisRobinson(ppr, tpr), dpr)
})
test_that("DPR corr matches solution of 2x6 Ppr, Tpr matrix", {
tpr <- c(1.05, 1.1)
ppr <- c(0.5, 1.5, 2.5, 3.5, 4.5, 5.5)
# dpr <- z.DranchukPurvisRobinson(ppr, tpr); save(dpr, file = "dpr_2x6.rda")
load(file = "dpr_2x6.rda");
expect_equal(z.DranchukPurvisRobinson(ppr, tpr), dpr)
})
test_that("DPR corr matches solution of 4x13 Ppr, Tpr matrix", {
tpr <- c(1.05, 1.1, 1.2, 1.3)
ppr <- c(0.5, 1.0, 1.5, 2, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5)
# dpr <- z.DranchukPurvisRobinson(ppr, tpr); save(dpr, file = "dpr_4x13.rda")
load(file = "dpr_4x13.rda");
expect_equal(z.DranchukPurvisRobinson(ppr, tpr), dpr)
})
test_that("DPR corr matches solution of 16x7 Ppr, Tpr (all) matrix", {
tpr <- getStandingKatzTpr(pprRange = "lp")
ppr <- c(0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5)
# dpr <- z.DranchukPurvisRobinson(ppr, tpr); save(dpr, file = "dpr_16x7.rda")
load(file = "dpr_16x7.rda");
expect_equal(z.DranchukPurvisRobinson(ppr, tpr), dpr)
})
test_that("uni-element vectors of Ppr and Tpr work", {
# print(z.DranchukPurvisRobinson(c(1.0), c(1.5)))
expect_equal(z.DranchukPurvisRobinson(1.0, 1.5), 0.9025952, tolerance = 1e-7)
expect_equal(z.DranchukPurvisRobinson(c(1.0), c(1.5)), 0.9025952, tolerance = 1e-7)
})
test_that("1x2 matrix of Ppr and Tpr work", {
ppr <- c(1.0, 2.0)
tpr <- 1.5
# print(z.DranchukPurvisRobinson(ppr, tpr))
expected <- matrix(c(0.9025952, 0.820633), nrow=1, ncol=2)
rownames(expected) <- tpr
colnames(expected) <- ppr
expect_equal(z.DranchukPurvisRobinson(ppr, tpr), expected, tolerance = 1e-7)
})
|
/tests/testthat/test_DranchukPurvisRobinson.R
|
no_license
|
sunilgarg1/zFactor
|
R
| false
| false
| 2,236
|
r
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
##--------------------------------------------------------------------------------------------------#
##' Check two lists. Identical does not work since one can be loaded
##' from the database and the other from a CSV file.
##'
##' @name check.lists
##' @title Compares two lists
##' @param x first list
##' @param y second list
##' @param filename one of "species.csv" or "cultivars.csv"
##' @return true if two list are the same
##' @author Rob Kooper
##'
check.lists <- function(x, y, filename = "species.csv") {
if (nrow(x) != nrow(y)) {
return(FALSE)
}
if(filename == "species.csv"){
cols <- c('id', 'genus', 'species', 'scientificname')
} else if (filename == "cultivars.csv") {
cols <- c('id', 'specie_id', 'species_name', 'cultivar_name')
} else {
return(FALSE)
}
xy_match <- vapply(cols, function(i) identical(as.character(x[[i]]), as.character(y[[i]])), logical(1))
return(all(unlist(xy_match)))
}
##--------------------------------------------------------------------------------------------------#
##' Get trait data from the database for a single pft
##'
##' @name get.trait.data.pft
##' @title Gets trait data from the database
##' @details \code{pft} should be a list containing at least `name` and `outdir`, and optionally `posteriorid` and `constants`. BEWARE: All existing files in \code{outdir} will be deleted!
##' @param pft list of settings for the pft whose traits to retrieve. See details
##' @param modeltype type of model that is used, this is used to distinguish between different pfts with the same name.
##' @param dbfiles location where previous results are found
##' @param dbcon database connection
##' @param forceupdate set this to true to force an update, auto will check to see if an update is needed.
##' @param trait.names list of trait names to retrieve
##' @return updated pft with posteriorid
##' @author David LeBauer, Shawn Serbin, Rob Kooper
##' @export
##'
get.trait.data.pft <- function(pft, modeltype, dbfiles, dbcon, trait.names,
forceupdate = FALSE) {
# Create directory if necessary
if (!file.exists(pft$outdir) && !dir.create(pft$outdir, recursive = TRUE)) {
PEcAn.logger::logger.error(paste0("Couldn't create PFT output directory: ", pft$outdir))
}
## Remove old files. Clean up.
old.files <- list.files(path = pft$outdir, full.names = TRUE, include.dirs = FALSE)
file.remove(old.files)
# find appropriate pft
pftres <- (dplyr::tbl(dbcon, "pfts")
%>% dplyr::filter(name == pft$name))
if (!is.null(modeltype)) {
pftres <- (pftres %>% dplyr::semi_join(
(dplyr::tbl(dbcon, "modeltypes") %>% dplyr::filter(name == modeltype)),
by = c("modeltype_id" = "id")))
}
pftres <- (pftres
%>% dplyr::select(.data$id, .data$pft_type)
%>% dplyr::collect())
pfttype <- pftres[['pft_type']]
pftid <- pftres[['id']]
if(nrow(pftres) > 1){
PEcAn.logger::logger.severe(
"Multiple PFTs named", pft$name, "found,",
"with ids", PEcAn.utils::vecpaste(pftres$id), ".",
"Specify modeltype to fix this.")
}
if (is.null(pftid)) {
PEcAn.logger::logger.severe("Could not find pft", pft$name)
return(NA)
}
# get the member species/cultivars, we need to check if anything changed
if (pfttype == "plant") {
pft_member_filename = "species.csv"
pft_members <- PEcAn.DB::query.pft_species(pft$name, modeltype, dbcon)
} else if (pfttype == "cultivar") {
pft_member_filename = "cultivars.csv"
pft_members <- PEcAn.DB::query.pft_cultivars(pft$name, modeltype, dbcon)
} else {
PEcAn.logger::logger.severe("Unknown pft type! Expected 'plant' or 'cultivar', got", pfttype)
}
# get the priors
prior.distns <- PEcAn.DB::query.priors(pft = pftid, trstr = PEcAn.utils::vecpaste(trait.names), out = pft$outdir, con = dbcon)
prior.distns <- prior.distns[which(!rownames(prior.distns) %in% names(pft$constants)),]
traits <- rownames(prior.distns)
# get the trait data (don't bother sampling derived traits until after update check)
trait.data.check <- PEcAn.DB::query.traits(ids = pft_members$id, priors = traits, con = dbcon, update.check.only = TRUE, ids_are_cultivars = (pfttype=="cultivar"))
traits <- names(trait.data.check)
# Set forceupdate FALSE if it's a string (backwards compatible with 'AUTO' flag used in the past)
if (!is.logical(forceupdate)) {
forceupdate <- FALSE
}
# check to see if we need to update
if (!forceupdate) {
if (is.null(pft$posteriorid)) {
pft$posteriorid <- db.query(
query = paste0(
"SELECT id FROM posteriors WHERE pft_id=", pftid,
" ORDER BY created_at DESC LIMIT 1"
),
con = dbcon
)[['id']]
}
if (!is.null(pft$posteriorid)) {
files <- dbfile.check(type = 'Posterior', container.id = pft$posteriorid, con = dbcon)
ids <- match(c('trait.data.Rdata', 'prior.distns.Rdata', pft_member_filename), files$file_name)
if (!any(is.na(ids))) {
foundallfiles <- TRUE
for(id in ids) {
PEcAn.logger::logger.info(files$file_path[[id]], files$file_name[[id]])
if (!file.exists(file.path(files$file_path[[id]], files$file_name[[id]]))) {
foundallfiles <- FALSE
PEcAn.logger::logger.error("can not find posterior file: ", file.path(files$file_path[[id]], files$file_name[[id]]))
} else if (files$file_name[[id]] == pft_member_filename) {
PEcAn.logger::logger.debug("Checking if pft membership has changed")
testme <- utils::read.csv(file = file.path(files$file_path[[id]], files$file_name[[id]]))
if (!check.lists(pft_members, testme, pft_member_filename)) {
foundallfiles <- FALSE
PEcAn.logger::logger.error("pft membership has changed: ", file.path(files$file_path[[id]], files$file_name[[id]]))
}
remove(testme)
} else if (files$file_name[[id]] == "prior.distns.Rdata") {
PEcAn.logger::logger.debug("Checking if priors have changed")
prior.distns.tmp <- prior.distns
if(file.exists(files$file_path[[id]], files$file_name[[id]])){
load(file.path(files$file_path[[id]], files$file_name[[id]]))#HERE IS THE PROBLEM
}else{
PEcAn.logger::logger.debug("Prior file does not exist. If empty (zero-byte) input file error is recived, set forceupdate to TRUE for one run.")
}
testme <- prior.distns
prior.distns <- prior.distns.tmp
if (!identical(prior.distns, testme)) {
foundallfiles <- FALSE
PEcAn.logger::logger.error("priors have changed: ", file.path(files$file_path[[id]], files$file_name[[id]]))
}
remove(testme)
} else if (files$file_name[[id]] == "trait.data.Rdata") {
PEcAn.logger::logger.debug("Checking if trait data has changed")
load(file.path(files$file_path[[id]], files$file_name[[id]]))
# For trait data including converted data, only check unconverted
converted.stats2na <- function(x) {
if (all(c("mean", "stat", "mean_unconverted", "stat_unconverted") %in% names(x)))
x[,c("mean","stat")] <- NA
return(x)
}
trait.data <- lapply(trait.data, converted.stats2na)
trait.data.check <- lapply(trait.data.check, converted.stats2na)
if (!identical(trait.data.check, trait.data)) {
foundallfiles <- FALSE
PEcAn.logger::logger.error("trait data has changed: ", file.path(files$file_path[[id]], files$file_name[[id]]))
}
remove(trait.data, trait.data.check)
}
}
if (foundallfiles) {
PEcAn.logger::logger.info("Reusing existing files from posterior", pft$posteriorid, "for", pft$name)
for (id in seq_len(nrow(files))) {
file.copy(from = file.path(files[[id, 'file_path']], files[[id, 'file_name']]),
to = file.path(pft$outdir, files[[id, 'file_name']]))
}
# May need to symlink the generic post.distns.Rdata to a specific post.distns.*.Rdata file.
if (length(dir(pft$outdir, "post.distns.Rdata")) == 0) {
all.files <- dir(pft$outdir)
post.distn.file <- all.files[grep("post.distns.*.Rdata", all.files)]
if (length(post.distn.file) > 1)
stop("get.trait.data.pft() doesn't know how to handle multiple post.distns.*.Rdata files")
else if (length(post.distn.file) == 1) {
# Found exactly one post.distns.*.Rdata file. Use it.
file.symlink(from = file.path(pft$outdir, post.distn.file),
to = file.path(pft$outdir, 'post.distns.Rdata')
)
}
}
return(pft)
}
}
}
}
# get the trait data (including sampling of derived traits, if any)
trait.data <- query.traits(pft_members$id, traits, con = dbcon, update.check.only = FALSE, ids_are_cultivars=(pfttype=="cultivar"))
traits <- names(trait.data)
# get list of existing files so they get ignored saving
old.files <- list.files(path = pft$outdir)
# create a new posterior
now <- format(x = Sys.time(), format = "%Y-%m-%d %H:%M:%S")
db.query(query = paste0("INSERT INTO posteriors (pft_id, created_at, updated_at) VALUES (", pftid, ", '", now, "', '", now, "')"),
con = dbcon)
pft$posteriorid <- db.query(query = paste0("SELECT id FROM posteriors WHERE pft_id=", pftid, " AND created_at='", now, "'"),
con = dbcon)[['id']]
# create path where to store files
pathname <- file.path(dbfiles, "posterior", pft$posteriorid)
dir.create(pathname, showWarnings = FALSE, recursive = TRUE)
## 1. get species/cultivar list based on pft
utils::write.csv(pft_members, file.path(pft$outdir, pft_member_filename), row.names = FALSE)
## save priors
save(prior.distns, file = file.path(pft$outdir, "prior.distns.Rdata"))
utils::write.csv(prior.distns,
file = file.path(pft$outdir, "prior.distns.csv"), row.names = TRUE)
## 3. display info to the console
PEcAn.logger::logger.info('Summary of Prior distributions for: ', pft$name)
PEcAn.logger::logger.info(colnames(prior.distns))
apply(X = cbind(rownames(prior.distns), prior.distns), MARGIN = 1, FUN = PEcAn.logger::logger.info)
## traits = variables with prior distributions for this pft
trait.data.file <- file.path(pft$outdir, "trait.data.Rdata")
save(trait.data, file = trait.data.file)
utils::write.csv(dplyr::bind_rows(trait.data),
file = file.path(pft$outdir, "trait.data.csv"), row.names = FALSE)
PEcAn.logger::logger.info("number of observations per trait for", pft$name)
for (t in names(trait.data)) {
PEcAn.logger::logger.info(nrow(trait.data[[t]]), "observations of", t)
}
### save and store in database all results except those that were there already
for (file in list.files(path = pft$outdir)) {
if (file %in% old.files) {
next
}
filename <- file.path(pathname, file)
file.copy(file.path(pft$outdir, file), filename)
dbfile.insert(in.path = pathname, in.prefix = file, type = 'Posterior', id = pft$posteriorid, con = dbcon)
}
return(pft)
}
##--------------------------------------------------------------------------------------------------#
##' Get trait data from the database.
##'
##' This will use the following items from settings:
##' - settings$pfts
##' - settings$model$type
##' - settings$database$bety
##' - settings$database$dbfiles
##' - settings$meta.analysis$update
##' @name get.trait.data
##' @title Gets trait data from the database
##' @param pfts the list of pfts to get traits for
##' @param modeltype type of model that is used, this is used to distinguish between different pfts with the same name.
##' @param dbfiles location where previous results are found
##' @param database database connection parameters
##' @param forceupdate set this to true to force an update, false to check to see if an update is needed.
##' @param trait.names list of traits to query. If TRUE, uses trait.dictionary
##' @return list of pfts with update posteriorids
##' @author David LeBauer, Shawn Serbin
##' @export
##'
get.trait.data <- function(pfts, modeltype, dbfiles, database, forceupdate, trait.names=NULL) {
if (!is.list(pfts)) {
PEcAn.logger::logger.severe('pfts must be a list')
}
# Check that all PFTs have associated outdir entries
pft_outdirs <- lapply(pfts, '[[', 'outdir')
if (any(sapply(pft_outdirs, is.null))) {
PEcAn.logger::logger.severe('At least one pft in settings is missing its "outdir"')
}
##---------------- Load trait dictionary --------------#
if (is.logical(trait.names)) {
if (trait.names) {
trait.names <- as.character(PEcAn.utils::trait.dictionary$id)
}
}
# process all pfts
dbcon <- db.open(database)
on.exit(db.close(dbcon))
result <- lapply(pfts, get.trait.data.pft,
modeltype = modeltype,
dbfiles = dbfiles,
dbcon = dbcon,
forceupdate = forceupdate,
trait.names = trait.names)
invisible(result)
}
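##--------------------------------------------------------------------------------------------------#
## Minimal usage sketch (illustrative only; not part of the package). The pft list,
## database settings, and paths are placeholders that would normally come from a
## parsed PEcAn settings object.
# pfts <- list(list(name = "temperate.deciduous", outdir = "/tmp/pfts/temperate.deciduous"))
# database <- list(host = "localhost", dbname = "bety", user = "bety", password = "bety")
# result <- get.trait.data(pfts, modeltype = "ED2", dbfiles = "/tmp/dbfiles",
#                          database = database, forceupdate = FALSE, trait.names = TRUE)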
####################################################################################################
### EOF. End of R script file.
####################################################################################################
|
/base/db/R/get.trait.data.R
|
permissive
|
yan130/pecan
|
R
| false
| false
| 14,037
|
r
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
##--------------------------------------------------------------------------------------------------#
##' Check two lists. Identical does not work since one can be loaded
##' from the database and the other from a CSV file.
##'
##' @name check.lists
##' @title Compares two lists
##' @param x first list
##' @param y second list
##' @param filename one of "species.csv" or "cultivars.csv"
##' @return TRUE if the two lists are the same
##' @author Rob Kooper
##'
check.lists <- function(x, y, filename = "species.csv") {
if (nrow(x) != nrow(y)) {
return(FALSE)
}
if(filename == "species.csv"){
cols <- c('id', 'genus', 'species', 'scientificname')
} else if (filename == "cultivars.csv") {
cols <- c('id', 'specie_id', 'species_name', 'cultivar_name')
} else {
return(FALSE)
}
xy_match <- vapply(cols, function(i) identical(as.character(x[[i]]), as.character(y[[i]])), logical(1))
return(all(unlist(xy_match)))
}
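## Illustrative usage sketch (commented out; the file name and pft name below are
## placeholders -- the real inputs come from query.pft_species()/query.pft_cultivars()
## and a species.csv written by a previous run):
# old_members <- utils::read.csv("species.csv")
# new_members <- PEcAn.DB::query.pft_species("temperate.deciduous", NULL, dbcon)
# check.lists(new_members, old_members, "species.csv")  # TRUE if membership is unchanged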
##--------------------------------------------------------------------------------------------------#
##' Get trait data from the database for a single pft
##'
##' @name get.trait.data.pft
##' @title Gets trait data from the database
##' @details \code{pft} should be a list containing at least `name` and `outdir`, and optionally `posteriorid` and `constants`. BEWARE: All existing files in \code{outdir} will be deleted!
##' @param pft list of settings for the pft whose traits to retrieve. See details
##' @param modeltype type of model that is used, this is used to distinguish between different pfts with the same name.
##' @param dbfiles location where previous results are found
##' @param dbcon database connection
##' @param forceupdate set this to TRUE to force an update; otherwise the function checks whether an update is needed.
##' @param trait.names list of trait names to retrieve
##' @return updated pft with posteriorid
##' @author David LeBauer, Shawn Serbin, Rob Kooper
##' @export
##'
get.trait.data.pft <- function(pft, modeltype, dbfiles, dbcon, trait.names,
forceupdate = FALSE) {
# Create directory if necessary
if (!file.exists(pft$outdir) && !dir.create(pft$outdir, recursive = TRUE)) {
PEcAn.logger::logger.error(paste0("Couldn't create PFT output directory: ", pft$outdir))
}
## Remove old files. Clean up.
old.files <- list.files(path = pft$outdir, full.names = TRUE, include.dirs = FALSE)
file.remove(old.files)
# find appropriate pft
pftres <- (dplyr::tbl(dbcon, "pfts")
%>% dplyr::filter(name == pft$name))
if (!is.null(modeltype)) {
pftres <- (pftres %>% dplyr::semi_join(
(dplyr::tbl(dbcon, "modeltypes") %>% dplyr::filter(name == modeltype)),
by = c("modeltype_id" = "id")))
}
pftres <- (pftres
%>% dplyr::select(.data$id, .data$pft_type)
%>% dplyr::collect())
pfttype <- pftres[['pft_type']]
pftid <- pftres[['id']]
if(nrow(pftres) > 1){
PEcAn.logger::logger.severe(
"Multiple PFTs named", pft$name, "found,",
"with ids", PEcAn.utils::vecpaste(pftres$id), ".",
"Specify modeltype to fix this.")
}
if (is.null(pftid)) {
PEcAn.logger::logger.severe("Could not find pft", pft$name)
return(NA)
}
# get the member species/cultivars, we need to check if anything changed
if (pfttype == "plant") {
pft_member_filename = "species.csv"
pft_members <- PEcAn.DB::query.pft_species(pft$name, modeltype, dbcon)
} else if (pfttype == "cultivar") {
pft_member_filename = "cultivars.csv"
pft_members <- PEcAn.DB::query.pft_cultivars(pft$name, modeltype, dbcon)
} else {
PEcAn.logger::logger.severe("Unknown pft type! Expected 'plant' or 'cultivar', got", pfttype)
}
# get the priors
prior.distns <- PEcAn.DB::query.priors(pft = pftid, trstr = PEcAn.utils::vecpaste(trait.names), out = pft$outdir, con = dbcon)
prior.distns <- prior.distns[which(!rownames(prior.distns) %in% names(pft$constants)),]
traits <- rownames(prior.distns)
# get the trait data (don't bother sampling derived traits until after update check)
trait.data.check <- PEcAn.DB::query.traits(ids = pft_members$id, priors = traits, con = dbcon, update.check.only = TRUE, ids_are_cultivars = (pfttype=="cultivar"))
traits <- names(trait.data.check)
# Set forceupdate FALSE if it's a string (backwards compatible with 'AUTO' flag used in the past)
if (!is.logical(forceupdate)) {
forceupdate <- FALSE
}
# check to see if we need to update
if (!forceupdate) {
if (is.null(pft$posteriorid)) {
pft$posteriorid <- db.query(
query = paste0(
"SELECT id FROM posteriors WHERE pft_id=", pftid,
" ORDER BY created_at DESC LIMIT 1"
),
con = dbcon
)[['id']]
}
if (!is.null(pft$posteriorid)) {
files <- dbfile.check(type = 'Posterior', container.id = pft$posteriorid, con = dbcon)
ids <- match(c('trait.data.Rdata', 'prior.distns.Rdata', pft_member_filename), files$file_name)
if (!any(is.na(ids))) {
foundallfiles <- TRUE
for(id in ids) {
PEcAn.logger::logger.info(files$file_path[[id]], files$file_name[[id]])
if (!file.exists(file.path(files$file_path[[id]], files$file_name[[id]]))) {
foundallfiles <- FALSE
PEcAn.logger::logger.error("can not find posterior file: ", file.path(files$file_path[[id]], files$file_name[[id]]))
} else if (files$file_name[[id]] == pft_member_filename) {
PEcAn.logger::logger.debug("Checking if pft membership has changed")
testme <- utils::read.csv(file = file.path(files$file_path[[id]], files$file_name[[id]]))
if (!check.lists(pft_members, testme, pft_member_filename)) {
foundallfiles <- FALSE
PEcAn.logger::logger.error("pft membership has changed: ", file.path(files$file_path[[id]], files$file_name[[id]]))
}
remove(testme)
} else if (files$file_name[[id]] == "prior.distns.Rdata") {
PEcAn.logger::logger.debug("Checking if priors have changed")
prior.distns.tmp <- prior.distns
              if (file.exists(file.path(files$file_path[[id]], files$file_name[[id]]))) {
                # load() overwrites prior.distns; the original is restored from prior.distns.tmp below
                load(file.path(files$file_path[[id]], files$file_name[[id]]))
}else{
PEcAn.logger::logger.debug("Prior file does not exist. If empty (zero-byte) input file error is recived, set forceupdate to TRUE for one run.")
}
testme <- prior.distns
prior.distns <- prior.distns.tmp
if (!identical(prior.distns, testme)) {
foundallfiles <- FALSE
PEcAn.logger::logger.error("priors have changed: ", file.path(files$file_path[[id]], files$file_name[[id]]))
}
remove(testme)
} else if (files$file_name[[id]] == "trait.data.Rdata") {
PEcAn.logger::logger.debug("Checking if trait data has changed")
load(file.path(files$file_path[[id]], files$file_name[[id]]))
# For trait data including converted data, only check unconverted
converted.stats2na <- function(x) {
if (all(c("mean", "stat", "mean_unconverted", "stat_unconverted") %in% names(x)))
x[,c("mean","stat")] <- NA
return(x)
}
trait.data <- lapply(trait.data, converted.stats2na)
trait.data.check <- lapply(trait.data.check, converted.stats2na)
if (!identical(trait.data.check, trait.data)) {
foundallfiles <- FALSE
PEcAn.logger::logger.error("trait data has changed: ", file.path(files$file_path[[id]], files$file_name[[id]]))
}
remove(trait.data, trait.data.check)
}
}
if (foundallfiles) {
PEcAn.logger::logger.info("Reusing existing files from posterior", pft$posteriorid, "for", pft$name)
for (id in seq_len(nrow(files))) {
file.copy(from = file.path(files[[id, 'file_path']], files[[id, 'file_name']]),
to = file.path(pft$outdir, files[[id, 'file_name']]))
}
# May need to symlink the generic post.distns.Rdata to a specific post.distns.*.Rdata file.
if (length(dir(pft$outdir, "post.distns.Rdata")) == 0) {
all.files <- dir(pft$outdir)
post.distn.file <- all.files[grep("post.distns.*.Rdata", all.files)]
if (length(post.distn.file) > 1)
stop("get.trait.data.pft() doesn't know how to handle multiple post.distns.*.Rdata files")
else if (length(post.distn.file) == 1) {
# Found exactly one post.distns.*.Rdata file. Use it.
file.symlink(from = file.path(pft$outdir, post.distn.file),
to = file.path(pft$outdir, 'post.distns.Rdata')
)
}
}
return(pft)
}
}
}
}
# get the trait data (including sampling of derived traits, if any)
trait.data <- query.traits(pft_members$id, traits, con = dbcon, update.check.only = FALSE, ids_are_cultivars=(pfttype=="cultivar"))
traits <- names(trait.data)
# get list of existing files so they get ignored saving
old.files <- list.files(path = pft$outdir)
# create a new posterior
now <- format(x = Sys.time(), format = "%Y-%m-%d %H:%M:%S")
db.query(query = paste0("INSERT INTO posteriors (pft_id, created_at, updated_at) VALUES (", pftid, ", '", now, "', '", now, "')"),
con = dbcon)
pft$posteriorid <- db.query(query = paste0("SELECT id FROM posteriors WHERE pft_id=", pftid, " AND created_at='", now, "'"),
con = dbcon)[['id']]
# create path where to store files
pathname <- file.path(dbfiles, "posterior", pft$posteriorid)
dir.create(pathname, showWarnings = FALSE, recursive = TRUE)
## 1. get species/cultivar list based on pft
utils::write.csv(pft_members, file.path(pft$outdir, pft_member_filename), row.names = FALSE)
  ## 2. save priors
save(prior.distns, file = file.path(pft$outdir, "prior.distns.Rdata"))
utils::write.csv(prior.distns,
file = file.path(pft$outdir, "prior.distns.csv"), row.names = TRUE)
## 3. display info to the console
PEcAn.logger::logger.info('Summary of Prior distributions for: ', pft$name)
PEcAn.logger::logger.info(colnames(prior.distns))
apply(X = cbind(rownames(prior.distns), prior.distns), MARGIN = 1, FUN = PEcAn.logger::logger.info)
## traits = variables with prior distributions for this pft
trait.data.file <- file.path(pft$outdir, "trait.data.Rdata")
save(trait.data, file = trait.data.file)
utils::write.csv(dplyr::bind_rows(trait.data),
file = file.path(pft$outdir, "trait.data.csv"), row.names = FALSE)
PEcAn.logger::logger.info("number of observations per trait for", pft$name)
for (t in names(trait.data)) {
PEcAn.logger::logger.info(nrow(trait.data[[t]]), "observations of", t)
}
### save and store in database all results except those that were there already
for (file in list.files(path = pft$outdir)) {
if (file %in% old.files) {
next
}
filename <- file.path(pathname, file)
file.copy(file.path(pft$outdir, file), filename)
dbfile.insert(in.path = pathname, in.prefix = file, type = 'Posterior', id = pft$posteriorid, con = dbcon)
}
return(pft)
}
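## Illustrative call (commented out; assumes an open BETY connection `dbcon` and
## placeholder paths/trait names):
# pft <- list(name = "temperate.deciduous", outdir = "/tmp/pft_out")
# pft <- get.trait.data.pft(pft, modeltype = "ED2", dbfiles = "/tmp/dbfiles",
#                           dbcon = dbcon, trait.names = c("SLA", "Vcmax"),
#                           forceupdate = FALSE)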
##--------------------------------------------------------------------------------------------------#
##' Get trait data from the database.
##'
##' This will use the following items from settings:
##' - settings$pfts
##' - settings$model$type
##' - settings$database$bety
##' - settings$database$dbfiles
##' - settings$meta.analysis$update
##' @name get.trait.data
##' @title Gets trait data from the database
##' @param pfts the list of pfts to get traits for
##' @param modeltype type of model that is used; this is used to distinguish between different pfts with the same name.
##' @param dbfiles location where previous results are found
##' @param database database connection parameters
##' @param forceupdate set this to true to force an update, false to check to see if an update is needed.
##' @param trait.names list of traits to query. If TRUE, uses trait.dictionary
##' @return list of pfts with updated posteriorids
##' @author David LeBauer, Shawn Serbin
##' @export
##'
get.trait.data <- function(pfts, modeltype, dbfiles, database, forceupdate, trait.names=NULL) {
if (!is.list(pfts)) {
PEcAn.logger::logger.severe('pfts must be a list')
}
# Check that all PFTs have associated outdir entries
pft_outdirs <- lapply(pfts, '[[', 'outdir')
if (any(sapply(pft_outdirs, is.null))) {
PEcAn.logger::logger.severe('At least one pft in settings is missing its "outdir"')
}
##---------------- Load trait dictionary --------------#
if (is.logical(trait.names)) {
if (trait.names) {
trait.names <- as.character(PEcAn.utils::trait.dictionary$id)
}
}
# process all pfts
dbcon <- db.open(database)
on.exit(db.close(dbcon))
result <- lapply(pfts, get.trait.data.pft,
modeltype = modeltype,
dbfiles = dbfiles,
dbcon = dbcon,
forceupdate = forceupdate,
trait.names = trait.names)
invisible(result)
}
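## Illustrative usage (commented out; assumes a PEcAn settings list has already been
## read -- the file name below is a placeholder):
# settings <- PEcAn.settings::read.settings("pecan.xml")
# settings$pfts <- get.trait.data(settings$pfts, settings$model$type,
#                                 settings$database$dbfiles, settings$database$bety,
#                                 settings$meta.analysis$update)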
####################################################################################################
### EOF. End of R script file.
####################################################################################################
|
#' Compute all within- and between-facet distances between quantile categories given a data set
#'
#' @param .data data for which mmpd needs to be calculated
#' @param gran_x granularities mapped across x levels
#' @param gran_facet granularities mapped across facets
#' @param response univariate response variable
#' @param quantile_prob probabilities
#' @param dist_ordered if categories are ordered
#' @param lambda value of tuning parameter for computing weighted pairwise distances
#' @return the raw weighted pairwise within-facet and between-facet distances
#'
#' @examples
#' library(tidyverse)
#' library(gravitas)
#' library(parallel)
#' sm <- smart_meter10 %>%
#' filter(customer_id %in% c("10017936"))
#' gran_x <- "month_year"
#' gran_facet <- "wknd_wday"
#' v <- compute_pairwise_dist(sm, gran_x, gran_facet,
#' response = general_supply_kwh
#' )
#' # month of the year not working in this setup
#' @export compute_pairwise_dist
compute_pairwise_dist <- function(.data,
gran_x = NULL,
gran_facet = NA,
response = NULL,
quantile_prob =
seq(0.01, 0.99, 0.01),
dist_ordered = TRUE,
lambda = 0.67) {
if(!is.na(gran_facet))
{
lambda_t = lambda
if (!((gran_x %in% names(.data) &
(gran_facet %in% names(.data)))))
.data <- .data %>%
gravitas::create_gran(gran_x) %>%
gravitas::create_gran(gran_facet) %>%
dplyr::rename("id_facet" = !!gran_facet) %>%
dplyr::rename("id_x" = !!gran_x)
else{
.data <- .data %>%
dplyr::rename("id_facet" = !!gran_facet) %>%
dplyr::rename("id_x" = !!gran_x)
}
}
else{
lambda_t = 1
if (!((gran_x %in% names(.data) )))
.data <- .data %>%
gravitas::create_gran(gran_x) %>%
dplyr::rename("id_x" = !!gran_x) %>%
dplyr::mutate(id_facet = 1)
else{
.data <- .data %>%
dplyr::rename("id_facet" = !!gran_facet) %>%
dplyr::rename("id_x" = !!gran_x)
}
}
all_dist_data <- suppressMessages(
.data %>%
tibble::as_tibble() %>%
dplyr::select(id_x, id_facet, {{ response }}) %>%
dplyr::rename("sim_data" = {{ response }}) %>%
# mutate(sim_data = scale(sim_data)) %>%
compute_quantiles(
quantile_prob =
quantile_prob
) %>%
distance_all_pairwise(
quantile_prob =
quantile_prob,
dist_ordered = dist_ordered,
lambda = lambda_t
)
)
all_dist_data
}
|
/R/compute_pairwise_dist.R
|
no_license
|
Sayani07/hakear
|
R
| false
| false
| 2,702
|
r
|
#' Compute all within- and between-facet distances between quantile categories given a data set
#'
#' @param .data data for which mmpd needs to be calculated
#' @param gran_x granularities mapped across x levels
#' @param gran_facet granularities mapped across facets
#' @param response univariate response variable
#' @param quantile_prob probabilities
#' @param dist_ordered if categories are ordered
#' @param lambda value of tuning parameter for computing weighted pairwise distances
#' @return the raw weighted pairwise within-facet and between-facet distances
#'
#' @examples
#' library(tidyverse)
#' library(gravitas)
#' library(parallel)
#' sm <- smart_meter10 %>%
#' filter(customer_id %in% c("10017936"))
#' gran_x <- "month_year"
#' gran_facet <- "wknd_wday"
#' v <- compute_pairwise_dist(sm, gran_x, gran_facet,
#' response = general_supply_kwh
#' )
#' # month of the year not working in this setup
#' @export compute_pairwise_dist
compute_pairwise_dist <- function(.data,
gran_x = NULL,
gran_facet = NA,
response = NULL,
quantile_prob =
seq(0.01, 0.99, 0.01),
dist_ordered = TRUE,
lambda = 0.67) {
if(!is.na(gran_facet))
{
lambda_t = lambda
if (!((gran_x %in% names(.data) &
(gran_facet %in% names(.data)))))
.data <- .data %>%
gravitas::create_gran(gran_x) %>%
gravitas::create_gran(gran_facet) %>%
dplyr::rename("id_facet" = !!gran_facet) %>%
dplyr::rename("id_x" = !!gran_x)
else{
.data <- .data %>%
dplyr::rename("id_facet" = !!gran_facet) %>%
dplyr::rename("id_x" = !!gran_x)
}
}
else{
lambda_t = 1
if (!((gran_x %in% names(.data) )))
.data <- .data %>%
gravitas::create_gran(gran_x) %>%
dplyr::rename("id_x" = !!gran_x) %>%
dplyr::mutate(id_facet = 1)
else{
.data <- .data %>%
dplyr::rename("id_facet" = !!gran_facet) %>%
dplyr::rename("id_x" = !!gran_x)
}
}
all_dist_data <- suppressMessages(
.data %>%
tibble::as_tibble() %>%
dplyr::select(id_x, id_facet, {{ response }}) %>%
dplyr::rename("sim_data" = {{ response }}) %>%
# mutate(sim_data = scale(sim_data)) %>%
compute_quantiles(
quantile_prob =
quantile_prob
) %>%
distance_all_pairwise(
quantile_prob =
quantile_prob,
dist_ordered = dist_ordered,
lambda = lambda_t
)
)
all_dist_data
}
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22808249671287e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615782771-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 329
|
r
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22808249671287e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
testthat::context("testing influx_query")
# setup influx connection
testthat::test_that("connection", {
# only local tests
testthat::skip_on_cran()
testthat::skip_on_travis()
con <<- influx_connection(group = "admin")
testthat::expect_is(object = con, class = "list")
})
testthat::test_that("single query no chunking", {
# only local tests
testthat::skip_on_cran()
testthat::skip_on_travis()
data1a <- influx_query(con = con,
chunked = FALSE,
db = "stbmod",
timestamp_format = "n",
query = "select value from MengeNEZ where Ort='Flachbau' group by * limit 10",
return_xts = FALSE)
data1b <- influx_select(con, "stbmod",
field_keys = "value",
where = "Ort ='Flachbau'",
measurement = "MengeNEZ",
group_by = "*",
limit = 10)
data1c <- influx_select(con, "stbmod",
field_keys = "value",
where = "Ort ='Flachbau'",
measurement = "MengeNEZ",
limit = 10,
simplifyList = TRUE)
data1d <- influx_select(con, "stbmod",
field_keys = "value",
where = "Ort ='Flachbau'",
measurement = "MengeNEZ",
limit = 10,
simplifyList = FALSE)
testthat::expect_is(data1a, class = "list")
testthat::expect_is(data1b, class = "list")
testthat::expect_equal(data1c, data1d[[1]][[1]])
})
testthat::test_that("multiple query no chunking", {
# only local tests
testthat::skip_on_cran()
testthat::skip_on_travis()
data2 <- influx_query(con = con,
chunked = FALSE,
db = "stbmod",
timestamp_format = "n",
query = "select value from MengeNEZ where Ort='Flachbau' group by * limit 10;
select value from Durchfluss where Ort='Flachbau' limit 10",
return_xts = FALSE)
testthat::expect_is(object = data2, class = "list")
})
testthat::test_that("single query with chunking", {
# only local tests
testthat::skip_on_cran()
testthat::skip_on_travis()
data3 <- influx_query(con = con,
chunked = 10,
db = "stbmod",
query = "select value from MengeNEZ, Durchfluss where Ort='Flachbau' group by * limit 100",
return_xts = FALSE)
testthat::expect_is(object = data3, class = "list")
})
testthat::test_that("multiple query with chunking", {
# only local tests
testthat::skip_on_cran()
testthat::skip_on_travis()
data4 <- influx_query(con = con,
chunked = 10,
db = "stbmod",
query = "select value from MengeNEZ where Ort='Flachbau' group by * limit 100;
select value from Durchfluss where Ort='Flachbau' limit 100",
return_xts = FALSE)
testthat::expect_is(object = data4, class = "list")
})
testthat::test_that("multiple query with chunking and xts result", {
# only local tests
testthat::skip_on_cran()
testthat::skip_on_travis()
data5 <- influx_query(con = con,
chunked = 10,
db = "stbmod",
query = "select value from MengeNEZ where Ort='Flachbau' group by * limit 100;
select value from Durchfluss where Ort='Flachbau' limit 100",
return_xts = TRUE)
testthat::expect_is(object = data5, class = "list")
})
|
/tests/testthat/test_query.R
|
no_license
|
vspinu/influxdbr
|
R
| false
| false
| 3,963
|
r
|
testthat::context("testing influx_query")
# setup influx connection
testthat::test_that("connection", {
# only local tests
testthat::skip_on_cran()
testthat::skip_on_travis()
con <<- influx_connection(group = "admin")
testthat::expect_is(object = con, class = "list")
})
testthat::test_that("single query no chunking", {
# only local tests
testthat::skip_on_cran()
testthat::skip_on_travis()
data1a <- influx_query(con = con,
chunked = FALSE,
db = "stbmod",
timestamp_format = "n",
query = "select value from MengeNEZ where Ort='Flachbau' group by * limit 10",
return_xts = FALSE)
data1b <- influx_select(con, "stbmod",
field_keys = "value",
where = "Ort ='Flachbau'",
measurement = "MengeNEZ",
group_by = "*",
limit = 10)
data1c <- influx_select(con, "stbmod",
field_keys = "value",
where = "Ort ='Flachbau'",
measurement = "MengeNEZ",
limit = 10,
simplifyList = TRUE)
data1d <- influx_select(con, "stbmod",
field_keys = "value",
where = "Ort ='Flachbau'",
measurement = "MengeNEZ",
limit = 10,
simplifyList = FALSE)
testthat::expect_is(data1a, class = "list")
testthat::expect_is(data1b, class = "list")
testthat::expect_equal(data1c, data1d[[1]][[1]])
})
testthat::test_that("multiple query no chunking", {
# only local tests
testthat::skip_on_cran()
testthat::skip_on_travis()
data2 <- influx_query(con = con,
chunked = FALSE,
db = "stbmod",
timestamp_format = "n",
query = "select value from MengeNEZ where Ort='Flachbau' group by * limit 10;
select value from Durchfluss where Ort='Flachbau' limit 10",
return_xts = FALSE)
testthat::expect_is(object = data2, class = "list")
})
testthat::test_that("single query with chunking", {
# only local tests
testthat::skip_on_cran()
testthat::skip_on_travis()
data3 <- influx_query(con = con,
chunked = 10,
db = "stbmod",
query = "select value from MengeNEZ, Durchfluss where Ort='Flachbau' group by * limit 100",
return_xts = FALSE)
testthat::expect_is(object = data3, class = "list")
})
testthat::test_that("multiple query with chunking", {
# only local tests
testthat::skip_on_cran()
testthat::skip_on_travis()
data4 <- influx_query(con = con,
chunked = 10,
db = "stbmod",
query = "select value from MengeNEZ where Ort='Flachbau' group by * limit 100;
select value from Durchfluss where Ort='Flachbau' limit 100",
return_xts = FALSE)
testthat::expect_is(object = data4, class = "list")
})
testthat::test_that("multiple query with chunking and xts result", {
# only local tests
testthat::skip_on_cran()
testthat::skip_on_travis()
data5 <- influx_query(con = con,
chunked = 10,
db = "stbmod",
query = "select value from MengeNEZ where Ort='Flachbau' group by * limit 100;
select value from Durchfluss where Ort='Flachbau' limit 100",
return_xts = TRUE)
testthat::expect_is(object = data5, class = "list")
})
|
UNITEST_kmc <- function(){
x <- c( 1, 1.5, 2, 3, 4.2, 5.0, 6.1, 5.3, 4.5, 0.9, 2.1, 4.3) # positive time
d <- c( 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1) # status censored/uncensored
#### compute e-value and its adjustment ####
g=list( f=function(x) { x-3.7} )
result = kmc.solve( x,d,g)
return(sprintf('%0.5f', result["loglik.null"]));
}
UNITEST_kmcbj <- function(){
library(survival)
stanford5 <- stanford2[!is.na(stanford2$t5), ]
y=log10(stanford5$time)
d <- stanford5$status
oy = order(y,-d)
d=d[oy]
y=y[oy]
x=cbind(1,stanford5$age)[oy,]
beta0 = c(3.2, -0.015)
result = kmc.bjtest(y, d, x=x, beta = beta0,
init.st="naive")[["-2LLR"]]
return(sprintf('%0.5f', result));
}
test_that("kmc works", {
expect_equal(UNITEST_kmc(), "-17.51983")
expect_equal(UNITEST_kmcbj(), "0.20148")
})
|
/tests/testthat/test-kmc.R
|
no_license
|
yfyang86/kmc
|
R
| false
| false
| 870
|
r
|
UNITEST_kmc <- function(){
x <- c( 1, 1.5, 2, 3, 4.2, 5.0, 6.1, 5.3, 4.5, 0.9, 2.1, 4.3) # positive time
d <- c( 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1) # status censored/uncensored
#### compute e-value and its adjustment ####
g=list( f=function(x) { x-3.7} )
result = kmc.solve( x,d,g)
return(sprintf('%0.5f', result["loglik.null"]));
}
UNITEST_kmcbj <- function(){
library(survival)
stanford5 <- stanford2[!is.na(stanford2$t5), ]
y=log10(stanford5$time)
d <- stanford5$status
oy = order(y,-d)
d=d[oy]
y=y[oy]
x=cbind(1,stanford5$age)[oy,]
beta0 = c(3.2, -0.015)
result = kmc.bjtest(y, d, x=x, beta = beta0,
init.st="naive")[["-2LLR"]]
return(sprintf('%0.5f', result));
}
test_that("kmc works", {
expect_equal(UNITEST_kmc(), "-17.51983")
expect_equal(UNITEST_kmcbj(), "0.20148")
})
|
#' @title Compute specificity
#' @description Calculates specificity of tools
#' @details Compares tool adjacency matrix output to Klemm-Eguiluz adjacency matrices
#'
#' @param imatrix true positive matrix, e.g. Klemm-Eguiluz adjacency matrix
#' @param outmatrix matrix with values, e.g. Spearman correlation or other tool output
#' @param absolute Calculates matches for absolute values if true instead of taking sign into account
#'
#' @return specificity score
#' @export
computeSpecificity = function(imatrix, outmatrix, absolute = FALSE){
outmatrix = t(outmatrix)
neglist = which(imatrix == 0, arr.ind = T)
n = length(neglist[,1])
tn = 0
for (i in 1:length(neglist[,1])){
coords = neglist[i,]
if (!absolute){
if (imatrix[coords[1],coords[2]] == outmatrix[coords[1],coords[2]]){
tn = tn + 1
}
}
if (absolute){
if (abs(imatrix[coords[1],coords[2]]) == abs(outmatrix[coords[1],coords[2]])){
tn = tn + 1
}
}
}
spc = (tn/n)
return(spc)
}
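## Illustrative toy example (made-up 2x2 matrices; nonzero entries mark edges):
# truth <- matrix(c(0, 1, 1, 0), nrow = 2)  # true network, zeros on the diagonal
# pred  <- matrix(c(0, 0, 1, 0), nrow = 2)  # tool output
# computeSpecificity(truth, pred)           # 1: both true zeros are also zero in t(pred)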
|
/R/computeSpecificity.R
|
no_license
|
ramellose/NetworkUtils
|
R
| false
| false
| 1,014
|
r
|
#' @title Compute specificity
#' @description Calculates specificity of tools
#' @details Compares tool adjacency matrix output to Klemm-Eguiluz adjacency matrices
#'
#' @param imatrix true positive matrix, e.g. Klemm-Eguiluz adjacency matrix
#' @param outmatrix matrix with values, e.g. Spearman correlation or other tool output
#' @param absolute Calculates matches for absolute values if true instead of taking sign into account
#'
#' @return specificity score
#' @export
computeSpecificity = function(imatrix, outmatrix, absolute = FALSE){
outmatrix = t(outmatrix)
neglist = which(imatrix == 0, arr.ind = T)
n = length(neglist[,1])
tn = 0
for (i in 1:length(neglist[,1])){
coords = neglist[i,]
if (!absolute){
if (imatrix[coords[1],coords[2]] == outmatrix[coords[1],coords[2]]){
tn = tn + 1
}
}
if (absolute){
if (abs(imatrix[coords[1],coords[2]]) == abs(outmatrix[coords[1],coords[2]])){
tn = tn + 1
}
}
}
spc = (tn/n)
return(spc)
}
|
library(pROC)
data(aSAH)
context("roc.test")
test_that("roc.test works", {
t1 <<- roc.test(r.wfns, r.s100b)
t2 <<- roc.test(r.wfns, r.ndka)
t3 <<- roc.test(r.ndka, r.s100b)
expect_is(t1, "htest")
expect_is(t2, "htest")
expect_is(t3, "htest")
})
test_that("roc.test statistic and p are as expected with defaults", {
expect_equal(t1$statistic, c(Z=2.20898359144091))
expect_equal(t1$p.value, 0.0271757822291882)
expect_match(t1$method, "DeLong")
expect_match(t1$method, "correlated")
expect_identical(t1$alternative, "two.sided")
expect_equal(t2$statistic, c(Z=2.79777591868904))
expect_equal(t2$p.value, 0.00514557970691098)
expect_match(t2$method, "DeLong")
expect_match(t2$method, "correlated")
expect_identical(t2$alternative, "two.sided")
expect_equal(t3$statistic, c(Z=-1.39077002573558))
expect_equal(t3$p.value, 0.164295175223054)
expect_match(t3$method, "DeLong")
expect_match(t3$method, "correlated")
expect_identical(t3$alternative, "two.sided")
})
test_that("two.sided roc.test produces identical p values when roc curves are reversed", {
t1b <- roc.test(r.s100b, r.wfns)
expect_equal(t1b$p.value, t1$p.value)
expect_equal(t1b$statistic, -t1$statistic)
t2b <- roc.test(r.ndka, r.wfns)
expect_equal(t2b$p.value, t2$p.value)
expect_equal(t2b$statistic, -t2$statistic)
t3b <- roc.test(r.s100b, r.ndka)
expect_equal(t3b$p.value, t3$p.value)
expect_equal(t3b$statistic, -t3$statistic)
})
test_that("unpaired roc.test works", {
# Warns about pairing
expect_warning(t1up <<- roc.test(r.wfns, r.s100b, paired = FALSE))
expect_warning(t2up <<- roc.test(r.wfns, r.ndka, paired = FALSE))
expect_warning(t3up <<- roc.test(r.ndka, r.s100b, paired = FALSE))
})
test_that("unpaired roc.test statistic and p are as expected", {
expect_equal(t1up$statistic, c(D=1.43490640926908))
expect_equal(t1up$p.value, 0.152825378808796)
expect_match(t1up$method, "DeLong")
expect_identical(t1up$alternative, "two.sided")
expect_equal(t2up$statistic, c(D=3.10125096778969))
expect_equal(t2up$p.value, 0.00220950791756457)
expect_match(t2up$method, "DeLong")
expect_identical(t2up$alternative, "two.sided")
expect_equal(t3up$statistic, c(D=-1.55995743389685))
expect_equal(t3up$p.value, 0.120192832430845)
expect_match(t3up$method, "DeLong")
expect_identical(t3up$alternative, "two.sided")
})
test_that("unpaired two.sided roc.test produces identical p values when roc curves are reversed", {
expect_warning(t1upb <- roc.test(r.s100b, r.wfns, paired = FALSE))
expect_equal(t1upb$p.value, t1up$p.value)
expect_equal(t1upb$statistic, -t1up$statistic)
expect_warning(t2upb <- roc.test(r.ndka, r.wfns, paired = FALSE))
expect_equal(t2upb$p.value, t2up$p.value)
expect_equal(t2upb$statistic, -t2up$statistic)
expect_warning(t3upb <- roc.test(r.s100b, r.ndka, paired = FALSE))
expect_equal(t3upb$p.value, t3up$p.value)
expect_equal(t3upb$statistic, -t3up$statistic)
})
test_that("one-sided roc.test work and produce expected results", {
t1gt <- roc.test(r.wfns, r.s100b, alternative = "greater")
t1lt <- roc.test(r.wfns, r.s100b, alternative = "less")
expect_equal(t1gt$statistic, t1$statistic)
expect_equal(t1lt$statistic, t1$statistic)
expect_equal(t1gt$p.value, 0.0135878911145941)
expect_equal(t1lt$p.value, 0.986412108885406)
expect_match(t1gt$method, "DeLong")
expect_match(t1gt$method, "correlated")
expect_identical(t1gt$alternative, "greater")
expect_match(t1lt$method, "DeLong")
expect_match(t1lt$method, "correlated")
expect_identical(t1lt$alternative, "less")
})
test_that("unpaired one-sided roc.test work and produce expected results", {
expect_warning(t1upgt <- roc.test(r.wfns, r.s100b, alternative = "greater", paired = FALSE))
expect_warning(t1uplt <- roc.test(r.wfns, r.s100b, alternative = "less", paired = FALSE))
expect_equal(t1upgt$statistic, t1up$statistic)
expect_equal(t1uplt$statistic, t1up$statistic)
expect_equal(t1upgt$p.value, 0.076412689404398)
expect_equal(t1uplt$p.value, 0.923587310595602)
expect_match(t1upgt$method, "DeLong")
expect_identical(t1upgt$alternative, "greater")
expect_match(t1uplt$method, "DeLong")
expect_identical(t1uplt$alternative, "less")
})
test_that("roc.formula works", {
expect_silent(t1c <- roc.test(aSAH$outcome ~ aSAH$wfns + aSAH$s100b, quiet = TRUE)) # make sure silent is passed
expect_equal(t1c$statistic, t1$statistic)
expect_equal(t1c$p.value, t1$p.value)
expect_match(t1$method, "DeLong")
expect_match(t1$method, "correlated")
expect_identical(t1$alternative, "two.sided")
expect_warning(t1upc <- roc.test(aSAH$outcome ~ aSAH$wfns + aSAH$s100b, quiet = TRUE, paired = FALSE))
expect_equal(t1upc$statistic, t1up$statistic)
expect_equal(t1upc$p.value, t1up$p.value)
expect_match(t1upc$method, "DeLong")
expect_identical(t1upc$alternative, "two.sided")
})
test_that("roc.formula supports subset and na.omit", {
check.only.items <- c("p.value", "statistic")
expect_identical(
roc.test(outcome ~ wfns + ndka, data = aSAH, subset = (gender == "Female"), quiet = TRUE)[check.only.items],
roc.test(aSAH$outcome[aSAH$gender == "Female"], aSAH$wfns[aSAH$gender == "Female"], aSAH$ndka[aSAH$gender == "Female"], quiet = TRUE)[check.only.items]
)
# Generate missing values
aSAH.missing <- aSAH
aSAH.missing$wfns[1:20] <- NA
aSAH.missing$ndka[1:20] <- NA
expect_identical(
roc.test(outcome ~ wfns + ndka, data = aSAH.missing, na.action = na.omit, quiet = TRUE)[check.only.items],
roc.test(aSAH$outcome[21:113], aSAH$wfns[21:113], aSAH$ndka[21:113], quiet = TRUE)[check.only.items]
)
#na.fail should fail
expect_error(roc.test(outcome ~ wfns + ndka, data = aSAH.missing, na.action = na.fail, quiet = TRUE))
#weights should fail too
expect_error(roc.test(outcome ~ wfns + ndka, data = aSAH, weights = seq_len(nrow(aSAH))), regexp = "weights are not supported")
# Both na.action and subset
expect_identical(
roc.test(outcome ~ wfns + ndka, data = aSAH.missing, na.action = na.omit, subset = (gender == "Female"), quiet = TRUE)[check.only.items],
roc.test(aSAH$outcome[21:113][aSAH[21:113,]$gender == "Female"], aSAH$wfns[21:113][aSAH[21:113,]$gender == "Female"], aSAH$ndka[21:113][aSAH[21:113,]$gender == "Female"], quiet = TRUE)[check.only.items]
)
})
test_that("paired tests don't work on unpaired curves", {
# Make an unpaired ROC curve
up.r.ndka <- roc(controls = aSAH$ndka[aSAH$outcome == "Good"], cases = aSAH$ndka[aSAH$outcome == "Poor"], quiet = TRUE)
# unpaired by default
t4 <- roc.test(r.wfns, up.r.ndka)
expect_false(grepl("correlated", t4$method))
	# Should be an error:
expect_error(roc.test(r.wfns, up.r.ndka, paired = TRUE))
})
test_that("one-sided roc.test work with direction='>' and produce expected results", {
r.mwfns <- roc(aSAH$outcome, -as.numeric(aSAH$wfns))
r.ms100b <- roc(aSAH$outcome, -aSAH$s100b)
## We already tested those before:
#t1gt <- roc.test(r.wfns, r.s100b, alternative = "greater")
#t1lt <- roc.test(r.wfns, r.s100b, alternative = "less")
# Test with inverted direction
m1gt <- roc.test(r.mwfns, r.ms100b, alternative = "greater")
m1lt <- roc.test(r.mwfns, r.ms100b, alternative = "less")
expect_equal(m1gt$statistic, t1$statistic)
expect_equal(m1lt$statistic, t1$statistic)
expect_equal(m1gt$p.value, 0.0135878911145941)
expect_equal(m1lt$p.value, 0.986412108885406)
})
|
/tests/testthat/test-roc.test.R
|
no_license
|
Mwebaza/pROC
|
R
| false
| false
| 7,333
|
r
|
library(pROC)
data(aSAH)
context("roc.test")
test_that("roc.test works", {
t1 <<- roc.test(r.wfns, r.s100b)
t2 <<- roc.test(r.wfns, r.ndka)
t3 <<- roc.test(r.ndka, r.s100b)
expect_is(t1, "htest")
expect_is(t2, "htest")
expect_is(t3, "htest")
})
test_that("roc.test statistic and p are as expected with defaults", {
expect_equal(t1$statistic, c(Z=2.20898359144091))
expect_equal(t1$p.value, 0.0271757822291882)
expect_match(t1$method, "DeLong")
expect_match(t1$method, "correlated")
expect_identical(t1$alternative, "two.sided")
expect_equal(t2$statistic, c(Z=2.79777591868904))
expect_equal(t2$p.value, 0.00514557970691098)
expect_match(t2$method, "DeLong")
expect_match(t2$method, "correlated")
expect_identical(t2$alternative, "two.sided")
expect_equal(t3$statistic, c(Z=-1.39077002573558))
expect_equal(t3$p.value, 0.164295175223054)
expect_match(t3$method, "DeLong")
expect_match(t3$method, "correlated")
expect_identical(t3$alternative, "two.sided")
})
test_that("two.sided roc.test produces identical p values when roc curves are reversed", {
t1b <- roc.test(r.s100b, r.wfns)
expect_equal(t1b$p.value, t1$p.value)
expect_equal(t1b$statistic, -t1$statistic)
t2b <- roc.test(r.ndka, r.wfns)
expect_equal(t2b$p.value, t2$p.value)
expect_equal(t2b$statistic, -t2$statistic)
t3b <- roc.test(r.s100b, r.ndka)
expect_equal(t3b$p.value, t3$p.value)
expect_equal(t3b$statistic, -t3$statistic)
})
test_that("unpaired roc.test works", {
# Warns about pairing
expect_warning(t1up <<- roc.test(r.wfns, r.s100b, paired = FALSE))
expect_warning(t2up <<- roc.test(r.wfns, r.ndka, paired = FALSE))
expect_warning(t3up <<- roc.test(r.ndka, r.s100b, paired = FALSE))
})
test_that("unpaired roc.test statistic and p are as expected", {
expect_equal(t1up$statistic, c(D=1.43490640926908))
expect_equal(t1up$p.value, 0.152825378808796)
expect_match(t1up$method, "DeLong")
expect_identical(t1up$alternative, "two.sided")
expect_equal(t2up$statistic, c(D=3.10125096778969))
expect_equal(t2up$p.value, 0.00220950791756457)
expect_match(t2up$method, "DeLong")
expect_identical(t2up$alternative, "two.sided")
expect_equal(t3up$statistic, c(D=-1.55995743389685))
expect_equal(t3up$p.value, 0.120192832430845)
expect_match(t3up$method, "DeLong")
expect_identical(t3up$alternative, "two.sided")
})
test_that("unpaired two.sided roc.test produces identical p values when roc curves are reversed", {
expect_warning(t1upb <- roc.test(r.s100b, r.wfns, paired = FALSE))
expect_equal(t1upb$p.value, t1up$p.value)
expect_equal(t1upb$statistic, -t1up$statistic)
expect_warning(t2upb <- roc.test(r.ndka, r.wfns, paired = FALSE))
expect_equal(t2upb$p.value, t2up$p.value)
expect_equal(t2upb$statistic, -t2up$statistic)
expect_warning(t3upb <- roc.test(r.s100b, r.ndka, paired = FALSE))
expect_equal(t3upb$p.value, t3up$p.value)
expect_equal(t3upb$statistic, -t3up$statistic)
})
test_that("one-sided roc.test work and produce expected results", {
t1gt <- roc.test(r.wfns, r.s100b, alternative = "greater")
t1lt <- roc.test(r.wfns, r.s100b, alternative = "less")
expect_equal(t1gt$statistic, t1$statistic)
expect_equal(t1lt$statistic, t1$statistic)
expect_equal(t1gt$p.value, 0.0135878911145941)
expect_equal(t1lt$p.value, 0.986412108885406)
expect_match(t1gt$method, "DeLong")
expect_match(t1gt$method, "correlated")
expect_identical(t1gt$alternative, "greater")
expect_match(t1lt$method, "DeLong")
expect_match(t1lt$method, "correlated")
expect_identical(t1lt$alternative, "less")
})
test_that("unpaired one-sided roc.test work and produce expected results", {
expect_warning(t1upgt <- roc.test(r.wfns, r.s100b, alternative = "greater", paired = FALSE))
expect_warning(t1uplt <- roc.test(r.wfns, r.s100b, alternative = "less", paired = FALSE))
expect_equal(t1upgt$statistic, t1up$statistic)
expect_equal(t1uplt$statistic, t1up$statistic)
expect_equal(t1upgt$p.value, 0.076412689404398)
expect_equal(t1uplt$p.value, 0.923587310595602)
expect_match(t1upgt$method, "DeLong")
expect_identical(t1upgt$alternative, "greater")
expect_match(t1uplt$method, "DeLong")
expect_identical(t1uplt$alternative, "less")
})
test_that("roc.formula works", {
expect_silent(t1c <- roc.test(aSAH$outcome ~ aSAH$wfns + aSAH$s100b, quiet = TRUE)) # make sure silent is passed
expect_equal(t1c$statistic, t1$statistic)
expect_equal(t1c$p.value, t1$p.value)
expect_match(t1$method, "DeLong")
expect_match(t1$method, "correlated")
expect_identical(t1$alternative, "two.sided")
expect_warning(t1upc <- roc.test(aSAH$outcome ~ aSAH$wfns + aSAH$s100b, quiet = TRUE, paired = FALSE))
expect_equal(t1upc$statistic, t1up$statistic)
expect_equal(t1upc$p.value, t1up$p.value)
expect_match(t1upc$method, "DeLong")
expect_identical(t1upc$alternative, "two.sided")
})
test_that("roc.formula supports subset and na.omit", {
check.only.items <- c("p.value", "statistic")
expect_identical(
roc.test(outcome ~ wfns + ndka, data = aSAH, subset = (gender == "Female"), quiet = TRUE)[check.only.items],
roc.test(aSAH$outcome[aSAH$gender == "Female"], aSAH$wfns[aSAH$gender == "Female"], aSAH$ndka[aSAH$gender == "Female"], quiet = TRUE)[check.only.items]
)
# Generate missing values
aSAH.missing <- aSAH
aSAH.missing$wfns[1:20] <- NA
aSAH.missing$ndka[1:20] <- NA
expect_identical(
roc.test(outcome ~ wfns + ndka, data = aSAH.missing, na.action = na.omit, quiet = TRUE)[check.only.items],
roc.test(aSAH$outcome[21:113], aSAH$wfns[21:113], aSAH$ndka[21:113], quiet = TRUE)[check.only.items]
)
#na.fail should fail
expect_error(roc.test(outcome ~ wfns + ndka, data = aSAH.missing, na.action = na.fail, quiet = TRUE))
#weights should fail too
expect_error(roc.test(outcome ~ wfns + ndka, data = aSAH, weights = seq_len(nrow(aSAH))), regexp = "weights are not supported")
# Both na.action and subset
expect_identical(
roc.test(outcome ~ wfns + ndka, data = aSAH.missing, na.action = na.omit, subset = (gender == "Female"), quiet = TRUE)[check.only.items],
roc.test(aSAH$outcome[21:113][aSAH[21:113,]$gender == "Female"], aSAH$wfns[21:113][aSAH[21:113,]$gender == "Female"], aSAH$ndka[21:113][aSAH[21:113,]$gender == "Female"], quiet = TRUE)[check.only.items]
)
})
test_that("paired tests don't work on unpaired curves", {
# Make an unpaired ROC curve
up.r.ndka <- roc(controls = aSAH$ndka[aSAH$outcome == "Good"], cases = aSAH$ndka[aSAH$outcome == "Poor"], quiet = TRUE)
# unpaired by default
t4 <- roc.test(r.wfns, up.r.ndka)
expect_false(grepl("correlated", t4$method))
	# Should be an error:
expect_error(roc.test(r.wfns, up.r.ndka, paired = TRUE))
})
test_that("one-sided roc.test work with direction='>' and produce expected results", {
r.mwfns <- roc(aSAH$outcome, -as.numeric(aSAH$wfns))
r.ms100b <- roc(aSAH$outcome, -aSAH$s100b)
## We already tested those before:
#t1gt <- roc.test(r.wfns, r.s100b, alternative = "greater")
#t1lt <- roc.test(r.wfns, r.s100b, alternative = "less")
# Test with inverted direction
m1gt <- roc.test(r.mwfns, r.ms100b, alternative = "greater")
m1lt <- roc.test(r.mwfns, r.ms100b, alternative = "less")
expect_equal(m1gt$statistic, t1$statistic)
expect_equal(m1lt$statistic, t1$statistic)
expect_equal(m1gt$p.value, 0.0135878911145941)
expect_equal(m1lt$p.value, 0.986412108885406)
})
|
#' @title t-tests
#' @description t-tests
#' @param x matrix or character vector with matrix column names
#' @param y matrix to compare against or matrix with x and y column names
#' @param ... adjust.method and cutoff
#' @return t-tests
#' @rdname ttest
#' @export
ttest = function(x, y, ...) {
stopifnot(has_dim(y))
UseMethod('ttest', x)
}
#' @export
ttest.NULL = function(...) {
"NULL"
}
#' @export
ttest.character = function(x, y, ...) {
c(x, y) %<-% split_matrix(m = y, by = x)
ttest.matrix(x = x, y = y, ...)
}
#' @export
ttest.matrix = function(x, y, adjust.method = 'BH', cutoff = NULL, ...) {
x = as.matrix(x)
y = as.matrix(y)
stopifnot(have_equal_rownames(x, y))
res = sapply(1:nrow(x), function(i) stats::t.test(x[i, ], y[i, ])$p.value)
res = stats::setNames(stats::p.adjust(res, method = adjust.method), rownames(x))
if (!is.null(cutoff) && is_p_value(cutoff)) res = res[res <= cutoff]
res
}
#' @export
ttest.data.frame = ttest.matrix
#' @export
ttest.default = function(x, y, ...) {
message('Class of <x> not recognised.')
}
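## Illustrative usage (commented out; toy matrices sharing row names, and assuming the
## internal helpers has_dim() and have_equal_rownames() behave as their names suggest):
# set.seed(1)
# a <- matrix(rnorm(40), nrow = 4, dimnames = list(paste0("g", 1:4), NULL))
# b <- matrix(rnorm(40, mean = 2), nrow = 4, dimnames = list(paste0("g", 1:4), NULL))
# ttest(a, b, adjust.method = "BH", cutoff = 0.05)  # named vector of adjusted p-values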
|
/R/ttest.R
|
permissive
|
jlaffy/jtools
|
R
| false
| false
| 1,103
|
r
|
#' @title t-tests
#' @description t-tests
#' @param x matrix or character vector with matrix column names
#' @param y matrix to compare against or matrix with x and y column names
#' @param ... adjust.method and cutoff
#' @return t-tests
#' @rdname ttest
#' @export
ttest = function(x, y, ...) {
stopifnot(has_dim(y))
UseMethod('ttest', x)
}
#' @export
ttest.NULL = function(...) {
"NULL"
}
#' @export
ttest.character = function(x, y, ...) {
c(x, y) %<-% split_matrix(m = y, by = x)
ttest.matrix(x = x, y = y, ...)
}
#' @export
ttest.matrix = function(x, y, adjust.method = 'BH', cutoff = NULL, ...) {
x = as.matrix(x)
y = as.matrix(y)
stopifnot(have_equal_rownames(x, y))
res = sapply(1:nrow(x), function(i) stats::t.test(x[i, ], y[i, ])$p.value)
res = stats::setNames(stats::p.adjust(res, method = adjust.method), rownames(x))
if (!is.null(cutoff) && is_p_value(cutoff)) res = res[res <= cutoff]
res
}
#' @export
ttest.data.frame = ttest.matrix
#' @export
ttest.default = function(x, y, ...) {
message('Class of <x> not recognised.')
}
|
# Define UI for the music analysis application
dashboardPage(
skin = 'black',
dashboardHeader(title = "Play with music"),
dashboardSidebar(
sidebarMenu(
menuItem("Content", tabName = "home", icon = icon("dashboard")),
menuItem("Sentiment", icon = icon("meh-o"), tabName = "sentiment",
badgeLabel = "hot", badgeColor = "red"),
menuItem("Wordclouds", icon = icon("cloud"), tabName = "wordcloud"),
menuItem("Word comparison", icon = icon("cloud"), tabName = "comparison"),
menuItem("Topics comparison", icon = icon("clone"), tabName = 'topics'),
menuItem("Songs vs comments", icon = icon("bolt"), tabName = 'songsComments'),
menuItem("Find similar songs", icon = icon('eye'), tabName = 'findSimilar')
)
),
  # Sidebar layout with input and output definitions
# Inputs
dashboardBody(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "styles.css")
),
tabItems(
tabItem(tabName = 'home',
div(style = 'text-align: center',
h1('Stanislaw Smyl, Artur Gorlicki'),
                h2('Please prepare for a special music experience'))
),
tabItem(tabName = 'sentiment',
fluidRow(
box(width = 12,
div(class = 'simple',
                      'Boxplot showing the percentage of words with the specified sentiment relative to all the words in a song, by genre and across time.')
)
),
fluidRow(
box(width = 12,
# # Select sentiment
box(width = 3, title = "Settings", status = "success",
solidHeader = F,
selectizeInput(inputId = "genre",
label = "Genre:",
sort(unique(songsSentiment$genreTop)),
selected = sort(unique(songsSentiment$genreTop))[1]
),
selectizeInput(inputId = "sentiment",
label = "Sentiment:",
                                           unique(songsSentiment$sentiment),
                                           selected = unique(songsSentiment$sentiment)[1]
),
pickerInput(inputId = "years",
label = "Select years",
choices = list('years' = sort(unique(songsSentiment$releaseDate), decreasing = T)),
options = list('actions-box' = TRUE),
multiple = T,
selected = sort(unique(songsSentiment$releaseDate))[length(unique(songsSentiment$releaseDate))])
),
box(width = 9, title = "Boxplot of sentiments", status = "success", solidHeader = F,
div(class = 'simplePlot',
plotlyOutput(outputId = "boxplot"))
)
)
),
fluidRow(
box(width = 12,
div(class = 'simpleMain',
'Comparison of ratio between words with positive and negative sentiment across time.')
)
),
fluidRow(
box(width = 12,
# # Select sentiment
box(width = 3, title = "Settings", status = "success",
solidHeader = F,
div(class = 'simpleSlider',
noUiSliderInput("years2",
label = 'Years:',
value = c(min(songsSentiment$releaseDate),
max(songsSentiment$releaseDate)),
min = min(songsSentiment$releaseDate),
max = max(songsSentiment$releaseDate),
format = wNumbFormat(decimals = FALSE),
step = 1)),
hr(),
pickerInput(inputId = "emotions",
label = "Select emotions",
choices = list('positive' = pos,
'negative' = neg), options = list('actions-box' = TRUE),
multiple = T, selected = c(pos, neg))
),
box(width = 9, title = "Positive emotions ratio", status = "success", solidHeader = F,
div(class = 'simplePlot',
plotlyOutput(outputId = 'lineplot')
)
)
)
)
),
####################################
tabItem(tabName = 'wordcloud',
fluidRow(
box(width = 12,
div(class = 'simple',
                      'Visualisation of a specified number of words in a given list of songs.')
)
),
#####################
fluidRow(
box(width = 12,
# # Select sentiment
box(width = 3, title = "Settings", status = "success",
solidHeader = F,
selectInput(inputId = "wc_songName",
label = "Choose song name",
choices = sort(unique(songsClean$songName)),
selected = sort(unique(songsClean$songName))[1],
multiple = T),
# Add a numeric input for the number of words
numericInput(inputId = 'num', label = "Maximum number of words",
value = 10, min = 5, max = 100),
# Add a colour input for the background colour
colourInput("col", "Background colour", "white")
),
box(width = 9, title = "Wordcloud", status = "success", solidHeader = F,
div(class = 'simplePlot',
style = 'text-align: center',
wordcloud2Output("cloud")
)
)
)
)
######################
),
####################################
####################################
tabItem(tabName = 'comparison',
fluidRow(
box(width = 12,
div(class = 'simple',
'Wordcloud showing a given number of the most frequent words for two specified songs.')
)
),
#####################
fluidRow(
box(width = 12,
# # Select sentiment
box(width = 3, title = "Settings", status = "success",
solidHeader = F,
selectInput(inputId = "cc_songName1",
label = "Choose song nr 1",
choices = sort(unique(songsClean$songName)),
selected = sort(unique(songsClean$songName))[1],
multiple = F),
selectInput(inputId = "cc_songName2",
label = "Choose song nr 2",
choices = sort(unique(songsClean$songName)),
selected = sort(unique(songsClean$songName))[2],
multiple = F),
# Add a numeric input for the number of words
numericInput(inputId = 'cc_num', label = "Maximum number of words",
value = 10, min = 5, max = 100)
),
box(width = 9, title = "Comparison cloud", status = "success", solidHeader = F,
div(class = 'simplePlot',
plotOutput(outputId = 'wordcloud_comp')
)
)
)
),
fluidRow(
box(width = 12,
div(class = 'simple',
'Frequencies of common words between two specified songs.')
)
),
######################
fluidRow(
box(width = 12,
# # Select sentiment
box(width = 3, title = "Settings", status = "success",
solidHeader = F,
selectInput(inputId = "pp_songName1",
label = "Choose song nr 1",
choices = sort(unique(songsClean$songName)),
selected = sort(unique(songsClean$songName))[1],
multiple = F),
selectInput(inputId = "pp_songName2",
label = "Choose song nr 2",
choices = sort(unique(songsClean$songName)),
selected = sort(unique(songsClean$songName))[2],
multiple = F),
# Add a numeric input for the number of words
numericInput(inputId = 'pp_num', label = "Maximum number of words",
value = 10, min = 5, max = 100)
),
box(width = 9, title = "Pyramid plot", status = "success", solidHeader = F,
div(class = 'simplePlot',
plotOutput(outputId = 'pyramid_comp')
)
)
)
)
#############################
),
####################################
####################################
tabItem(tabName = 'topics',
fluidRow(
box(width = 12,
div(class = 'simple',
                      'All songs divided into two topics based on lyrics similarity. Below you can find the words that best fit each topic.')
)
),
#####################
fluidRow(
box(width = 12, title = "Topics in songs with most fitting words", status = "success", solidHeader = F,
div(class = 'simpleLDA',
plotOutput(outputId = 'ldaSongs')
)
)
),
fluidRow(
box(width = 12,
div(class = 'simple',
                      'All comments divided into two topics based on content similarity. Below you can find the words that best fit each topic.')
)
),
######################
fluidRow(
box(width = 12, title = "Topics in comments with most fitting words", status = "success", solidHeader = F,
div(class = 'simpleLDA',
plotOutput(outputId = 'ldaComments')
)
)
)
#############################
),
##################################
#################################
tabItem(tabName = 'songsComments',
fluidRow(
box(width = 12,
div(class = 'simple',
                      'Comparison of sentiment in songs and the comments related to those songs. The bar plot shows the percentage of words with a given sentiment relative to the total number of words in the specified genre.')
)
),
fluidRow(
box(width = 12,
# # Select sentiment
box(width = 3, title = "Settings", status = "success",
solidHeader = F,
selectizeInput(inputId = "songsCommentsGenre",
label = "Genre:",
sort(unique(songsSentiment$genreTop)),
selected = sort(unique(songsSentiment$genreTop))[1]
)
),
box(width = 9, title = "Songs vs Comments", status = "success", solidHeader = F,
div(class = 'simplePlot',
plotlyOutput(outputId = "songsComments"))
)
)
)
####################################
),
tabItem(tabName = 'findSimilar',
fluidRow(
box(width = 12,
div(class = 'simple',
'It is easy to find the song with lyrics similar to what you paste!')
)
),
fluidRow(
box(width = 12,
# # paste song lyrics
box(width = 3, title = "Song lyrics", status = "success",
solidHeader = F,
textAreaInput(inputId = "songLyrics",
label = HTML("<font size = '4em'>Paste song lyrics: </font><br/> (see example lyrics below)"),
value = inputTextExample,
height = '300px'
)
),
box(width = 9, title = "Similar songs:", status = "success", solidHeader = F,
div(class = 'simplePlot',
dataTableOutput(outputId = "songLyrics"))
)
)
)
####################################
)
)
)
)
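# Minimal illustrative server stub (hypothetical: the app's real server.R is not shown
# here, and column names such as `rate` and `word` are assumptions). It only sketches
# how two of the outputIds defined above could be wired up:
# server <- function(input, output, session) {
#   output$boxplot <- renderPlotly({
#     songsSentiment %>%
#       filter(genreTop == input$genre,
#              sentiment == input$sentiment,
#              releaseDate %in% input$years) %>%
#       plot_ly(x = ~releaseDate, y = ~rate, type = "box")
#   })
#   output$cloud <- renderWordcloud2({
#     words <- songsClean %>% filter(songName %in% input$wc_songName)
#     wordcloud2(head(count(words, word, sort = TRUE), input$num),
#                backgroundColor = input$col)
#   })
# }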
|
/Application/ui.R
|
no_license
|
StanislawSmyl/drill_in_music
|
R
| false
| false
| 13,526
|
r
|
# Define UI for the music analysis application
dashboardPage(
skin = 'black',
dashboardHeader(title = "Play with music"),
dashboardSidebar(
sidebarMenu(
menuItem("Content", tabName = "home", icon = icon("dashboard")),
menuItem("Sentiment", icon = icon("meh-o"), tabName = "sentiment",
badgeLabel = "hot", badgeColor = "red"),
menuItem("Wordclouds", icon = icon("cloud"), tabName = "wordcloud"),
menuItem("Word comparison", icon = icon("cloud"), tabName = "comparison"),
menuItem("Topics comparison", icon = icon("clone"), tabName = 'topics'),
menuItem("Songs vs comments", icon = icon("bolt"), tabName = 'songsComments'),
menuItem("Find similar songs", icon = icon('eye'), tabName = 'findSimilar')
)
),
  # Sidebar layout with input and output definitions
# Inputs
dashboardBody(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "styles.css")
),
tabItems(
tabItem(tabName = 'home',
div(style = 'text-align: center',
h1('Stanislaw Smyl, Artur Gorlicki'),
                h2('Please prepare for a special music experience'))
),
tabItem(tabName = 'sentiment',
fluidRow(
box(width = 12,
div(class = 'simple',
                      'Boxplot showing the percentage of words with the specified sentiment relative to all the words in a song, by genre and across time.')
)
),
fluidRow(
box(width = 12,
# # Select sentiment
box(width = 3, title = "Settings", status = "success",
solidHeader = F,
selectizeInput(inputId = "genre",
label = "Genre:",
sort(unique(songsSentiment$genreTop)),
selected = sort(unique(songsSentiment$genreTop))[1]
),
selectizeInput(inputId = "sentiment",
label = "Sentiment:",
                                           unique(songsSentiment$sentiment),
                                           selected = unique(songsSentiment$sentiment)[1]
),
pickerInput(inputId = "years",
label = "Select years",
choices = list('years' = sort(unique(songsSentiment$releaseDate), decreasing = T)),
options = list('actions-box' = TRUE),
multiple = T,
selected = sort(unique(songsSentiment$releaseDate))[length(unique(songsSentiment$releaseDate))])
),
box(width = 9, title = "Boxplot of sentiments", status = "success", solidHeader = F,
div(class = 'simplePlot',
plotlyOutput(outputId = "boxplot"))
)
)
),
fluidRow(
box(width = 12,
div(class = 'simpleMain',
'Comparison of ratio between words with positive and negative sentiment across time.')
)
),
fluidRow(
box(width = 12,
# # Select sentiment
box(width = 3, title = "Settings", status = "success",
solidHeader = F,
div(class = 'simpleSlider',
noUiSliderInput("years2",
label = 'Years:',
value = c(min(songsSentiment$releaseDate),
max(songsSentiment$releaseDate)),
min = min(songsSentiment$releaseDate),
max = max(songsSentiment$releaseDate),
format = wNumbFormat(decimals = FALSE),
step = 1)),
hr(),
pickerInput(inputId = "emotions",
label = "Select emotions",
choices = list('positive' = pos,
'negative' = neg), options = list('actions-box' = TRUE),
multiple = T, selected = c(pos, neg))
),
box(width = 9, title = "Positive emotions ratio", status = "success", solidHeader = F,
div(class = 'simplePlot',
plotlyOutput(outputId = 'lineplot')
)
)
)
)
),
####################################
tabItem(tabName = 'wordcloud',
fluidRow(
box(width = 12,
div(class = 'simple',
                      'Visualisation of a specified number of words in a given list of songs.')
)
),
#####################
fluidRow(
box(width = 12,
# # Select sentiment
box(width = 3, title = "Settings", status = "success",
solidHeader = F,
selectInput(inputId = "wc_songName",
label = "Choose song name",
choices = sort(unique(songsClean$songName)),
selected = sort(unique(songsClean$songName))[1],
multiple = T),
# Add a numeric input for the number of words
numericInput(inputId = 'num', label = "Maximum number of words",
value = 10, min = 5, max = 100),
# Add a colour input for the background colour
colourInput("col", "Background colour", "white")
),
box(width = 9, title = "Wordcloud", status = "success", solidHeader = F,
div(class = 'simplePlot',
style = 'text-align: center',
wordcloud2Output("cloud")
)
)
)
)
######################
),
####################################
####################################
tabItem(tabName = 'comparison',
fluidRow(
box(width = 12,
div(class = 'simple',
'Wordcloud showing a given number of the most frequent words for two specified songs.')
)
),
#####################
fluidRow(
box(width = 12,
# # Select sentiment
box(width = 3, title = "Settings", status = "success",
solidHeader = F,
selectInput(inputId = "cc_songName1",
label = "Choose song nr 1",
choices = sort(unique(songsClean$songName)),
selected = sort(unique(songsClean$songName))[1],
multiple = F),
selectInput(inputId = "cc_songName2",
label = "Choose song nr 2",
choices = sort(unique(songsClean$songName)),
selected = sort(unique(songsClean$songName))[2],
multiple = F),
# Add a numeric input for the number of words
numericInput(inputId = 'cc_num', label = "Maximum number of words",
value = 10, min = 5, max = 100)
),
box(width = 9, title = "Comparison cloud", status = "success", solidHeader = F,
div(class = 'simplePlot',
plotOutput(outputId = 'wordcloud_comp')
)
)
)
),
fluidRow(
box(width = 12,
div(class = 'simple',
'Frequencies of common words between two specified songs.')
)
),
######################
fluidRow(
box(width = 12,
# # Select sentiment
box(width = 3, title = "Settings", status = "success",
solidHeader = F,
selectInput(inputId = "pp_songName1",
label = "Choose song nr 1",
choices = sort(unique(songsClean$songName)),
selected = sort(unique(songsClean$songName))[1],
multiple = F),
selectInput(inputId = "pp_songName2",
label = "Choose song nr 2",
choices = sort(unique(songsClean$songName)),
selected = sort(unique(songsClean$songName))[2],
multiple = F),
# Add a numeric input for the number of words
numericInput(inputId = 'pp_num', label = "Maximum number of words",
value = 10, min = 5, max = 100)
),
box(width = 9, title = "Pyramid plot", status = "success", solidHeader = F,
div(class = 'simplePlot',
plotOutput(outputId = 'pyramid_comp')
)
)
)
)
#############################
),
####################################
####################################
tabItem(tabName = 'topics',
fluidRow(
box(width = 12,
div(class = 'simple',
                            'All songs divided into two topics based on lyrics similarity. Below you can find the words that best fit each topic.')
)
),
#####################
fluidRow(
box(width = 12, title = "Topics in songs with most fitting words", status = "success", solidHeader = F,
div(class = 'simpleLDA',
plotOutput(outputId = 'ldaSongs')
)
)
),
fluidRow(
box(width = 12,
div(class = 'simple',
                            'All comments divided into two topics based on content similarity. Below you can find the words that best fit each topic.')
)
),
######################
fluidRow(
box(width = 12, title = "Topics in comments with most fitting words", status = "success", solidHeader = F,
div(class = 'simpleLDA',
plotOutput(outputId = 'ldaComments')
)
)
)
#############################
),
##################################
#################################
tabItem(tabName = 'songsComments',
fluidRow(
box(width = 12,
div(class = 'simple',
                            'Comparison of sentiment in songs and in the comments related to those songs. The bar plot shows the percentage of words with a given sentiment relative to the total number of words in the specified genre.'
)
),
fluidRow(
box(width = 12,
# # Select sentiment
box(width = 3, title = "Settings", status = "success",
solidHeader = F,
selectizeInput(inputId = "songsCommentsGenre",
label = "Genre:",
sort(unique(songsSentiment$genreTop)),
selected = sort(unique(songsSentiment$genreTop))[1]
)
),
box(width = 9, title = "Songs vs Comments", status = "success", solidHeader = F,
div(class = 'simplePlot',
plotlyOutput(outputId = "songsComments"))
)
)
)
####################################
),
tabItem(tabName = 'findSimilar',
fluidRow(
box(width = 12,
div(class = 'simple',
'It is easy to find the song with lyrics similar to what you paste!')
)
),
fluidRow(
box(width = 12,
# # paste song lyrics
box(width = 3, title = "Song lyrics", status = "success",
solidHeader = F,
textAreaInput(inputId = "songLyrics",
label = HTML("<font size = '4em'>Paste song lyrics: </font><br/> (see example lyrics below)"),
value = inputTextExample,
height = '300px'
)
),
box(width = 9, title = "Similar songs:", status = "success", solidHeader = F,
div(class = 'simplePlot',
dataTableOutput(outputId = "songLyrics"))
)
)
)
####################################
)
)
)
)
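The dashboard UI above only declares inputs and outputs; the matching server logic is not part of this file. As one hypothetical example of the wiring, the wordcloud tab's controls (wc_songName, num, col) could drive output$cloud roughly as sketched below, assuming songsClean holds one tokenized word per row — the word column and data shape are assumptions, not the app's actual server code.

# Hypothetical server-side sketch (to be placed inside the app's server function);
# the `word` column and one-word-per-row shape of songsClean are assumptions.
output$cloud <- renderWordcloud2({
  freq <- songsClean %>%
    dplyr::filter(songName %in% input$wc_songName) %>%
    dplyr::count(word, sort = TRUE) %>%
    dplyr::rename(freq = n) %>%
    head(input$num)
  wordcloud2(freq, backgroundColor = input$col)
})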
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Get_x_text_angle.R
\name{get_x_text_angle}
\alias{get_x_text_angle}
\title{Check the level of a column and return the proper angle of X_text_axis}
\usage{
get_x_text_angle(levels)
}
\arguments{
\item{levels}{the level number of a column}
}
\value{
the proper angle of X_text_axis
}
\description{
Check the level of a column and return the proper angle of X_text_axis
}
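The .Rd file above documents only the interface of get_x_text_angle(). A minimal sketch of what such a helper could look like is given below; the rotation thresholds are illustrative assumptions, not taken from the AllInOne package source.

# Hypothetical implementation sketch of get_x_text_angle(); the cut-off values
# below are assumptions chosen for readability, not the package's real ones.
get_x_text_angle <- function(levels) {
  if (levels <= 5) {
    0    # few categories: horizontal labels fit
  } else if (levels <= 15) {
    45   # moderate number of categories: tilt to avoid overlap
  } else {
    90   # many categories: rotate labels fully
  }
}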
|
/man/get_x_text_angle.Rd
|
permissive
|
MohsenYN/AllInOne
|
R
| false
| true
| 443
|
rd
|
# Solution for Lesson_3_Exercises_Using_R
# mileage.csv is derived from a 1991 U.S EPA study of passenger car mileage.
# This file includes data on sixty cars: HP (engine horsepower),
# MPG (average miles per gallon) WT (vehicle weight in 100 lb units)
# and CLASS (vehicle weight class C1,...,C6).
# read the comma-delimited text file creating a data frame object in R
# and examine its structure
mileage <- read.csv(file.path("c:/Rdata/","mileage.csv"))
str(mileage)
# 1) For each weight class determine the mean and standard deviation of MPG. What can you conclude from these calculations?
# begin by defining a simple function to print mean and standard deviation
my_mean_sd <- function(x) c(mean(x), sd(x)) # user-defined function
aggregate (MPG~CLASS, mileage, my_mean_sd) # low variability within classes
# 2) For each weight class determine the mean and standard deviation of HP.
# What can you conclude from these calculations?
aggregate (HP~CLASS, mileage, my_mean_sd) # higher means... higher standard deviations
# ----------------------------------------
# User defined functions to calculate and print selected statistics (adding variance)
range <- function(x) {max(x, na.rm = TRUE) - min(x, na.rm = TRUE)} # distance between min and max (note: masks base::range)
my_stats <- function(x) {
cat("\n mean:", mean(x, na.rm = TRUE))
cat("\n median:", median(x, na.rm = TRUE))
cat("\n range:", range(x))
cat("\n sd:", sd(x, na.rm = TRUE))
cat("\n variance:", var(x, na.rm = TRUE))
cat("\n Q1:", quantile(x, probs = c(0.25), na.rm = TRUE))
cat("\n Q3:", quantile(x, probs = c(0.75), na.rm = TRUE))
cat("\n P10:", quantile(x, probs = c(0.10), na.rm = TRUE))
}
#-----------------------------------------
# shoppers.csv contains the dollar amounts spent in a store
# by individual shoppers during one day.
# read in shoppers and examine its structure
shoppers <- read.csv(file.path("c:/Rdata/","shoppers.csv"))
str(shoppers)
# Find the mean, median, range, standard deviation, variance, Q1, Q3 and P10.
my_stats(shoppers$Spending)
hist(shoppers$Spending) # distribution is skewed right
#-----------------------------------------
# pontus.csv lists the ages of USA Presidents at the time of their inauguration.
# Also listed are the heights of the Presidents and their opponents.
# read in pontus and examine its structure
pontus <- read.csv(file.path("c:/Rdata/","pontus.csv"))
str(pontus)
# 1) Find the mean, median, range, standard deviation, Q1, Q3 and P10 of the ages.
my_stats(pontus$Age)
# check calculations by looking at the distribution of ages
Presidents_Ages <- pontus$Age
hist(Presidents_Ages) # to check
# 2) Find the mean, median, range, standard deviation, Q1, Q3 and P10
# of the heights of the Presidents and also their opponents.
my_stats(pontus$Ht)
with(pontus, table(Ht)) # to check
my_stats(pontus$HtOpp)
with(pontus, table(HtOpp)) # to check
# 3) Calculate the difference between each President's height and that of his opponent.
# Plot a histogram and construct a boxplot of these differences.
Ht_Difference <- pontus[,5]-pontus[,6]
summary(Ht_Difference)
hist(Ht_Difference) # Immaterial average height difference between pairs.
boxplot(pontus$Ht, pontus$HtOpp)
# ----------------------------------------
# geyser.csv contains the intervals (in minutes) between eruptions
# of Old Faithful Geyser in Yellowstone National Park.
# The data were taken on two consecutive weeks: WEEK1 and WEEK2.
# Compare the two sets of data using summary statistics and histograms.
# What do you conclude?
# read in geyser and examine its structure
geyser <- read.csv(file.path("c:/Rdata/","geyser.csv"))
str(geyser)
# produce summary statistics and histograms
summary(geyser)
Week1 <- geyser[,1]
Week2 <- geyser[,2]
par(mfrow=c(1,2))
hist(Week1)
hist(Week2)
par(mfrow=c(1,1))
par(mfrow=c(1,2))
boxplot(Week1)
boxplot(Week2)
par(mfrow=c(1,1))
# no apparent average difference between weeks, but multi-modal distribution.
|
/401_Lesson_03_Solution.r
|
no_license
|
jmichaelgilbert/mspaScripts
|
R
| false
| false
| 3,995
|
r
|
# Generate n-dimensional response Y that follows linear regression model Y = Xbeta + epsilon, where epsilon is normal zero with variance sigma^2 independent across samples. Seed should be set at the beginning of the function
# X - design matrix
# beta - given parameter vector
# sigma - standard deviation of the noise
# seed - starting seed value
generateY <- function(X, beta, sigma, seed = 5832652){
#[ToDo] Set seed and generate Y following linear model
set.seed(seed)
# epsilon for each dimension is N(0, sigma^2) , independent
  epsilon <- rnorm(nrow(X), mean=0, sd=sigma)
# linear regression model Y = Xbeta + epsilon
Y <- X %*% beta + epsilon
# Return Y
return(Y)
}
# Calculate beta_LS - least-squares solution, do not use lm function
# X - design matrix
# Y -response
calculateBeta <- function(X, Y){
# Calculate beta_LS
beta_LS <- solve(crossprod(X),crossprod(X,Y))
# pick b that minimizes the squared distance: ||Y - Xb||^2
# take differentiation, we have the normal equation: X^T * X * b = X^T * Y
# that is, b = (X^T * X)^-1 * X^T * Y
# Return beta
return(beta_LS)
}
# Calculate MSE
calculateMSE <- function(beta, beta_LS){
MSE <- norm(beta - beta_LS, "2")^2
# Return MSE - error ||beta - beta_LS||_2^2
return(MSE)
}
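A brief usage sketch of the three helpers above; the design matrix, true beta, and noise level are illustrative choices, not part of the assignment.

# Illustrative example only: simulate data, fit least squares, report the error.
n <- 50; p <- 3
X <- cbind(1, matrix(rnorm(n * (p - 1)), n, p - 1))  # intercept plus two covariates
beta <- c(1, -2, 0.5)
Y <- generateY(X, beta, sigma = 1)
beta_LS <- calculateBeta(X, Y)
calculateMSE(beta, beta_LS)  # squared L2 distance between true and estimated beta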
|
/FunctionsLM.R
|
no_license
|
tamu-stat689-statcomputing-fall2019/hw1-course-setup-emanuel996
|
R
| false
| false
| 1,263
|
r
|
#' Get HS Metrics
#'
#' @param reports data frame with reports
#' @param src string with name of column name to use (from reports)
#' @return A data table with hs metrics
#' @examples
#' #dat <- getHSMetrics(reports, src="PANEL_TUMOR_HSMETRICS")
#' #dat <- getHSMetrics(reports, src="PANEL_NORMAL_HSMETRICS")
#' #dat <- getHSMetrics(reports, src="RNASEQCAP_HSMETRICS")
getHSMetrics <- function(reports, src="PANEL_TUMOR_HSMETRICS"){
readM <- function(f){
cmd <- paste("grep -A 2 BAIT_SET", f)
dat <- read.table(pipe(cmd), header=TRUE, sep="\t", row.names=NULL, stringsAsFactors=FALSE)
#dat <- dat[,-match(c("SAMPLE", "LIBRARY", "READ_GROUP"), colnames(dat))]
#dat <- data.frame( METRIC=colnames(dat), VALUE=as.numeric( dat[1,] ) )
return(as.data.table(dat) )
}
hsHeader <- c("BAIT_SET","GENOME_SIZE","BAIT_TERRITORY","TARGET_TERRITORY","BAIT_DESIGN_EFFICIENCY","TOTAL_READS","PF_READS","PF_UNIQUE_READS","PCT_PF_READS","PCT_PF_UQ_READS","PF_UQ_READS_ALIGNED","PCT_PF_UQ_READS_ALIGNED","PF_UQ_BASES_ALIGNED","ON_BAIT_BASES","NEAR_BAIT_BASES","OFF_BAIT_BASES","ON_TARGET_BASES","PCT_SELECTED_BASES","PCT_OFF_BAIT","ON_BAIT_VS_SELECTED","MEAN_BAIT_COVERAGE","MEAN_TARGET_COVERAGE","PCT_USABLE_BASES_ON_BAIT","PCT_USABLE_BASES_ON_TARGET","FOLD_ENRICHMENT","ZERO_CVG_TARGETS_PCT","FOLD_80_BASE_PENALTY","PCT_TARGET_BASES_2X","PCT_TARGET_BASES_10X","PCT_TARGET_BASES_20X","PCT_TARGET_BASES_30X","PCT_TARGET_BASES_40X","PCT_TARGET_BASES_50X","PCT_TARGET_BASES_100X","HS_LIBRARY_SIZE","HS_PENALTY_10X","HS_PENALTY_20X","HS_PENALTY_30X","HS_PENALTY_40X","HS_PENALTY_50X","HS_PENALTY_100X","AT_DROPOUT","GC_DROPOUT","SAMPLE","LIBRARY","READ_GROUP", "DataReportID")
dat <- makeEmptyDataTable(hsHeader)
for(k in 1:nrow(reports)){ #k <- 3
infile <- paste(reports$prefix[k] ,reports[k, src],sep="/")
if(file.exists(infile)){
tb <- readM(infile)
} else {
tb <- data.table(t(rep(NA, length(hsHeader))))
}
tb$REPORTID <- reports$REPORTID[k]
dat <- rbindlist(list(dat, tb))
}
dat
}
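getHSMetrics() depends on a makeEmptyDataTable() helper that is defined elsewhere in the clinseqr package. A plausible sketch of that helper, offered as an assumption rather than the actual package code:

# Hypothetical sketch of makeEmptyDataTable(); the real clinseqr helper may differ.
library(data.table)
makeEmptyDataTable <- function(header) {
  dt <- data.table(matrix(nrow = 0, ncol = length(header)))  # zero-row table
  setnames(dt, header)                                       # apply column names
  dt[]
}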
|
/R/getHSMetrics.R
|
permissive
|
dakl/clinseqr
|
R
| false
| false
| 2,050
|
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
library(shiny)
library(rpart)
library(rpart.plot)
shinyServer(function(input,output){
output$plot_out=renderPlot({
DB = mtcars
var="mpg ~ "
var.include=input$variable
var.include=paste(var.include,collapse="+")
f=paste(var,var.include)
f=as.formula(f)
modelTREE= rpart(f, data=DB)
prp(modelTREE)
})
})
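This server.R assumes a companion ui.R (not shown) that defines the input$variable control and the plot_out output. Below is a minimal UI sketch consistent with the server code; the layout, labels, and default selections are assumptions.

# Hypothetical ui.R counterpart; choices are the mtcars columns other than mpg,
# matching the "mpg ~ ..." formula assembled in server.R.
library(shiny)
shinyUI(fluidPage(
  titlePanel("Regression tree for mtcars mpg"),
  sidebarLayout(
    sidebarPanel(
      checkboxGroupInput("variable", "Predictors:",
                         choices = setdiff(names(mtcars), "mpg"),
                         selected = c("wt", "hp"))
    ),
    mainPanel(plotOutput("plot_out"))
  )
))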
|
/server.R
|
no_license
|
AndreaMod/GITHUB-DDP
|
R
| false
| false
| 490
|
r
|
##* ****************************************************************
## Programmer[s]: Leandro Fernandes
## Company/Institution: Cargill
## email: leandro_h_fernandes@cargill.com
## Date: June 20, 2016
##
## The author believes that share code and knowledge is awesome.
## Feel free to share and modify this piece of code. But don't be
## impolite and remember to cite the author and give him his credits.
##* ****************************************************************
library(caret,quietly = TRUE )
library(ROCR,quietly = TRUE )
BuildModelReport <- function(model.log, response.var, train.data, val.data){
cat(' model summary\n')
print(summary(model.log))
y.train <- unlist(train.data[,response.var])
pred.train <- predict(model.log, type = 'response')
y.val <- unlist(val.data[,response.var])
pred.val <- predict(model.log,val.data, type = 'response')
## score
z <- log(pred.val/(1.0-pred.val))
##confusion matrix
table(y.train, pred.train > 0.5)
table(y.val, pred.val > 0.5)
## ROCR Curve
ROCRpred <- prediction(pred.val, y.val)
ROCRperf <- performance(ROCRpred, 'tpr','fpr')
auc.perf <- performance(ROCRpred, measure = "auc")
cat('=================================\n\n')
cat('AUC: \n')
print(as.numeric(auc.perf@y.values))
plot(ROCRperf, colorize = TRUE, text.adj = c(-0.2,1.7))
abline(a = 0.0, b=1.0,col = "lightgray", lty = 2)
cat('=================================\n\n')
##cat('Train confusion matrix:\n')
##print(confusionMatrix(pred.train > 0.5, y.train))
cat('Val confusion matrix:\n')
  # caret::confusionMatrix expects factors with matching levels
  print(confusionMatrix(factor(as.integer(pred.val > 0.5), levels = c(0, 1)),
                        factor(y.val, levels = c(0, 1))))
}
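A usage sketch for BuildModelReport(); the synthetic data set, response name, and train/validation split below are illustrative assumptions, not part of the original script.

# Hypothetical call on a small simulated binary-outcome data set.
set.seed(1)
df <- data.frame(x1 = rnorm(200), x2 = rnorm(200))
df$y <- rbinom(200, 1, plogis(0.8 * df$x1 - 0.5 * df$x2))
train.df <- df[1:150, ]
val.df   <- df[151:200, ]
model.log <- glm(y ~ x1 + x2, data = train.df, family = binomial)
BuildModelReport(model.log, response.var = "y",
                 train.data = train.df, val.data = val.df)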
|
/utils/report.R
|
no_license
|
leandroohf/raop
|
R
| false
| false
| 1,687
|
r
|
testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920983010871e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
result <- do.call(epiphy:::costTotCPP,testlist)
str(result)
|
/epiphy/inst/testfiles/costTotCPP/AFL_costTotCPP/costTotCPP_valgrind_files/1615926823-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 1,101
|
r
|