content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Auto-generated valgrind/fuzzing harness: builds a deliberately degenerate
# argument list (denormal doubles, zero-length vectors, zero-filled bins,
# huge integer counts) and feeds it to the compiled DLMtool::LBSPRgen routine
# to probe for memory errors rather than to produce a meaningful result.
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959243534803e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = -5.48612930196713e+303, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
# do.call spreads the named list as the function's arguments.
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615828195-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 488 | r | testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959243534803e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = -5.48612930196713e+303, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
\name{@ANNOBJPREFIX@CCANCESTOR}
\alias{@ANNOBJPREFIX@CCANCESTOR}
\title{Annotation of GO Identifiers to their Cellular Component Ancestors}
\description{
  This data set describes associations between GO cellular component (CC)
terms and their ancestor CC terms, based on the directed acyclic
graph (DAG) defined by the Gene Ontology Consortium. The format is an R
object mapping the GO CC terms to all ancestor terms, where an
ancestor term is a more general GO term that precedes
the given GO term in the DAG (in other words, the parents, and all
their parents, etc.).
}
\details{
  Each GO CC term is mapped to a vector of ancestor GO CC terms.
Cellular component is defined as the subcellular structures,
locations, and macromolecular complexes; examples include nucleus,
telomere, and origin recognition complex as defined by Gene Ontology
Consortium.
Mappings were based on data provided: @CCANCESTORSOURCE@
}
\references{
\url{http://www.geneontology.org/} and
  \url{https://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=gene}
}
\seealso{
\itemize{
\item \code{\link[AnnotationDbi]{AnnotationDb-class}} for use of
the \code{select()} interface.
}
}
\examples{
## select() interface:
## Objects in this package can be accessed using the select() interface
## from the AnnotationDbi package. See ?select for details.
## Bimap interface:
# Convert the object to a list
xx <- as.list(@ANNOBJPREFIX@CCANCESTOR)
# Remove GO IDs that do not have any ancestor
xx <- xx[!is.na(xx)]
if(length(xx) > 0){
    # Get the ancestor GO IDs for the first two elements of xx
goids <- xx[1:2]
}
}
\keyword{datasets}
| /inst/AnnDbPkg-templates/GO.DB/man/CCANCESTOR.Rd | no_license | Bioconductor/AnnotationForge | R | false | false | 1,648 | rd | \name{@ANNOBJPREFIX@CCANCESTOR}
\alias{@ANNOBJPREFIX@CCANCESTOR}
\title{Annotation of GO Identifiers to their Cellular Component Ancestors}
\description{
  This data set describes associations between GO cellular component (CC)
terms and their ancestor CC terms, based on the directed acyclic
graph (DAG) defined by the Gene Ontology Consortium. The format is an R
object mapping the GO CC terms to all ancestor terms, where an
ancestor term is a more general GO term that precedes
the given GO term in the DAG (in other words, the parents, and all
their parents, etc.).
}
\details{
  Each GO CC term is mapped to a vector of ancestor GO CC terms.
Cellular component is defined as the subcellular structures,
locations, and macromolecular complexes; examples include nucleus,
telomere, and origin recognition complex as defined by Gene Ontology
Consortium.
Mappings were based on data provided: @CCANCESTORSOURCE@
}
\references{
\url{http://www.geneontology.org/} and
  \url{https://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=gene}
}
\seealso{
\itemize{
\item \code{\link[AnnotationDbi]{AnnotationDb-class}} for use of
the \code{select()} interface.
}
}
\examples{
## select() interface:
## Objects in this package can be accessed using the select() interface
## from the AnnotationDbi package. See ?select for details.
## Bimap interface:
# Convert the object to a list
xx <- as.list(@ANNOBJPREFIX@CCANCESTOR)
# Remove GO IDs that do not have any ancestor
xx <- xx[!is.na(xx)]
if(length(xx) > 0){
    # Get the ancestor GO IDs for the first two elements of xx
goids <- xx[1:2]
}
}
\keyword{datasets}
|
selected_method <- "bing"
addr <- "London, UK"
url_base <- tidygeocoder:::get_bing_url()
library(httr)
library(jsonlite)
library(dplyr)
# Test sandbox on dev ----
soup <-
httr::GET(
url = url_base,
query = list(
key = tidygeocoder:::get_key('bing'),
q = addr,
maxResults = 1
)
)
response <-
jsonlite::fromJSON(httr::content(soup, as = "text", encoding = "UTF-8"))
httr::status_code(soup)
length(response$resourceSets$resources[[1]]$point$coordinates)
is.null(response$resourceSets$resources[[1]])
as.data.frame(
matrix(unlist(response$resourceSets$resources[[1]]$point$coordinates), ncol = 2, byrow = TRUE),
col.names = c("lat", "lng")
)
results_minimal <-
tidygeocoder::extract_results(selected_method, response, full_results = FALSE)
results_minimal
results <-
tidygeocoder::extract_results(selected_method, response)
results
full_results_notflat <-
tidygeocoder::extract_results(selected_method,
response,
full_results = TRUE,
flatten = FALSE
)
full_results_notflat
full_results_flat <-
tidygeocoder::extract_results(selected_method,
response,
full_results = TRUE,
flatten = TRUE
)
full_results_flat
# Error on bad user
# NOTE(review): query_api() is called unqualified here; elsewhere this script
# uses tidygeocoder::: — this only works with the package namespace loaded
# (e.g. via devtools::load_all()). TODO confirm intended namespace.
query_results <- query_api(url_base, list(
key = tidygeocoder:::get_key(selected_method),
q = "Madrid, Spain",
strictMatch = "XX"
))
# NOTE(review): `if (TRUE == TRUE)` is a debugging placeholder — the message
# always fires; presumably a verbosity flag was intended here.
if (TRUE == TRUE) message(paste0("HTTP Status Code: ", as.character(query_results$status)))
tidygeocoder:::extract_errors_from_results(query_results$content, method = "bing")
# NOTE(review): `response` below is the parsed list produced by
# jsonlite::fromJSON earlier in this script, not an httr response object, so
# these httr accessors will error — confirm whether the raw `soup` object
# (the httr::GET result) was meant instead.
httr::status_code(response)
httr::warn_for_status(response)
httr::content(response, as = "text", encoding = "UTF-8")
content <- jsonlite::fromJSON(httr::content(response, as = "text", encoding = "UTF-8"))
httr::content(response, as = "text", encoding = "UTF-8")
# NOTE(review): `soup2` is not defined anywhere in this script; this call fails.
tidygeocoder::check_results_for_problems("bing", soup2, TRUE)
# Error on bad key
# Test geo ----
library(tibble)
addr <- "Plaza Mayor"
tidygeocoder::geo(
address = addr,
verbose = TRUE,
lat = "latitude",
long = "longitude",
method = "bing",
limit = 5,
)
# NUll result
tidygeocoder::geo(
address = "asdfghjkl",
verbose = TRUE,
lat = "latitude",
long = "longitude",
method = "bing",
limit = 5,
)
livetest <-
tidygeocoder::geo(
address = addr,
verbose = TRUE,
method = "bing"
)
glimpse(livetest)
livetest_full <-
tidygeocoder::geo(
address = "Antonio de Leyva, Madrid",
verbose = TRUE,
full_results = TRUE,
method = "bing"
)
glimpse(livetest_full)
livetest_fullflat <-
tidygeocoder::geo(
address = addr,
verbose = TRUE,
full_results = TRUE,
flatten = TRUE,
method = "bing"
)
glimpse(livetest_fullflat)
livetest_params <-
tidygeocoder::geo(
address = c("Madrid"),
verbose = TRUE,
full_results = TRUE,
mode = "single",
limit = 5,
method = "bing"
)
glimpse(livetest_params)
# Error
tidygeocoder::geo(
address = c("Nieva"),
verbose = TRUE,
full_results = TRUE,
mode = "single",
limit = 2,
custom_query = list(
key = "aaaaaa"
),
method = "bing"
)
tidygeocoder::geo(
address = c("Nieva"),
verbose = TRUE,
full_results = TRUE,
mode = "single",
limit = 2,
custom_query = list(
strictMatch = "aaaaaa"
),
method = "bing"
)
# End error
glimpse(livetest_params)
library(dplyr)
library(tibble)
library(tidygeocoder)
# create a dataframe with addresses
some_addresses <- tribble(
~name1, ~addr,
"White House", "1600 Pennsylvania Ave NW, Washington, DC",
"Transamerica Pyramid", "600 Montgomery St, San Francisco, CA 94111",
"Willis Tower", "233 S Wacker Dr, Chicago, IL 60606"
)
# geocode the addresses
lat_longs <- some_addresses %>%
geocode(addr,
method = "bing",
full_results = TRUE, mode = "single", verbose = TRUE
)
lat_longs
| /sandbox/query_debugging/bing_test.R | permissive | jessecambon/tidygeocoder | R | false | false | 3,797 | r | selected_method <- "bing"
addr <- "London, UK"
url_base <- tidygeocoder:::get_bing_url()
library(httr)
library(jsonlite)
library(dplyr)
# Test sandbox on dev ----
soup <-
httr::GET(
url = url_base,
query = list(
key = tidygeocoder:::get_key('bing'),
q = addr,
maxResults = 1
)
)
response <-
jsonlite::fromJSON(httr::content(soup, as = "text", encoding = "UTF-8"))
httr::status_code(soup)
length(response$resourceSets$resources[[1]]$point$coordinates)
is.null(response$resourceSets$resources[[1]])
as.data.frame(
matrix(unlist(response$resourceSets$resources[[1]]$point$coordinates), ncol = 2, byrow = TRUE),
col.names = c("lat", "lng")
)
results_minimal <-
tidygeocoder::extract_results(selected_method, response, full_results = FALSE)
results_minimal
results <-
tidygeocoder::extract_results(selected_method, response)
results
full_results_notflat <-
tidygeocoder::extract_results(selected_method,
response,
full_results = TRUE,
flatten = FALSE
)
full_results_notflat
full_results_flat <-
tidygeocoder::extract_results(selected_method,
response,
full_results = TRUE,
flatten = TRUE
)
full_results_flat
# Error on bad user
query_results <- query_api(url_base, list(
key = tidygeocoder:::get_key(selected_method),
q = "Madrid, Spain",
strictMatch = "XX"
))
if (TRUE == TRUE) message(paste0("HTTP Status Code: ", as.character(query_results$status)))
tidygeocoder:::extract_errors_from_results(query_results$content, method = "bing")
httr::status_code(response)
httr::warn_for_status(response)
httr::content(response, as = "text", encoding = "UTF-8")
content <- jsonlite::fromJSON(httr::content(response, as = "text", encoding = "UTF-8"))
httr::content(response, as = "text", encoding = "UTF-8")
tidygeocoder::check_results_for_problems("bing", soup2, TRUE)
# Error on bad key
# Test geo ----
library(tibble)
addr <- "Plaza Mayor"
tidygeocoder::geo(
address = addr,
verbose = TRUE,
lat = "latitude",
long = "longitude",
method = "bing",
limit = 5,
)
# NUll result
tidygeocoder::geo(
address = "asdfghjkl",
verbose = TRUE,
lat = "latitude",
long = "longitude",
method = "bing",
limit = 5,
)
livetest <-
tidygeocoder::geo(
address = addr,
verbose = TRUE,
method = "bing"
)
glimpse(livetest)
livetest_full <-
tidygeocoder::geo(
address = "Antonio de Leyva, Madrid",
verbose = TRUE,
full_results = TRUE,
method = "bing"
)
glimpse(livetest_full)
livetest_fullflat <-
tidygeocoder::geo(
address = addr,
verbose = TRUE,
full_results = TRUE,
flatten = TRUE,
method = "bing"
)
glimpse(livetest_fullflat)
livetest_params <-
tidygeocoder::geo(
address = c("Madrid"),
verbose = TRUE,
full_results = TRUE,
mode = "single",
limit = 5,
method = "bing"
)
glimpse(livetest_params)
# Error
tidygeocoder::geo(
address = c("Nieva"),
verbose = TRUE,
full_results = TRUE,
mode = "single",
limit = 2,
custom_query = list(
key = "aaaaaa"
),
method = "bing"
)
tidygeocoder::geo(
address = c("Nieva"),
verbose = TRUE,
full_results = TRUE,
mode = "single",
limit = 2,
custom_query = list(
strictMatch = "aaaaaa"
),
method = "bing"
)
# End error
glimpse(livetest_params)
library(dplyr)
library(tibble)
library(tidygeocoder)
# create a dataframe with addresses
some_addresses <- tribble(
~name1, ~addr,
"White House", "1600 Pennsylvania Ave NW, Washington, DC",
"Transamerica Pyramid", "600 Montgomery St, San Francisco, CA 94111",
"Willis Tower", "233 S Wacker Dr, Chicago, IL 60606"
)
# geocode the addresses
lat_longs <- some_addresses %>%
geocode(addr,
method = "bing",
full_results = TRUE, mode = "single", verbose = TRUE
)
lat_longs
|
# Auto-extracted example for InfiniumPurify::InfiniumClust: clusters tumor
# samples from Infinium 450k methylation data while accounting for tumor purity.
library(InfiniumPurify)
### Name: InfiniumClust
### Title: Tumor sample clustering from Infinium 450k array data
### Aliases: InfiniumClust
### ** Examples
## load example data
data(beta.emp)
# Columns 1-21 hold normal samples, columns 22-31 hold tumor samples.
# NOTE(review): normal.data is assigned but never used below.
normal.data <- beta.emp[,1:21]
tumor.data <- beta.emp[,22:31]
## estimate tumor purity
purity <- getPurity(tumor.data = tumor.data,tumor.type= "LUAD")
## cluster tumor samples accounting for tumor purity
# K = number of clusters; maxiter/tol bound the iterative fit.
out <- InfiniumClust(tumor.data,purity,K=3, maxiter=5, tol=0.001)
| /data/genthat_extracted_code/InfiniumPurify/examples/InfiniumClust.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 471 | r | library(InfiniumPurify)
### Name: InfiniumClust
### Title: Tumor sample clustering from Infinium 450k array data
### Aliases: InfiniumClust
### ** Examples
## load example data
data(beta.emp)
normal.data <- beta.emp[,1:21]
tumor.data <- beta.emp[,22:31]
## estimate tumor purity
purity <- getPurity(tumor.data = tumor.data,tumor.type= "LUAD")
## cluster tumor samples accounting for tumor purity
out <- InfiniumClust(tumor.data,purity,K=3, maxiter=5, tol=0.001)
|
#' Generate a list of fictional country names
#'
#' @param n Integer number of country names to generate
#' from a library of fictional country names.
#' @param prefixed Proportion of country names with a prefix by default 0.15.
#' @param suffixed Proportion of country names with a suffix by default 0.15.
#' @return String vector of fictional country names
#' @examples
#' generate_states(12)
#' @export
generate_states <- function(n = 10, prefixed = 0.15, suffixed = 0.15){
  # Validate early: a single non-negative count and usable proportions.
  stopifnot(is.numeric(n), length(n) == 1, n >= 0,
            prefixed > 0, prefixed <= 1, suffixed > 0, suffixed <= 1)
  namelib <- c("Malania", "Maliwar", "Rhonda", "Astan", "Boroland", "Jawar",
    "Teldir", "Toramos", "Lanfal", "Samovar", "Westenam", "Aramin", "Cradis",
    "Samonda", "Volorea", "Telti", "Jormos", "Karador", "Paradis", "Yutria", "Osmayya",
    "Glayland", "Etror", "Esweau", "Askor", "Ugraria")
  prefixlib <- c("The", "Central", "East", "Eastern", "Empire of", "Isle of",
    "Kingdom of", "New", "North", "Northern", "Republic of",
    "Saint", "San", "South", "Southern", "United",
    "The United States of", "Upper", "West", "Western")
  suffixlib <- c("Confederacy", "Empire", "Islands", "Kingdom", "Republic", "Union", "United")
  # Pad an affix library with empty strings so roughly `share` of the sampled
  # names carry an affix (same result as the previous length/(share*100)*100
  # arithmetic, written directly).
  pad_to_share <- function(lib, share) {
    c(lib, rep("", round(length(lib) / share) - length(lib)))
  }
  prefixlib <- pad_to_share(prefixlib, prefixed)
  suffixlib <- pad_to_share(suffixlib, suffixed)
  # Sample with replacement only when n exceeds a library's size: small draws
  # keep the original no-duplicates behaviour, while large n no longer errors.
  pick <- function(lib) sample(lib, n, replace = n > length(lib))
  # trimws() drops the stray spaces left by empty prefixes/suffixes
  # (base-R replacement for the former stringr::str_trim dependency).
  trimws(paste(pick(prefixlib), pick(namelib), pick(suffixlib)))
}
} | /R/generate_states.R | permissive | globalgov/manystates | R | false | false | 1,521 | r | #' Generate a list of fictional country names
#' @param n Integer number of country names to generate
#' from a library of fictional country names.
#' @param prefixed Proportion of country names with a prefix by default 0.15.
#' @param suffixed Proportion of country names with a suffix by default 0.15.
#' @return String vector of fictional country names
#' @importFrom stringr str_trim
#' @examples
#' generate_states(12)
#' @export
generate_states <- function(n = 10, prefixed = 0.15, suffixed = 0.15){
namelib <- c("Malania", "Maliwar", "Rhonda", "Astan", "Boroland", "Jawar",
"Teldir", "Toramos", "Lanfal", "Samovar", "Westenam", "Aramin", "Cradis",
"Samonda", "Volorea", "Telti", "Jormos", "Karador", "Paradis", "Yutria", "Osmayya",
"Glayland", "Etror", "Esweau", "Askor", "Ugraria")
prefixlib <- c("The", "Central", "East", "Eastern", "Empire of", "Isle of",
"Kingdom of", "New", "North", "Northern", "Repulic of",
"Saint", "San", "South", "Southern", "United",
"The United States of", "Upper", "West", "Western")
prefixlib <- c(prefixlib, rep("", round(length(prefixlib)/(prefixed*100)*100) - length(prefixlib)))
suffixlib <- c("Confederacy", "Empire", "Islands", "Kingdom", "Republic", "Union", "United")
suffixlib <- c(suffixlib, rep("", round(length(suffixlib)/(suffixed*100)*100) - length(suffixlib)))
stringr::str_trim(paste(sample(prefixlib, n), sample(namelib, n), sample(suffixlib, n)))
} |
# Build and save the package datasets: a long-format indicator table (`dat`)
# and world country borders (`world`).
library(tidyverse)
library(stringr)
library(sp)
library(rgdal)
# data key
# cataincidence = catastrophic spending
# cataincidence_q = catastrophic spending by consumption quintile
# oop_che = out of pocket payments as a share of current spending on health
# unmetneed_hc = self-reported unmet need for healthcare
# read in data
dat <- read.csv('data-raw/example_dataset_whobcn_10_Nov.csv', stringsAsFactors = FALSE)
# make data long: one row per indicator/country/year
# NOTE(review): gather() is superseded; pivot_longer() is the current tidyr idiom.
dat <- dat %>% gather(key='year', value='value', -Indicator.code, -Country, -Country.code )
# remove percent from value column and X from year column
# (read.csv prefixes numeric column names with "X"; values arrive as "NN%" strings)
dat <- dat %>% mutate(year = gsub('X','',year),
value = as.numeric(gsub('%','',value)))
# recode columns: lower-case names, dots replaced by underscores
names(dat) <- tolower(gsub('.', '_', names(dat), fixed = TRUE))
usethis::use_data(dat, overwrite = TRUE)
# read in shp files
world <- readOGR('data-raw/world/', 'TM_WORLD_BORDERS-0.3')
# NOTE(review): prefer overwrite = TRUE over the reassignable shorthand T.
usethis::use_data(world, overwrite = T)
| /data-raw/update_data.R | permissive | databrew/whoapp | R | false | false | 945 | r | library(tidyverse)
library(stringr)
library(sp)
library(rgdal)
# data key
# cataincidence = catastrophic spending
# cataincidence_q = catastrophic spending by consumption quintile
# oop_che = out of pocket payments as a share of current spending on health
# unmetneed_hc = self-reported unmet need for healthcare
# read in data
dat <- read.csv('data-raw/example_dataset_whobcn_10_Nov.csv', stringsAsFactors = FALSE)
# make data long
dat <- dat %>% gather(key='year', value='value', -Indicator.code, -Country, -Country.code )
# remove percent from value column and X from year column
dat <- dat %>% mutate(year = gsub('X','',year),
value = as.numeric(gsub('%','',value)))
# recode columns
names(dat) <- tolower(gsub('.', '_', names(dat), fixed = TRUE))
usethis::use_data(dat, overwrite = TRUE)
# read in shp files
world <- readOGR('data-raw/world/', 'TM_WORLD_BORDERS-0.3')
usethis::use_data(world, overwrite = T)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/showWeaAna.R
\docType{methods}
\name{show,WeaAna-method}
\alias{show,WeaAna-method}
\title{Show basic information of class WeaAna}
\usage{
\S4method{show}{WeaAna}(object)
}
\arguments{
\item{object}{WeaAna objects}
}
\description{
Show the name, number, latitude, longitude of all weather stations.
}
\examples{
library(weaana)
data( "WeatherRecordsDemo" )
show( records )
records
}
| /man/show-WeaAna-method.Rd | no_license | cran/weaana | R | false | true | 462 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/showWeaAna.R
\docType{methods}
\name{show,WeaAna-method}
\alias{show,WeaAna-method}
\title{Show basic information of class WeaAna}
\usage{
\S4method{show}{WeaAna}(object)
}
\arguments{
\item{object}{WeaAna objects}
}
\description{
Show the name, number, latitude, longitude of all weather stations.
}
\examples{
library(weaana)
data( "WeatherRecordsDemo" )
show( records )
records
}
|
\name{tc}
\alias{tc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Treatment Choice Cox Model
}
\description{
Builds time-varying covariates needed and fits Treatment Choice Cox models (Parametric Treatment Choice (PTC), Hybrid Treatment Choice (HTC), or Interval Treatment Choice (ITC)) for observational time-to-event studies.
}
\usage{
tc(type = "PTC", dataset, cov_names, maxfollow=100, nmaxint = 80, interval_width = 0.1,
min_exp_events = 50, min_future_events = 50, nitc_fixed = 0, n_start_fixed = 10,
n_stop_fixed = 10, interval_stop_beginning = 1.1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{type}{
character indicating the type of TC model to be fit ('PTC' for Parametric, 'HTC' for Hybrid, or 'ITC' for Interval)
}
\item{dataset}{
data.frame containing the data to be used to fit the TC model
dataset should have all baseline covariates, starting with treatment (0 or 1), in the leading positions
following the baseline covariates should be in order the variables: id, start, stop, status
id is a unique number for each subject
start is the beginning of each time interval where treatment is constant
stop is the endpoint of each time interval where treatment is constant
status is an indicator (0 or 1) of an event occurring at the corresponding stop time
dataset should be ordered by start values within each level of id
for each id the first entry should have treatment=0
}
\item{cov_names}{
vector of baseline covariate names (including treatment)
}
\item{maxfollow}{
maximum followup for any subject in dataset
}
\item{nmaxint}{
maximum number of TC intervals allowed
}
\item{interval_width}{
width of the TC intervals
}
\item{min_exp_events}{
minimum number of events expected of subjects in each cell for determining ITC intervals
}
\item{min_future_events}{
minimum number of events expected of future starters(stoppers) of treatment for determining upper bound on starting(stopping) TC intervals
}
\item{nitc_fixed}{
indicator (0 or 1) that potential ITC intervals are fixed
}
\item{n_start_fixed}{
number of fixed ITC starting intervals (only applicable if nitc_fixed=1)
}
\item{n_stop_fixed}{
number of fixed ITC stopping intervals (only applicable if nitc_fixed=1)
}
\item{interval_stop_beginning}{
smallest ITC stopping interval endpoint (only applicable if nitc_fixed=1)
}
}
\value{
\item{fit }{fit of TC model}
\item{nitc_start }{number of ITC starting intervals}
\item{itc_start_endpoint }{vector containing the ITC starting interval endpoints}
\item{nitc_stop }{number of ITC stopping intervals}
\item{itc_stop_endpoint }{vector containing the ITC stopping interval endpoints}
\item{nstartint }{number of TC starting intervals}
\item{startint }{vector containing the TC starting interval endpoints}
\item{nstopint }{number of TC stopping intervals}
\item{stopint }{vector containing the TC stopping interval endpoints}
\item{cov_names }{vector containing the covariate names of the model
tstart is the cumulative constant starting term (PTC only)
tstart1 is the cumulative linear starting term (PTC only)
tstop is the cumulative constant stopping term (PTC only)
tstop1 is the cumulative linear stopping term (PTC only)
tstart0 is the cumulative constant starting term outside of ITC intervals (HTC only)
tstop0 is the cumulative constant stopping term outside of ITC intervals (HTC only)
treatstartp.# is the #'th ITC starting term (ITC and HTC only)
treatstopp.# is the #'th ITC stopping term (ITC and HTC only)}
\item{nperson }{number of subjects in dataset}
\item{numevents }{number of events in dataset}
\item{medianfollowup }{median followup for subjects in dataset}
}
\references{
Troendle, JF, Leifer, E, Zhang Z, Yang, S, and Tewes H (2017) How to Control for Unmeasured Confounding in an Observational Time-To-Event Study With Exposure Incidence Information: the Treatment Choice Cox Model. Statistics in Medicine 36: 3654-3669.
}
\author{
James F. Troendle
}
\examples{
# Use simulated data in example.dat to build and fit a PTC model
#
require(stats)
require(survival)
cov_names=names(example.dat)[1:2]
example.dat=example.dat[1:500,]
z=tc(type="PTC", dataset=example.dat, cov_names = cov_names, min_exp_events = 5,
min_future_events = 20)
z[[1]]
#
# Use simulated data in example.dat to build and fit an HTC model
#
require(stats)
require(survival)
cov_names=names(example.dat)[1:2]
example.dat=example.dat[1:500,]
z=tc(type="HTC", dataset=example.dat, cov_names = cov_names, min_exp_events = 5,
min_future_events = 20)
z[[1]]
#
# Use simulated data in example.dat to build and fit an ITC model
#
require(stats)
require(survival)
cov_names=names(example.dat)[1:2]
example.dat=example.dat[1:500,]
z=tc(type="ITC", dataset=example.dat, cov_names = cov_names, min_exp_events = 5,
min_future_events = 20)
z[[1]]
}
\keyword{survival}
| /man/tc.Rd | no_license | cran/tccox | R | false | false | 5,305 | rd | \name{tc}
\alias{tc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Treatment Choice Cox Model
}
\description{
Builds time-varying covariates needed and fits Treatment Choice Cox models (Parametric Treatment Choice (PTC), Hybrid Treatment Choice (HTC), or Interval Treatment Choice (ITC)) for observational time-to-event studies.
}
\usage{
tc(type = "PTC", dataset, cov_names, maxfollow=100, nmaxint = 80, interval_width = 0.1,
min_exp_events = 50, min_future_events = 50, nitc_fixed = 0, n_start_fixed = 10,
n_stop_fixed = 10, interval_stop_beginning = 1.1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{type}{
character indicating the type of TC model to be fit ('PTC' for Parametric, 'HTC' for Hybrid, or 'ITC' for Interval)
}
\item{dataset}{
data.frame containing the data to be used to fit the TC model
dataset should have all baseline covariates, starting with treatment (0 or 1), in the leading positions
following the baseline covariates should be in order the variables: id, start, stop, status
id is a unique number for each subject
start is the beginning of each time interval where treatment is constant
stop is the endpoint of each time interval where treatment is constant
status is an indicator (0 or 1) of an event occurring at the corresponding stop time
dataset should be ordered by start values within each level of id
for each id the first entry should have treatment=0
}
\item{cov_names}{
vector of baseline covariate names (including treatment)
}
\item{maxfollow}{
maximum followup for any subject in dataset
}
\item{nmaxint}{
maximum number of TC intervals allowed
}
\item{interval_width}{
width of the TC intervals
}
\item{min_exp_events}{
minimum number of events expected of subjects in each cell for determining ITC intervals
}
\item{min_future_events}{
minimum number of events expected of future starters(stoppers) of treatment for determining upper bound on starting(stopping) TC intervals
}
\item{nitc_fixed}{
indicator (0 or 1) that potential ITC intervals are fixed
}
\item{n_start_fixed}{
number of fixed ITC starting intervals (only applicable if nitc_fixed=1)
}
\item{n_stop_fixed}{
number of fixed ITC stopping intervals (only applicable if nitc_fixed=1)
}
\item{interval_stop_beginning}{
smallest ITC stopping interval endpoint (only applicable if nitc_fixed=1)
}
}
\value{
\item{fit }{fit of TC model}
\item{nitc_start }{number of ITC starting intervals}
\item{itc_start_endpoint }{vector containing the ITC starting interval endpoints}
\item{nitc_stop }{number of ITC stopping intervals}
\item{itc_stop_endpoint }{vector containing the ITC stopping interval endpoints}
\item{nstartint }{number of TC starting intervals}
\item{startint }{vector containing the TC starting interval endpoints}
\item{nstopint }{number of TC stopping intervals}
\item{stopint }{vector containing the TC stopping interval endpoints}
\item{cov_names }{vector containing the covariate names of the model
tstart is the cumulative constant starting term (PTC only)
tstart1 is the cumulative linear starting term (PTC only)
tstop is the cumulative constant stopping term (PTC only)
tstop1 is the cumulative linear stopping term (PTC only)
tstart0 is the cumulative constant starting term outside of ITC intervals (HTC only)
tstop0 is the cumulative constant stopping term outside of ITC intervals (HTC only)
treatstartp.# is the #'th ITC starting term (ITC and HTC only)
treatstopp.# is the #'th ITC stopping term (ITC and HTC only)}
\item{nperson }{number of subjects in dataset}
\item{numevents }{number of events in dataset}
\item{medianfollowup }{median followup for subjects in dataset}
}
\references{
Troendle, JF, Leifer, E, Zhang Z, Yang, S, and Tewes H (2017) How to Control for Unmeasured Confounding in an Observational Time-To-Event Study With Exposure Incidence Information: the Treatment Choice Cox Model. Statistics in Medicine 36: 3654-3669.
}
\author{
James F. Troendle
}
\examples{
# Use simulated data in example.dat to build and fit a PTC model
#
require(stats)
require(survival)
cov_names=names(example.dat)[1:2]
example.dat=example.dat[1:500,]
z=tc(type="PTC", dataset=example.dat, cov_names = cov_names, min_exp_events = 5,
min_future_events = 20)
z[[1]]
#
# Use simulated data in example.dat to build and fit an HTC model
#
require(stats)
require(survival)
cov_names=names(example.dat)[1:2]
example.dat=example.dat[1:500,]
z=tc(type="HTC", dataset=example.dat, cov_names = cov_names, min_exp_events = 5,
min_future_events = 20)
z[[1]]
#
# Use simulated data in example.dat to build and fit an ITC model
#
require(stats)
require(survival)
cov_names=names(example.dat)[1:2]
example.dat=example.dat[1:500,]
z=tc(type="ITC", dataset=example.dat, cov_names = cov_names, min_exp_events = 5,
min_future_events = 20)
z[[1]]
}
\keyword{survival}
|
##########################################
## Pull a set of data and condition it for modeling to predict price movement.
## 2018-08-16
##########################################
# reference info
# https://wikitech.wikimedia.org/wiki/Analytics/AQS/Pageviews
#
# Put-call ratio from CBOE
# http://r.789695.n4.nabble.com/as-xts-convert-all-my-numeric-data-to-character-td975564.html
# http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/totalpc.csv
#
##########################################
agg_stock_data <- function(ticker=NULL, term=NULL, save_pulls = NULL) {
# Libraries required
library(quantmod)
library(xts)
library(zoo)
library(pageviews)
library(bizdays)
library(gtrendsR)
library(devtools)
library(TTR)
thePath = "C:\\Users\\Michael\\Downloads"
today = gsub( pattern = "-", replacement="", as.character(Sys.Date()))
today = paste(today, "00", sep = "")
# TESTING VALUES Symbols to generate data on
ticker = "AAPL" #, "AMZN", "T")
term = "aapl stock"
df = getSymbols(Symbols = ticker, return.class='xts', env=NULL)
# subset to include december 2013 onward, which allows for calculating moving averages and stdev
strt = as.Date(first(grep(pattern="2013-12-0+",index(df), value = TRUE)), format = "%Y-%m-%d")
end = nrow(df)
strt = which(index(df)==strt)
# df = index(df)[strt:nrow(df)] # here's how to grab the index only
# df = df[index(df)[strt:end],]
df = df[strt:end,]
# Fix the names of the standard object returned
colnames(df) = c("open","high","low","close","volume","adj.")
# head(df)
##### ______________________ Compute Stock Technicals _____________________________#######
# __________________________________________________________________________#
df$EMA3 = EMA(df$adj, n=3) # thesis
df$EMA5 = EMA(df$adj, n=5) # thesis
df$EMA10 = EMA(df$adj, n=10) # thesis
df$WPR = WPR(df[,c("high", "low", "close")])
df$RSI.10 = RSI(df$adj, n=10, maType = "EMA")
df$RSI.5 = RSI(df$adj, n=5, maType = "EMA")
df$MACD.12.26.EMA = MACD(df$adj, nFast = 12, nSlow = 26, nSig = 9, maType = "EMA")
df$stoch.3 = stoch(df[,c('high','low','close')],
n.fastK=14,
ma.fastD=list("SMA", n=3),
ma.slowD=list("SMA", n=3))
df$stoch.def = stoch(df[,c('high','low','close')])
df$ROC.3 = ROC(df$adj, n=3)
df$ROC.5 = ROC(df$adj, n=5)
df$ROC.10 = ROC(df$adj, n=10)
# Volumetric calculations
df$Vol.ROC.1 = ROC(df$volume, n=1)
df$Vol.ROC.3 = ROC(df$volume, n=3)
df$Vol.ROC.5 = ROC(df$volume, n=5)
df$Vol.ROC.10 = ROC(df$volume, n=10)
df$Vol.RSI.3 = RSI(df$volume, n=3, maType = "EMA")
df$Vol.RSI.5 = RSI(df$volume, n=5, maType = "EMA")
df$Vol.RSI.10 = RSI(df$volume, n=10, maType = "EMA")
##### ______________________ Wikipedia Counts __________________________________#####
# __________________________________________________________________________#
wikicnts = article_pageviews(project = "en.wikipedia",
article = "AAPL",
platform = "all",
user_type = "all",
start = "2015100100",
end = today,
reformat = TRUE)[,7:8] #col 7 & 8 have date and count
wikicnts = xts(as.numeric(wikicnts[,-1]),
order.by = as.Date(wikicnts[,'date'],
format="%Y-%m-%d") )
colnames(wikicnts) = "wikicnts"
# get ROC for wiki.views
wikicnts$wiki.ROC.1 = ROC(wikicnts$wikicnts, n=1, na.pad = TRUE)
wikicnts$wiki.ROC.3 = ROC(wikicnts$wikicnts, n=3, na.pad = TRUE)
# get RSI for wiki views
wikicnts$wiki.RSI.3 = RSI(wikicnts$wikicnts, n=3, maType = "EMA")
wikicnts$wiki.RSI.5 = RSI(wikicnts$wikicnts, n=5, maType = "EMA")
# tail(wikicnts,25)
# str(wikicnts)
# if(save_pulls == TRUE){
# wikiname = paste(thePath, Sys.Date(),"_WikiCnts.csv", sep = "")
# write.zoo(wikicnts, wikiname, sep=",", index.name = "Date", col.names = TRUE)
# }
##### Merge the wikicnts with the df dataframe of stock data ____________________#####
df = merge(df, wikicnts) # join='left') THIS SHOULD BE GOOD NOW!
# tail(df,40)
##### Pull Google Trends Data for Ticker #################################################
# https://github.com/PMassicotte/gtrendsR/issues/252
# The library to pull from google trends is a dev library that you install from here
# devtools::install_github("PMassicotte/gtrendsR")
library(gtrendsR)
#######################################################################
# Fetch Google Trends interest in ~90-day chunks so results keep daily
# resolution (queries spanning more than ~9 months come back downsampled
# to weekly by the Trends API).
#
# @param keyword   Search term passed to gtrends().
# @param geo       Geo restriction, e.g. "US".
# @param timeStart "YYYY-MM-DD" string; first day of the chunked pulls.
# @param timeEnd   "YYYY-MM-DD" string; last day covered by the chunked loop.
# @param gprop     Google property ("web", "news", ...); gtrends accepts one only.
# @return data.frame accumulating the date and hits columns over all chunks.
trendHelper <- function(keyword = NULL, geo = NULL, timeStart = NULL, timeEnd = NULL, gprop = NULL) {
  dateStart <- as.Date(timeStart)
  dateEnd <- as.Date(timeEnd)
  curDate <- dateStart
  dftemp <- data.frame(date = character(), hits = numeric(), stringsAsFactors = FALSE)
  while (curDate <= dateEnd) {
    # BUG FIX: the original queried the global `term` instead of the
    # `keyword` argument, silently ignoring the caller's search term.
    trend <- gtrends(keyword = keyword,
                     geo = geo,
                     time = paste(curDate, curDate + 90, sep = " "),
                     gprop = gprop,
                     hl = "en-US",
                     low_search_volume = FALSE)
    curDate <- curDate + 90  # NOTE(review): adjacent chunks share a boundary day — confirm dedup is wanted
    dfNew <- data.frame(trend$interest_over_time$date,
                        trend$interest_over_time$hits,
                        stringsAsFactors = FALSE)
    dftemp <- rbind(dftemp, dfNew)
    print(curDate)  # progress indicator for the slow network loop
  }
  # Final partial chunk from the first uncovered day through today; skipped when
  # the loop already advanced past today (the original would have asked gtrends
  # for an inverted date range and errored).
  if (curDate <= Sys.Date()) {
    trend <- gtrends(keyword = keyword,
                     geo = geo,
                     time = paste(curDate, Sys.Date(), sep = " "),
                     gprop = gprop,
                     hl = "en-US",
                     low_search_volume = FALSE)
    dfNew <- data.frame(trend$interest_over_time$date,
                        trend$interest_over_time$hits,
                        stringsAsFactors = FALSE)
    dftemp <- rbind(dftemp, dfNew)
  }
  # Return the accumulated frame explicitly (the original ended on an
  # assignment, returning it only invisibly).
  dftemp
}
################################################################
trend = trendHelper(term, "US", "2015-01-01", "2015-10-01", "web")
# trend = gtrends(keyword = term,
# geo = "US",
# time = paste("2015-01-01", Sys.Date(), sep=" "), # "today+5-y", #"today 1-m"
# gprop = "web", #c("web","news"),
# # category = 0,
# hl = "en-US",
# low_search_volume = FALSE)
# head(trend$interest_over_time$hits)
# This works. Appears that the gprop cannot take more than one arg (e.g. it cannot
# be c("news", "web"))
# gtrend = gtrends("insomnia",
# gprop = 'web', #c("news"), #test case used 'news'
# geo = "US",
# time = "today 1-m")
##### WRITE THE GTREND DATA TO FILE
# Persist the raw Google Trends pull so the query can be audited/re-used later.
# BUG FIX: `if(save_pulls=TRUE)` used `=` (assignment) inside `if()`, which is a
# parse error in R; isTRUE() also tolerates save_pulls = NULL (the default).
if (isTRUE(save_pulls)) {
  gtrendfile <- trend$interest_over_time
  gtrendname <- paste(thePath, Sys.Date(), "_GTREND_Data.csv", sep = "")
  write.table(gtrendfile, gtrendname, sep = ",", col.names = TRUE, row.names = FALSE)
}
##### create a sequence to capture the time series for all dates in the query #####
trend$interest_over_time$hits = as.numeric(trend$interest_over_time$hits)
trnd.hits = xts(as.numeric(trend$interest_over_time$hits), order.by = as.Date(trend$interest_over_time$date, format="%Y-%m-%d"))
colnames(trnd.hits) = "gtrnd"
trnd.hits$gtrnd = trnd.hits$gtrnd/100
trnd.hits$gtrnd_ROC = Delt(trnd.hits$gtrnd, k=1)
# str(trnd.hits)
# tail(trnd.hits)
# write a time series that covers all dates from the beginning of the trend data
tr.series = seq.Date(from=(as.Date(trend$interest_over_time$date[1], format="%Y-%m-%d"))-7, to=Sys.Date(),by="day")
tr.series = data.frame(date=tr.series, gtrnd=0)
tr.series = xts(as.numeric(tr.series$gtrnd), order.by = as.Date(tr.series$date, format="%Y-%m-%d"))
colnames(tr.series) = "gtrnd"
tail(tr.series)
# tr.series$gtrnd = tr.series$gtrnd + trnd.hits$gtrnd
trend.series = merge(tr.series$gtrnd, trnd.hits$gtrnd)[,2]
trend.series = merge(trend.series, trnd.hits$gtrnd_ROC)
trend.series[is.na(trend.series)] = 0
colnames(trend.series) = c("g_hits", "g_hits_ROC")
# tr.series[ind,] = trnd.hits[ind,]
# str(tr.series)
# get summary info for gtrend
###### get related query summary data ###########################################
rel_query = unlist(trend$related_queries$value)
print(paste("Number of related queries for symbol: ", length(rel_query), sep = ""))
print(rel_query)
# sumdf = summary(trend)
##### DO NOT USE RIGHT NOW ################
# get the data for scaled hits for the keyword in gtrend
# time.trend = trend$interest_over_time[,c(1,2)]
# turn the trend data into an extensible time series (xts)
# time.trend = xts(time.trend[,2], order.by = as.Date(time.trend$date, format="%Y-%m-%d"))
# call the new column
# colnames(time.trend) = "g_hits"
# class(time.trend)
# head(time.trend)
# ifelse(summary(trend$interest_by_country)==0, trend$interest_by_country=NULL)
# summary(time.trend)
# class(time.trend)
# scale the hits value to be a percentage
# time.trend$g_hits = time.trend$g_hits/100
# calculate the rate of change for k=1 period lookback (because google trend data is provided
# only weekly for queries of a year or more k=1 is the best resolution you can get). I could not
# find a way to get data at a daily rate for queries spanning more than a few days/a week.
# time.trend$goog_wk_ROC = Delt(time.trend$g_hits, k=1, type="log")
# head(time.trend)
####_______________________MERGE Google Trend Data to Dataframe _________________________#####
# this does a left join which maps the weekly google trend data into the xts df
# and creates a bunch of NAs in between the actual values
# Merge the raw hits data into df first (this will have it in the data file if there's more to do
# with it later without having to run this function).
df = merge.xts(df, trend.series) # join = "left") DO NOT LEFT JOIN AS IT REMOVES THE SUNDAYS!!!
tail(df,40)
# Merge the ROC data next
# df = merge.xts(df, time.trend$goog_wk_ROC) #, join = "left")
# parse thru the zero values and fill in delta value for the previous week
# na.idx = which(is.na(df$goog_wk_ROC)) # get values which are NA in the merged data frame
val.idx = which(!df$g_hits_ROC==0) #get values that are not ZEROS
for (i in 1:(length(val.idx)-1)){
loc.0 = val.idx[i] + 1
loc.1 = val.idx[i+1] - 1
prev.wk.delta = loc.1 + 1
# print(loc.1-loc.0)
# check to make sure the range between values is a week or less
if((loc.1+1 - loc.0-1 <= 7)){ # check to make sure the range in dates is not greater than a week
# print(paste("good to go",i,sep=""))
df$g_hits_ROC[loc.0:loc.1] = df$g_hits_ROC[prev.wk.delta]
} else {}
} #end for
##### Pull Google Trend Data for Mortgage Data ######################################
mrtg.trend = gtrends(keyword = "mortgage rates",
geo = "US",
time = paste("2015-01-01", Sys.Date(), sep=" "), #"today+5-y",
gprop = "web",
hl = "en-US",
low_search_volume = FALSE)
##### create a sequence to capture the time series for all dates in the query #####
mrtg.trnd.hits = xts(as.numeric(mrtg.trend$interest_over_time$hits),
order.by = as.Date(mrtg.trend$interest_over_time$date,
format="%Y-%m-%d"))
colnames(mrtg.trnd.hits) = "gtrnd.mrtg"
mrtg.trnd.hits$gtrnd.mrtg = mrtg.trnd.hits$gtrnd.mrtg/100
# Calculate Rate of Change for the trend
mrtg.trnd.hits$gtrnd.mrtg_ROC = Delt(mrtg.trnd.hits$gtrnd.mrtg, k=1)
tail(mrtg.trnd.hits,20)
# ##### Merge Mortgage Trend Data into df ###############################
# Outer merge keeps non-trading days (Sundays) that carry trend observations.
df = merge.xts(df, mrtg.trnd.hits) # join = "left") DO NOT LEFT JOIN AS IT REMOVES THE SUNDAYS!!!
# Zero-fill the NAs the merge introduced. BUG FIX: the original indexed with
# `tdf$...`, but no object `tdf` exists here (it is only a commented-out alias
# further down), so these lines errored at runtime; index df itself.
df$gtrnd.mrtg[is.na(df$gtrnd.mrtg)] = 0
df$gtrnd.mrtg_ROC[is.na(df$gtrnd.mrtg_ROC)] = 0
tail(df,40)
# Clean up the zeroes
val.idx = which(!df$gtrnd.mrtg_ROC==0) #get values that are not ZEROS
for (i in 1:(length(val.idx)-1)){
loc.0 = val.idx[i] + 1
loc.1 = val.idx[i+1] - 1
prev.wk.delta = loc.1 + 1
# check to make sure the range between values is a week or less
if((loc.1+1 - loc.0-1 <= 7)){ # check to make sure the range in dates is not greater than a week
# print(paste("good to go",i,sep=""))
df$gtrnd.mrtg_ROC[loc.0:loc.1] = df$gtrnd.mrtg_ROC[prev.wk.delta]
df$gtrnd.mrtg[loc.0:loc.1] = df$gtrnd.mrtg[prev.wk.delta]
} else {}
} #end for
##### __________________________ CALCULATE TARGETS ______________________________________ #####
# Calculate the rolling standard deviation
# tdf = df
df$Delt.1 = Delt(df$adj., k=1, type = "log") # % daily returns
# df$Delt.5 = Delt(tdf$adj., k=5, type = "log") # % return on five day lag
df$sd.month.d1 = NA # create column for month standard deviation
df$sd.qrtr.d1 = NA # create column for quarter standard deviation
df$sd.annual.d1 = NA # create column for annual standard deviation
# biz day calculations, so i don't forget
252/12
252/4 # one quarter in business days
# USE k=1 DELTS OF RETURNS FOR CALCULATIONS -------------------------------------------------
# Rolling std. dev. of daily returns over the trailing MONTH (21 trading days).
# BUG FIX: `lookback:i-1` parses as `(lookback:i) - 1` in R (`:` binds tighter
# than `-`), which shifted every window one day early and made it one day too
# long; the intended window is lookback..(i-1), i.e. the 21/63/252 trading
# days strictly before day i.
for (i in 22:length(df$adj.)){
  lookback = i-21
  df$sd.month.d1[i] = sd(df$Delt.1[lookback:(i-1)], na.rm = TRUE)
}
# Rolling std. dev. over the trailing QUARTER (63 trading days).
for (i in 64:length(df$adj.)){
  lookback = i-63
  df$sd.qrtr.d1[i] = sd(df$Delt.1[lookback:(i-1)], na.rm = TRUE)
}
# Rolling std. dev. over the trailing YEAR (252 trading days).
for (i in 253:length(df$adj.)){
  lookback = i-252
  df$sd.annual.d1[i] = sd(df$Delt.1[lookback:(i-1)], na.rm = TRUE)
}
# take a look at results for comparison
tail(df[,c('sd.month.d1','sd.qrtr.d1','sd.annual.d1')])
# comparing these numbers, there might be some room to develop an attribute using the ratio
# between them as a momentum indicator. It may be redundant to other momentum indicators.
# USE k=5 DELTS OF RETURNS FOR CALCULATIONS (NOT USED CURRENTLY) -------------------------------------------------
# THIS WAS LEFT OUT DELIBERATELY AS IT ADDS COMPLEXITY IN MODELING RIGHT NOW AND SEEMS TO
# BE ATYPICAL WITH COMMON METHODS. IT CAN UNCOMMENTED AND APPLIED IF NECESSARY
#_____________________________________________________________________________________
# df$sd.month.d5 = NA # create column for month standard deviation
# df$sd.qrtr.d5 = NA # create column for quarter standard deviation
# df$sd.annual.d5 = NA # create column for annual standard deviation
#
# # Calculate the rolling std. dev. for the previous month's time
# for (i in 22:length(df$close)){
# lookback = i-21
# df$sd.month.d5[i] = sd(df$Delt.5[lookback:i-1], na.rm = TRUE)
# }
#
# # Calculate the rolling std. dev. for the previous quarter's time
# for (i in 64:length(df$close)){
# lookback = i-63
# df$sd.qrtr.d5[i] = sd(df$Delt.5[lookback:i-1], na.rm = TRUE)
# }
#
# # Calculate the rolling std. dev. for the previous year's time
# for (i in 253:length(df$close)){
# lookback = i-252
# df$sd.annual.d5[i] = sd(df$Delt.5[lookback:i-1], na.rm = TRUE)
# }
#
# tail(df[,c('Delt.1', 'sd.month.d5','sd.qrtr.d5','sd.annual.d5')])
####______________________GENERATE RESPONSE FOR 1,2 STDEVs____________________________####
# identify the price movements as crossing the threshold for ONE std. dev.
df$Resp.1.std = 0
df$Resp.2.std = 0
df$Resp.1.std = ifelse(df$Delt.1 >= df$sd.month.d1, 1, 0)
df$Resp.2.std = ifelse(df$Delt.1 >= 2*(df$sd.month.d1), 1, 0)
# df$Resp.1.std.ann = ifelse(df$Delt.1 >= df$sd.qrtr.d1, 1, 0)
# calculate the forward movement by stdev forward 1, 3, 5, 10 days
##### calculate the binary variables for price change >= 2 std.dev in FIVE days forward #####
df$Resp.5d.2sd=rep(0, nrow(df)) # fill the column with zeros
# tr = c(0, 1,NA,0,2)
for(i in 1:(nrow(df)-5)){
# tval = df$Delt.1[i]
bdays = df$Resp.2.std[(i+1):(i+5)]
if (sum(bdays, na.rm=TRUE)>0){
df$Resp.5d.2sd[i] = 1
# ifelse(i%%5==0,print(i),"")
}
else{df$Resp.5d.2sd[i] = 0}
} # endfor
##### calculate the binary variables for price change >= 2 std.dev TEN days forward #####
df$Resp.10d.2sd = rep(0, nrow(df)) #fill the column with zeros
for(i in 1:(nrow(df)-10)){
# tval = df$Delt.1[i]
bdays = df$Resp.2.std[(i+6):(i+10)]
if (sum(bdays, na.rm=TRUE) > 0){
df$Resp.10d.2sd[i]=1
# ifelse(i%%5==0,print(i),"")
}
else{df$Resp.10d.2sd[i] = 0}
} # endfor
# sd2ind = which(df$Resp.2.std==1)
# df[70:85,c('adj.', 'Delt.1', 'Resp.2.std', 'sd.month.d1','Resp.5d.2sd')]
##### REMOVE NAs FROM THE DATA TABLE ####################################################
nrow(df)
df = na.omit(df)
####_____________________________ Write the file ____________________________________#####
write.zoo(df,
paste(thePath, Sys.Date(), "_AGGREGATE_", ticker, ".csv", sep=""),
sep=",",
row.names=FALSE)
} #END FUNCTION | /Final/finalScript.R | no_license | micfloy/OS4118 | R | false | false | 17,542 | r | ##########################################
## Pull a set of data and condition it for modeling to predict price movement.
## 2018-08-16
##########################################
# reference info
# https://wikitech.wikimedia.org/wiki/Analytics/AQS/Pageviews
#
# Put-call ratio from CBOE
# http://r.789695.n4.nabble.com/as-xts-convert-all-my-numeric-data-to-character-td975564.html
# http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/totalpc.csv
#
##########################################
agg_stock_data <- function(ticker=NULL, term=NULL, save_pulls = NULL) {
# Libraries required
library(quantmod)
library(xts)
library(zoo)
library(pageviews)
library(bizdays)
library(gtrendsR)
library(devtools)
library(TTR)
# Output directory prefix for all CSV exports. BUG FIX: the original path had
# no trailing separator, so paste(thePath, Sys.Date(), ...) produced files
# named "...\Downloads2018-..." in C:\Users\Michael instead of inside Downloads.
thePath = "C:\\Users\\Michael\\Downloads\\"
# Timestamp string "YYYYMMDD00" in the format the pageviews API expects.
today = gsub( pattern = "-", replacement="", as.character(Sys.Date()))
today = paste(today, "00", sep = "")
# BUG FIX: these leftover TESTING VALUES unconditionally clobbered the caller's
# arguments, making the ticker/term parameters dead; keep them only as
# fallbacks when the caller supplied nothing (backward-compatible defaults).
if (is.null(ticker)) ticker = "AAPL" #, "AMZN", "T")
if (is.null(term)) term = "aapl stock"
df = getSymbols(Symbols = ticker, return.class='xts', env=NULL)
# subset to include december 2013 onward, which allows for calculating moving averages and stdev
strt = as.Date(first(grep(pattern="2013-12-0+",index(df), value = TRUE)), format = "%Y-%m-%d")
end = nrow(df)
strt = which(index(df)==strt)
# df = index(df)[strt:nrow(df)] # here's how to grab the index only
# df = df[index(df)[strt:end],]
df = df[strt:end,]
# Fix the names of the standard object returned
colnames(df) = c("open","high","low","close","volume","adj.")
# head(df)
##### ______________________ Compute Stock Technicals _____________________________#######
# __________________________________________________________________________#
df$EMA3 = EMA(df$adj, n=3) # thesis
df$EMA5 = EMA(df$adj, n=5) # thesis
df$EMA10 = EMA(df$adj, n=10) # thesis
df$WPR = WPR(df[,c("high", "low", "close")])
df$RSI.10 = RSI(df$adj, n=10, maType = "EMA")
df$RSI.5 = RSI(df$adj, n=5, maType = "EMA")
df$MACD.12.26.EMA = MACD(df$adj, nFast = 12, nSlow = 26, nSig = 9, maType = "EMA")
df$stoch.3 = stoch(df[,c('high','low','close')],
n.fastK=14,
ma.fastD=list("SMA", n=3),
ma.slowD=list("SMA", n=3))
df$stoch.def = stoch(df[,c('high','low','close')])
df$ROC.3 = ROC(df$adj, n=3)
df$ROC.5 = ROC(df$adj, n=5)
df$ROC.10 = ROC(df$adj, n=10)
# Volumetric calculations
df$Vol.ROC.1 = ROC(df$volume, n=1)
df$Vol.ROC.3 = ROC(df$volume, n=3)
df$Vol.ROC.5 = ROC(df$volume, n=5)
df$Vol.ROC.10 = ROC(df$volume, n=10)
df$Vol.RSI.3 = RSI(df$volume, n=3, maType = "EMA")
df$Vol.RSI.5 = RSI(df$volume, n=5, maType = "EMA")
df$Vol.RSI.10 = RSI(df$volume, n=10, maType = "EMA")
##### ______________________ Wikipedia Counts __________________________________#####
# __________________________________________________________________________#
wikicnts = article_pageviews(project = "en.wikipedia",
article = "AAPL",
platform = "all",
user_type = "all",
start = "2015100100",
end = today,
reformat = TRUE)[,7:8] #col 7 & 8 have date and count
wikicnts = xts(as.numeric(wikicnts[,-1]),
order.by = as.Date(wikicnts[,'date'],
format="%Y-%m-%d") )
colnames(wikicnts) = "wikicnts"
# get ROC for wiki.views
wikicnts$wiki.ROC.1 = ROC(wikicnts$wikicnts, n=1, na.pad = TRUE)
wikicnts$wiki.ROC.3 = ROC(wikicnts$wikicnts, n=3, na.pad = TRUE)
# get RSI for wiki views
wikicnts$wiki.RSI.3 = RSI(wikicnts$wikicnts, n=3, maType = "EMA")
wikicnts$wiki.RSI.5 = RSI(wikicnts$wikicnts, n=5, maType = "EMA")
# tail(wikicnts,25)
# str(wikicnts)
# if(save_pulls == TRUE){
# wikiname = paste(thePath, Sys.Date(),"_WikiCnts.csv", sep = "")
# write.zoo(wikicnts, wikiname, sep=",", index.name = "Date", col.names = TRUE)
# }
##### Merge the wikicnts with the df dataframe of stock data ____________________#####
df = merge(df, wikicnts) # join='left') THIS SHOULD BE GOOD NOW!
# tail(df,40)
##### Pull Google Trends Data for Ticker #################################################
# https://github.com/PMassicotte/gtrendsR/issues/252
# The library to pull from google trends is a dev library that you install from here
# devtools::install_github("PMassicotte/gtrendsR")
library(gtrendsR)
#######################################################################
# Fetch Google Trends interest in ~90-day chunks so results keep daily
# resolution (queries spanning more than ~9 months come back downsampled
# to weekly by the Trends API).
#
# @param keyword   Search term passed to gtrends().
# @param geo       Geo restriction, e.g. "US".
# @param timeStart "YYYY-MM-DD" string; first day of the chunked pulls.
# @param timeEnd   "YYYY-MM-DD" string; last day covered by the chunked loop.
# @param gprop     Google property ("web", "news", ...); gtrends accepts one only.
# @return data.frame accumulating the date and hits columns over all chunks.
trendHelper <- function(keyword = NULL, geo = NULL, timeStart = NULL, timeEnd = NULL, gprop = NULL) {
  dateStart <- as.Date(timeStart)
  dateEnd <- as.Date(timeEnd)
  curDate <- dateStart
  dftemp <- data.frame(date = character(), hits = numeric(), stringsAsFactors = FALSE)
  while (curDate <= dateEnd) {
    # BUG FIX: the original queried the global `term` instead of the
    # `keyword` argument, silently ignoring the caller's search term.
    trend <- gtrends(keyword = keyword,
                     geo = geo,
                     time = paste(curDate, curDate + 90, sep = " "),
                     gprop = gprop,
                     hl = "en-US",
                     low_search_volume = FALSE)
    curDate <- curDate + 90  # NOTE(review): adjacent chunks share a boundary day — confirm dedup is wanted
    dfNew <- data.frame(trend$interest_over_time$date,
                        trend$interest_over_time$hits,
                        stringsAsFactors = FALSE)
    dftemp <- rbind(dftemp, dfNew)
    print(curDate)  # progress indicator for the slow network loop
  }
  # Final partial chunk from the first uncovered day through today; skipped when
  # the loop already advanced past today (the original would have asked gtrends
  # for an inverted date range and errored).
  if (curDate <= Sys.Date()) {
    trend <- gtrends(keyword = keyword,
                     geo = geo,
                     time = paste(curDate, Sys.Date(), sep = " "),
                     gprop = gprop,
                     hl = "en-US",
                     low_search_volume = FALSE)
    dfNew <- data.frame(trend$interest_over_time$date,
                        trend$interest_over_time$hits,
                        stringsAsFactors = FALSE)
    dftemp <- rbind(dftemp, dfNew)
  }
  # Return the accumulated frame explicitly (the original ended on an
  # assignment, returning it only invisibly).
  dftemp
}
################################################################
trend = trendHelper(term, "US", "2015-01-01", "2015-10-01", "web")
# trend = gtrends(keyword = term,
# geo = "US",
# time = paste("2015-01-01", Sys.Date(), sep=" "), # "today+5-y", #"today 1-m"
# gprop = "web", #c("web","news"),
# # category = 0,
# hl = "en-US",
# low_search_volume = FALSE)
# head(trend$interest_over_time$hits)
# This works. Appears that the gprop cannot take more than one arg (e.g. it cannot
# be c("news", "web"))
# gtrend = gtrends("insomnia",
# gprop = 'web', #c("news"), #test case used 'news'
# geo = "US",
# time = "today 1-m")
##### WRITE THE GTREND DATA TO FILE
# Persist the raw Google Trends pull so the query can be audited/re-used later.
# BUG FIX: `if(save_pulls=TRUE)` used `=` (assignment) inside `if()`, which is a
# parse error in R; isTRUE() also tolerates save_pulls = NULL (the default).
if (isTRUE(save_pulls)) {
  gtrendfile <- trend$interest_over_time
  gtrendname <- paste(thePath, Sys.Date(), "_GTREND_Data.csv", sep = "")
  write.table(gtrendfile, gtrendname, sep = ",", col.names = TRUE, row.names = FALSE)
}
##### create a sequence to capture the time series for all dates in the query #####
trend$interest_over_time$hits = as.numeric(trend$interest_over_time$hits)
trnd.hits = xts(as.numeric(trend$interest_over_time$hits), order.by = as.Date(trend$interest_over_time$date, format="%Y-%m-%d"))
colnames(trnd.hits) = "gtrnd"
trnd.hits$gtrnd = trnd.hits$gtrnd/100
trnd.hits$gtrnd_ROC = Delt(trnd.hits$gtrnd, k=1)
# str(trnd.hits)
# tail(trnd.hits)
# write a time series that covers all dates from the beginning of the trend data
tr.series = seq.Date(from=(as.Date(trend$interest_over_time$date[1], format="%Y-%m-%d"))-7, to=Sys.Date(),by="day")
tr.series = data.frame(date=tr.series, gtrnd=0)
tr.series = xts(as.numeric(tr.series$gtrnd), order.by = as.Date(tr.series$date, format="%Y-%m-%d"))
colnames(tr.series) = "gtrnd"
tail(tr.series)
# tr.series$gtrnd = tr.series$gtrnd + trnd.hits$gtrnd
trend.series = merge(tr.series$gtrnd, trnd.hits$gtrnd)[,2]
trend.series = merge(trend.series, trnd.hits$gtrnd_ROC)
trend.series[is.na(trend.series)] = 0
colnames(trend.series) = c("g_hits", "g_hits_ROC")
# tr.series[ind,] = trnd.hits[ind,]
# str(tr.series)
# get summary info for gtrend
###### get related query summary data ###########################################
rel_query = unlist(trend$related_queries$value)
print(paste("Number of related queries for symbol: ", length(rel_query), sep = ""))
print(rel_query)
# sumdf = summary(trend)
##### DO NOT USE RIGHT NOW ################
# get the data for scaled hits for the keyword in gtrend
# time.trend = trend$interest_over_time[,c(1,2)]
# turn the trend data into an extensible time series (xts)
# time.trend = xts(time.trend[,2], order.by = as.Date(time.trend$date, format="%Y-%m-%d"))
# call the new column
# colnames(time.trend) = "g_hits"
# class(time.trend)
# head(time.trend)
# ifelse(summary(trend$interest_by_country)==0, trend$interest_by_country=NULL)
# summary(time.trend)
# class(time.trend)
# scale the hits value to be a percentage
# time.trend$g_hits = time.trend$g_hits/100
# calculate the rate of change for k=1 period lookback (because google trend data is provided
# only weekly for queries of a year or more k=1 is the best resolution you can get). I could not
# find a way to get data at a daily rate for queries spanning more than a few days/a week.
# time.trend$goog_wk_ROC = Delt(time.trend$g_hits, k=1, type="log")
# head(time.trend)
####_______________________MERGE Google Trend Data to Dataframe _________________________#####
# this does a left join which maps the weekly google trend data into the xts df
# and creates a bunch of NAs in between the actual values
# Merge the raw hits data into df first (this will have it in the data file if there's more to do
# with it later without having to run this function).
df = merge.xts(df, trend.series) # join = "left") DO NOT LEFT JOIN AS IT REMOVES THE SUNDAYS!!!
tail(df,40)
# Merge the ROC data next
# df = merge.xts(df, time.trend$goog_wk_ROC) #, join = "left")
# parse thru the zero values and fill in delta value for the previous week
# na.idx = which(is.na(df$goog_wk_ROC)) # get values which are NA in the merged data frame
# Back-fill the zero gaps between weekly Google-Trends observations: each run
# of zeros sitting between two non-zero ROC values is overwritten with the
# NEXT observed week's ROC value, so every trading day carries the delta of
# the week it belongs to.
val.idx = which(!df$g_hits_ROC==0) #get values that are not ZEROS
for (i in 1:(length(val.idx)-1)){
loc.0 = val.idx[i] + 1          # first zero after the i-th non-zero value
loc.1 = val.idx[i+1] - 1        # last zero before the next non-zero value
prev.wk.delta = loc.1 + 1       # index of the next observed (non-zero) ROC
# print(loc.1-loc.0)
# check to make sure the range between values is a week or less
# NOTE(review): `loc.1+1 - loc.0-1` evaluates left-to-right to loc.1 - loc.0,
# i.e. (gap length - 1) <= 7, so gaps of up to 8 zeros get filled — confirm
# that off-by-one is intended.
if((loc.1+1 - loc.0-1 <= 7)){ # check to make sure the range in dates is not greater than a week
# print(paste("good to go",i,sep=""))
df$g_hits_ROC[loc.0:loc.1] = df$g_hits_ROC[prev.wk.delta]
} else {}
} #end for
##### Pull Google Trend Data for Mortgage Data ######################################
mrtg.trend = gtrends(keyword = "mortgage rates",
geo = "US",
time = paste("2015-01-01", Sys.Date(), sep=" "), #"today+5-y",
gprop = "web",
hl = "en-US",
low_search_volume = FALSE)
##### create a sequence to capture the time series for all dates in the query #####
mrtg.trnd.hits = xts(as.numeric(mrtg.trend$interest_over_time$hits),
order.by = as.Date(mrtg.trend$interest_over_time$date,
format="%Y-%m-%d"))
colnames(mrtg.trnd.hits) = "gtrnd.mrtg"
mrtg.trnd.hits$gtrnd.mrtg = mrtg.trnd.hits$gtrnd.mrtg/100
# Calculate Rate of Change for the trend
mrtg.trnd.hits$gtrnd.mrtg_ROC = Delt(mrtg.trnd.hits$gtrnd.mrtg, k=1)
tail(mrtg.trnd.hits,20)
# ##### Merge Mortgage Trend Data into df ###############################
# Outer merge keeps non-trading days (Sundays) that carry trend observations.
df = merge.xts(df, mrtg.trnd.hits) # join = "left") DO NOT LEFT JOIN AS IT REMOVES THE SUNDAYS!!!
# Zero-fill the NAs the merge introduced. BUG FIX: the original indexed with
# `tdf$...`, but no object `tdf` exists here (it is only a commented-out alias
# further down), so these lines errored at runtime; index df itself.
df$gtrnd.mrtg[is.na(df$gtrnd.mrtg)] = 0
df$gtrnd.mrtg_ROC[is.na(df$gtrnd.mrtg_ROC)] = 0
tail(df,40)
# Clean up the zeroes
val.idx = which(!df$gtrnd.mrtg_ROC==0) #get values that are not ZEROS
for (i in 1:(length(val.idx)-1)){
loc.0 = val.idx[i] + 1
loc.1 = val.idx[i+1] - 1
prev.wk.delta = loc.1 + 1
# check to make sure the range between values is a week or less
if((loc.1+1 - loc.0-1 <= 7)){ # check to make sure the range in dates is not greater than a week
# print(paste("good to go",i,sep=""))
df$gtrnd.mrtg_ROC[loc.0:loc.1] = df$gtrnd.mrtg_ROC[prev.wk.delta]
df$gtrnd.mrtg[loc.0:loc.1] = df$gtrnd.mrtg[prev.wk.delta]
} else {}
} #end for
##### __________________________ CALCULATE TARGETS ______________________________________ #####
# Calculate the rolling standard deviation
# tdf = df
df$Delt.1 = Delt(df$adj., k=1, type = "log") # % daily returns
# df$Delt.5 = Delt(tdf$adj., k=5, type = "log") # % return on five day lag
df$sd.month.d1 = NA # create column for month standard deviation
df$sd.qrtr.d1 = NA # create column for quarter standard deviation
df$sd.annual.d1 = NA # create column for annual standard deviation
# biz day calculations, so i don't forget
252/12
252/4 # one quarter in business days
# USE k=1 DELTS OF RETURNS FOR CALCULATIONS -------------------------------------------------
# Rolling std. dev. of daily returns over the trailing MONTH (21 trading days).
# BUG FIX: `lookback:i-1` parses as `(lookback:i) - 1` in R (`:` binds tighter
# than `-`), which shifted every window one day early and made it one day too
# long; the intended window is lookback..(i-1), i.e. the 21/63/252 trading
# days strictly before day i.
for (i in 22:length(df$adj.)){
  lookback = i-21
  df$sd.month.d1[i] = sd(df$Delt.1[lookback:(i-1)], na.rm = TRUE)
}
# Rolling std. dev. over the trailing QUARTER (63 trading days).
for (i in 64:length(df$adj.)){
  lookback = i-63
  df$sd.qrtr.d1[i] = sd(df$Delt.1[lookback:(i-1)], na.rm = TRUE)
}
# Rolling std. dev. over the trailing YEAR (252 trading days).
for (i in 253:length(df$adj.)){
  lookback = i-252
  df$sd.annual.d1[i] = sd(df$Delt.1[lookback:(i-1)], na.rm = TRUE)
}
# take a look at results for comparison
tail(df[,c('sd.month.d1','sd.qrtr.d1','sd.annual.d1')])
# comparing these numbers, there might be some room to develop an attribute using the ratio
# between them as a momentum indicator. It may be redundant to other momentum indicators.
# USE k=5 DELTS OF RETURNS FOR CALCULATIONS (NOT USED CURRENTLY) -------------------------------------------------
# THIS WAS LEFT OUT DELIBERATELY AS IT ADDS COMPLEXITY IN MODELING RIGHT NOW AND SEEMS TO
# BE ATYPICAL WITH COMMON METHODS. IT CAN UNCOMMENTED AND APPLIED IF NECESSARY
#_____________________________________________________________________________________
# df$sd.month.d5 = NA # create column for month standard deviation
# df$sd.qrtr.d5 = NA # create column for quarter standard deviation
# df$sd.annual.d5 = NA # create column for annual standard deviation
#
# # Calculate the rolling std. dev. for the previous month's time
# for (i in 22:length(df$close)){
# lookback = i-21
# df$sd.month.d5[i] = sd(df$Delt.5[lookback:i-1], na.rm = TRUE)
# }
#
# # Calculate the rolling std. dev. for the previous quarter's time
# for (i in 64:length(df$close)){
# lookback = i-63
# df$sd.qrtr.d5[i] = sd(df$Delt.5[lookback:i-1], na.rm = TRUE)
# }
#
# # Calculate the rolling std. dev. for the previous year's time
# for (i in 253:length(df$close)){
# lookback = i-252
# df$sd.annual.d5[i] = sd(df$Delt.5[lookback:i-1], na.rm = TRUE)
# }
#
# tail(df[,c('Delt.1', 'sd.month.d5','sd.qrtr.d5','sd.annual.d5')])
####______________________GENERATE RESPONSE FOR 1,2 STDEVs____________________________####
# identify the price movements as crossing the threshold for ONE std. dev.
df$Resp.1.std = 0
df$Resp.2.std = 0
df$Resp.1.std = ifelse(df$Delt.1 >= df$sd.month.d1, 1, 0)
df$Resp.2.std = ifelse(df$Delt.1 >= 2*(df$sd.month.d1), 1, 0)
# df$Resp.1.std.ann = ifelse(df$Delt.1 >= df$sd.qrtr.d1, 1, 0)
# calculate the forward movement by stdev forward 1, 3, 5, 10 days
##### calculate the binary variables for price change >= 2 std.dev in FIVE days forward #####
df$Resp.5d.2sd=rep(0, nrow(df)) # fill the column with zeros
# tr = c(0, 1,NA,0,2)
for(i in 1:(nrow(df)-5)){
# tval = df$Delt.1[i]
bdays = df$Resp.2.std[(i+1):(i+5)]
if (sum(bdays, na.rm=TRUE)>0){
df$Resp.5d.2sd[i] = 1
# ifelse(i%%5==0,print(i),"")
}
else{df$Resp.5d.2sd[i] = 0}
} # endfor
##### calculate the binary variables for price change >= 2 std.dev TEN days forward #####
df$Resp.10d.2sd = rep(0, nrow(df)) #fill the column with zeros
for(i in 1:(nrow(df)-10)){
# tval = df$Delt.1[i]
bdays = df$Resp.2.std[(i+6):(i+10)]
if (sum(bdays, na.rm=TRUE) > 0){
df$Resp.10d.2sd[i]=1
# ifelse(i%%5==0,print(i),"")
}
else{df$Resp.10d.2sd[i] = 0}
} # endfor
# sd2ind = which(df$Resp.2.std==1)
# df[70:85,c('adj.', 'Delt.1', 'Resp.2.std', 'sd.month.d1','Resp.5d.2sd')]
##### REMOVE NAs FROM THE DATA TABLE ####################################################
# NOTE(review): na.omit() drops every row where ANY column is NA — in practice
# this discards roughly the first year of rows (sd.annual.d1 needs 252 prior
# observations) plus any dates missing wiki/trend values. Confirm this is
# the intended trade-off before modeling.
nrow(df)
df = na.omit(df)
####_____________________________ Write the file ____________________________________#####
# Export the aggregated series as <thePath><date>_AGGREGATE_<ticker>.csv;
# write.zoo() emits the date index as the first column.
write.zoo(df,
          paste(thePath, Sys.Date(), "_AGGREGATE_", ticker, ".csv", sep=""),
          sep=",",
          row.names=FALSE)
} #END FUNCTION |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lagmatrix.R
\name{lagmatrix}
\alias{lagmatrix}
\title{Creates Lagmatrix of Estimated Coefficients}
\usage{
lagmatrix(fit, model, returnplot = F)
}
\arguments{
\item{fit}{Fitted VAR, VARX or VARMA model.}
\item{model}{Type of model that was estimated: VAR, VARX or VARMA.}
\item{returnplot}{TRUE or FALSE: return plot of lag matrix or not.}
}
\value{
A list with estimated lag matrix of the VAR model, or lag matrices of the VARX or VARMA model. The rows contain the responses, the columns contain the predictors.
}
\description{
Creates Lagmatrix of Estimated Coefficients
}
\examples{
data(Y)
data(X)
VARXfit <- sparseVARX(Y=Y, X=X) # sparse VARX
Lhats <- lagmatrix(fit=VARXfit, model="VARX")
}
| /man/lagmatrix.Rd | no_license | cuptea/bigtime | R | false | true | 776 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lagmatrix.R
\name{lagmatrix}
\alias{lagmatrix}
\title{Creates Lagmatrix of Estimated Coefficients}
\usage{
lagmatrix(fit, model, returnplot = F)
}
\arguments{
\item{fit}{Fitted VAR, VARX or VARMA model.}
\item{model}{Type of model that was estimated: VAR, VARX or VARMA.}
\item{returnplot}{TRUE or FALSE: return plot of lag matrix or not.}
}
\value{
A list with estimated lag matrix of the VAR model, or lag matrices of the VARX or VARMA model. The rows contain the responses, the columns contain the predictors.
}
\description{
Creates Lagmatrix of Estimated Coefficients
}
\examples{
data(Y)
data(X)
VARXfit <- sparseVARX(Y=Y, X=X) # sparse VARX
Lhats <- lagmatrix(fit=VARXfit, model="VARX")
}
|
library("pheatmap")
# Draw a heatmap of non-negative expression values on a log10 scale.
#
# Zeros are parked at Inf so min() skips them, the data are log10-transformed
# and shifted so the smallest non-zero value maps to 0 (for min < 1), and the
# original zeros are re-encoded as 0 afterwards. Row labels are hidden for
# tables with more than 55 rows (they become unreadable); rows are either left
# unclustered (clusterCut == 0) or cut into `clusterCut` clusters.
#
# @param table      numeric matrix/data.frame of non-negative values.
# @param clusterCut 0 = no row clustering; otherwise the number of row clusters.
heatMapRoots <- function(table,clusterCut){
  table[table==0] <- Inf
  # BUG FIX: the offset was computed with natural log (log()) while the table
  # is transformed with log10(), so the shift was off by a factor of ln(10);
  # both must use base 10 for the minimum to land at 0.
  logMinValue <- log10(min(table))
  table <- log10(table)
  table <- table + abs(logMinValue)
  table[table==Inf] <- 0   # restore the original zeros after the transform
  # The four original branches differed only in show_rownames; collapse them.
  showNames <- length(row.names(table)) <= 55
  if (clusterCut == 0) {
    pheatmap(table, clustering_method = "ward.D2", cluster_cols = FALSE,
             cluster_rows = FALSE, show_rownames = showNames)
  } else {
    pheatmap(table, cutree_rows = clusterCut, clustering_method = "ward.D2",
             cluster_cols = FALSE, show_rownames = showNames)
  }
}
| /Components/Comp_heatmaps.R | no_license | ceslobfer/Rootapp | R | false | false | 794 | r | library("pheatmap")
# Draw a heatmap of non-negative expression values on a log10 scale.
#
# Zeros are parked at Inf so min() skips them, the data are log10-transformed
# and shifted so the smallest non-zero value maps to 0 (for min < 1), and the
# original zeros are re-encoded as 0 afterwards. Row labels are hidden for
# tables with more than 55 rows (they become unreadable); rows are either left
# unclustered (clusterCut == 0) or cut into `clusterCut` clusters.
#
# @param table      numeric matrix/data.frame of non-negative values.
# @param clusterCut 0 = no row clustering; otherwise the number of row clusters.
heatMapRoots <- function(table,clusterCut){
  table[table==0] <- Inf
  # BUG FIX: the offset was computed with natural log (log()) while the table
  # is transformed with log10(), so the shift was off by a factor of ln(10);
  # both must use base 10 for the minimum to land at 0.
  logMinValue <- log10(min(table))
  table <- log10(table)
  table <- table + abs(logMinValue)
  table[table==Inf] <- 0   # restore the original zeros after the transform
  # The four original branches differed only in show_rownames; collapse them.
  showNames <- length(row.names(table)) <= 55
  if (clusterCut == 0) {
    pheatmap(table, clustering_method = "ward.D2", cluster_cols = FALSE,
             cluster_rows = FALSE, show_rownames = showNames)
  } else {
    pheatmap(table, cutree_rows = clusterCut, clustering_method = "ward.D2",
             cluster_cols = FALSE, show_rownames = showNames)
  }
}
|
\name{survcomp-package}
\alias{survcomp-package}
\alias{survcomp}
\docType{package}
\title{
Performance Assessment and Comparison for Survival Analysis
}
\description{
Functions to perform the performance assessment and comparison of risk prediction (survival) models.
}
\details{
\tabular{ll}{
Package: \tab survcomp\cr
Type: \tab Package\cr
Version: \tab 1.1.6\cr
Date: \tab 2010-08-23\cr
License: \tab GPL-3\cr
}
}
\author{
Benjamin Haibe-Kains
- Computational Biology and Functional Genomics Laboratory, Dana-Farber Cancer Institute, Boston, MA, USA
\url{http://compbio.dfci.harvard.edu/}
- Center for Cancer Computational Biology, Dana-Farber Cancer Institute, Boston, MA, USA
\url{http://cccb.dfci.harvard.edu/index.html}
Former labs:
- Machine Learning Group (MLG), Universite Libre de Bruxelles, Bruxelles, Belgium
\url{http://www.ulb.ac.be/di/mlg/}
- Breast Cancer Translational Research Laboratory (BCTRL), Institut Jules Bordet, Bruxelles, Belgium
\url{http://www.bordet.be/en/services/medical/array/practical.htm}
\bold{Maintainer}: Benjamin Haibe-Kains
\email{bhaibeka@jimmy.harvard.edu}
\email{bhaibeka@ulb.ac.be}
}
%\references{}
\keyword{ survival }
\keyword{ htest }
\seealso{
\code{survival}, \code{Hmisc}, \code{Design}, \code{prodlim}, \code{survivalROC}, \code{ipred}, \code{bootstrap}
}
%\examples{}
| /man/survcomp-package.Rd | no_license | cran/survcomp | R | false | false | 1,338 | rd | \name{survcomp-package}
\alias{survcomp-package}
\alias{survcomp}
\docType{package}
\title{
Performance Assessment and Comparison for Survival Analysis
}
\description{
Functions to assess and compare the performance of risk prediction (survival) models.
}
\details{
\tabular{ll}{
Package: \tab survcomp\cr
Type: \tab Package\cr
Version: \tab 1.1.6\cr
Date: \tab 2010-08-23\cr
License: \tab GPL-3\cr
}
}
\author{
Benjamin Haibe-Kains
- Computational Biology and Functional Genomics Laboratory, Dana-Farber Cancer Institute, Boston, MA, USA
\url{http://compbio.dfci.harvard.edu/}
- Center for Cancer Computational Biology, Dana-Farber Cancer Institute, Boston, MA, USA
\url{http://cccb.dfci.harvard.edu/index.html}
Former labs:
- Machine Learning Group (MLG), Universite Libre de Bruxelles, Bruxelles, Belgium
\url{http://www.ulb.ac.be/di/mlg/}
- Breast Cancer Translational Research Laboratory (BCTRL), Institut Jules Bordet, Bruxelles, Belgium
\url{http://www.bordet.be/en/services/medical/array/practical.htm}
\bold{Maintainer}: Benjamin Haibe-Kains
\email{bhaibeka@jimmy.harvard.edu}
\email{bhaibeka@ulb.ac.be}
}
%\references{}
\keyword{ survival }
\keyword{ htest }
\seealso{
\code{survival}, \code{Hmisc}, \code{Design}, \code{prodlim}, \code{survivalROC}, \code{ipred}, \code{bootstrap}
}
%\examples{}
|
#!/usr/bin/Rscript
## Script for generating bar plots and ratio tables, one set per gold standard.
library(ggplot2)
library(RColorBrewer)
###############################################################################
## set directories and get arguments
###############################################################################
main.dir <- 'D:/uni/work/m_project/gold_final/final'
data.dir <- 'D:/uni/work/m_project/gold_final/data'
assembler.names <- c('metaVelvet', 'rayMeta', 'idba-ud')
blast.names <- c('dc-megablast', 'megablast')
gold.names <- c('gold10', 'gold100', 'gold1000')
kmer.names <- c('31', '43', '55', '67', '79', '91')
# Helper functions (makeNameList, readCounts, sumCounts, makePlotFrame,
# makeBarPlot, writeRatioTable) are defined in make_plot_util.R.
source(file.path(main.dir, 'make_plot_util.R'))
###############################################################################
## do for each goldstandard
###############################################################################
for (gold.file in gold.names) {
  ## set input files
  file.list <- makeNameList(gold.file)
  ## load count files (implicit return -- redundant return() removed)
  raw.count.list <- lapply(file.list, function(x) lapply(x, readCounts))
  ## summarize counts
  sum.count.list <- lapply(raw.count.list, function(x) lapply(x, sumCounts))
  ## make plot frame
  plot.frame <- makePlotFrame(sum.count.list)
  ## make bar plot; paste0() builds e.g. "gold10_BarPlot.pdf" directly
  makeBarPlot(plot.frame, paste0(gold.file, '_BarPlot.pdf'))
  ## make ratio table
  writeRatioTable(sum.count.list, paste0(gold.file, '_RatioTable.txt'))
}
| /scripts/make_plot.R | no_license | desiro/master_project | R | false | false | 1,519 | r | #!/usr/bin/Rscript
## Script for generating bar plots and ratio tables, one set per gold standard.
library(ggplot2)
library(RColorBrewer)
###############################################################################
## set directories and get arguments
###############################################################################
main.dir <- 'D:/uni/work/m_project/gold_final/final'
data.dir <- 'D:/uni/work/m_project/gold_final/data'
assembler.names <- c('metaVelvet', 'rayMeta', 'idba-ud')
blast.names <- c('dc-megablast', 'megablast')
gold.names <- c('gold10', 'gold100', 'gold1000')
kmer.names <- c('31', '43', '55', '67', '79', '91')
# Helper functions (makeNameList, readCounts, sumCounts, makePlotFrame,
# makeBarPlot, writeRatioTable) are defined in make_plot_util.R.
source(file.path(main.dir, 'make_plot_util.R'))
###############################################################################
## do for each goldstandard
###############################################################################
for (gold.file in gold.names) {
  ## set input files
  file.list <- makeNameList(gold.file)
  ## load count files (implicit return -- redundant return() removed)
  raw.count.list <- lapply(file.list, function(x) lapply(x, readCounts))
  ## summarize counts
  sum.count.list <- lapply(raw.count.list, function(x) lapply(x, sumCounts))
  ## make plot frame
  plot.frame <- makePlotFrame(sum.count.list)
  ## make bar plot; paste0() builds e.g. "gold10_BarPlot.pdf" directly
  makeBarPlot(plot.frame, paste0(gold.file, '_BarPlot.pdf'))
  ## make ratio table
  writeRatioTable(sum.count.list, paste0(gold.file, '_RatioTable.txt'))
}
|
# Extract the $comments element of each scraped Facebook post object
# (post1 .. post113, created elsewhere in this project) into matching
# variables comments1 .. comments113.  This loop replaces 113 copy-pasted
# assignment statements; the resulting variables are identical.
for (i in seq_len(113)) {
  assign(paste0("comments", i), get(paste0("post", i))$comments)
}
| /comementsToDF.R | no_license | TonyNdungu/faceboocollector | R | false | false | 3,513 | r | comments1 <- post1$comments
# Extract the $comments element of each scraped Facebook post object
# (post2 .. post113, created elsewhere in this project) into matching
# variables comments2 .. comments113.  This loop replaces 112 copy-pasted
# assignment statements; the resulting variables are identical.
for (i in 2:113) {
  assign(paste0("comments", i), get(paste0("post", i))$comments)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survival_yppe.R
\name{survfit.yppe}
\alias{survfit.yppe}
\title{Survival function for the YPPE model}
\usage{
\method{survfit}{yppe}(object, newdata, ...)
}
\arguments{
\item{object}{an object of the class yppe}
\item{newdata}{a data frame containing the set of explanatory variables.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
a list containing the estimated survival probabilities.
}
\description{
Survival function for the YPPE model
}
\examples{
\donttest{
# ML approach:
library(YPPE)
mle <- yppe(Surv(time, status)~arm, data=ipass, approach="mle")
summary(mle)
ekm <- survfit(Surv(time, status)~arm, data=ipass)
newdata <- data.frame(arm=0:1)
St <- survfit(mle, newdata)
time <- sort(ipass$time)
plot(ekm, col=1:2)
lines(time, St[[1]])
lines(time, St[[2]], col=2)
# Bayesian approach:
bayes <- yppe(Surv(time, status)~arm, data=ipass, approach="bayes")
summary(bayes)
ekm <- survfit(Surv(time, status)~arm, data=ipass)
newdata <- data.frame(arm=0:1)
St <- survfit(bayes, newdata)
time <- sort(ipass$time)
plot(ekm, col=1:2)
lines(time, St[[1]])
lines(time, St[[2]], col=2)
}
}
| /man/survfit-methods.Rd | no_license | cran/YPPE | R | false | true | 1,196 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survival_yppe.R
\name{survfit.yppe}
\alias{survfit.yppe}
\title{Survival function for the YPPE model}
\usage{
\method{survfit}{yppe}(object, newdata, ...)
}
\arguments{
\item{object}{an object of the class yppe}
\item{newdata}{a data frame containing the set of explanatory variables.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
a list containing the estimated survival probabilities.
}
\description{
Survival function for the YPPE model
}
\examples{
\donttest{
# ML approach:
library(YPPE)
mle <- yppe(Surv(time, status)~arm, data=ipass, approach="mle")
summary(mle)
ekm <- survfit(Surv(time, status)~arm, data=ipass)
newdata <- data.frame(arm=0:1)
St <- survfit(mle, newdata)
time <- sort(ipass$time)
plot(ekm, col=1:2)
lines(time, St[[1]])
lines(time, St[[2]], col=2)
# Bayesian approach:
bayes <- yppe(Surv(time, status)~arm, data=ipass, approach="bayes")
summary(bayes)
ekm <- survfit(Surv(time, status)~arm, data=ipass)
newdata <- data.frame(arm=0:1)
St <- survfit(bayes, newdata)
time <- sort(ipass$time)
plot(ekm, col=1:2)
lines(time, St[[1]])
lines(time, St[[2]], col=2)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pda.utils.R
\name{pda.create.ensemble}
\alias{pda.create.ensemble}
\title{Create ensemble record for PDA ensemble}
\usage{
pda.create.ensemble(settings, con, workflow.id)
}
\arguments{
\item{all}{All arguments are the identically named variables in \code{pda.mcmc} / \code{pda.emulator}.}
}
\value{
Ensemble ID of the created ensemble
}
\description{
Create PDA Ensemble
}
\author{
Ryan Kelly
}
| /modules/assim.batch/man/pda.create.ensemble.Rd | permissive | araiho/pecan | R | false | true | 451 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pda.utils.R
\name{pda.create.ensemble}
\alias{pda.create.ensemble}
\title{Create ensemble record for PDA ensemble}
\usage{
pda.create.ensemble(settings, con, workflow.id)
}
\arguments{
\item{all}{All arguments are the identically named variables in \code{pda.mcmc} / \code{pda.emulator}.}
}
\value{
Ensemble ID of the created ensemble
}
\description{
Create PDA Ensemble
}
\author{
Ryan Kelly
}
|
\name{boundingBox}
\alias{boundingBox}
\alias{boundingBox,GInteractions-method}
\alias{boundingBox,InteractionSet-method}
\title{Get the bounding box}
\description{Computing a minimum bounding box for groups of pairwise interactions.}
\usage{
\S4method{boundingBox}{GInteractions}(x, f)
# Note, the same call is used for the InteractionSet method.
}
\arguments{
\item{x}{A GInteractions or InteractionSet object.}
\item{f}{
A factor or vector of length equal to that of \code{x}, indicating the group to which each pairwise interaction belongs.
}
}
\details{
For any group of pairwise interactions, the minimum bounding box is the smallest rectangle in the interaction space that contains all interactions in the group.
Each side of the box has coordinates spanning the most extreme anchor regions on the corresponding chromosome.
This is often useful for summarizing clusters of interactions.
Grouping of interactions is specified using \code{f}, where interactions in \code{x} with the same level of \code{f} are considered to be in the same group.
If \code{f} is not specified, all interactions in \code{x} are assumed to be in a single group (named as ``1'').
An error will be raised if a group spans multiple chromosomes for either the first or second anchor regions.
The function returns a GRangesList object with entries \code{first} and \code{second}.
This contains the coordinates for the sides of the bounding box across the first or second anchor regions, respectively.
The internal GRanges are named according to the levels of \code{f},
such that the entries with matching names across \code{first} and \code{second} constitute the bounding box for the group corresponding to that level of \code{f}.
It is recommended to run \code{\link{swapAnchors}} prior to computing the bounding box for intra-chromosomal groups.
If all \code{anchor1 >= anchor2} or all \code{anchor1 <= anchor2}, all interactions will lie on one side of the diagonal of the intra-chromosomal interaction space.
This results in the smallest possible minimum bounding box, which will only increase in size if interactions are placed on the other side of the diagonal.
Alternatively, users can specify a StrictGInteractions object as an input into \code{x}, in which \code{anchor1 >= anchor2} is enforced automatically.
% Bit of a pain to prove, but basically, if you flip a point to be above the diagonal, the Chebyshev distance to a point below the diagonal will always increase.
% This means that you must increase the size of one of your sides of your bounding box.
}
\seealso{
\code{\link{GInteractions-class}}
}
\author{
Aaron Lun
}
\examples{
example(GInteractions, echo=FALSE)
# Making up a sensible grouping.
gi <- sort(gi)
all.chrs <- as.character(seqnames(regions(gi)))
f <- paste0(all.chrs[anchors(gi, type="first", id=TRUE)], ".",
all.chrs[anchors(gi, type="second", id=TRUE)])
boundingBox(gi, f)
boundingBox(swapAnchors(gi), f)
# Fails for multiple chromosomes
try(out <- boundingBox(gi))
in.A <- f=="chrA.chrA"
out <- boundingBox(gi[in.A])
}
| /man/boundingBox.Rd | no_license | ttriche/InteractionSet | R | false | false | 3,074 | rd | \name{boundingBox}
\alias{boundingBox}
\alias{boundingBox,GInteractions-method}
\alias{boundingBox,InteractionSet-method}
\title{Get the bounding box}
\description{Computing a minimum bounding box for groups of pairwise interactions.}
\usage{
\S4method{boundingBox}{GInteractions}(x, f)
# Note, the same call is used for the InteractionSet method.
}
\arguments{
\item{x}{A GInteractions or InteractionSet object.}
\item{f}{
A factor or vector of length equal to that of \code{x}, indicating the group to which each pairwise interaction belongs.
}
}
\details{
For any group of pairwise interactions, the minimum bounding box is the smallest rectangle in the interaction space that contains all interactions in the group.
Each side of the box has coordinates spanning the most extreme anchor regions on the corresponding chromosome.
This is often useful for summarizing clusters of interactions.
Grouping of interactions is specified using \code{f}, where interactions in \code{x} with the same level of \code{f} are considered to be in the same group.
If \code{f} is not specified, all interactions in \code{x} are assumed to be in a single group (named as ``1'').
An error will be raised if a group spans multiple chromosomes for either the first or second anchor regions.
The function returns a GRangesList object with entries \code{first} and \code{second}.
This contains the coordinates for the sides of the bounding box across the first or second anchor regions, respectively.
The internal GRanges are named according to the levels of \code{f},
such that the entries with matching names across \code{first} and \code{second} constitute the bounding box for the group corresponding to that level of \code{f}.
It is recommended to run \code{\link{swapAnchors}} prior to computing the bounding box for intra-chromosomal groups.
If all \code{anchor1 >= anchor2} or all \code{anchor1 <= anchor2}, all interactions will lie on one side of the diagonal of the intra-chromosomal interaction space.
This results in the smallest possible minimum bounding box, which will only increase in size if interactions are placed on the other side of the diagonal.
Alternatively, users can specify a StrictGInteractions object as an input into \code{x}, in which \code{anchor1 >= anchor2} is enforced automatically.
% Bit of a pain to prove, but basically, if you flip a point to be above the diagonal, the Chebyshev distance to a point below the diagonal will always increase.
% This means that you must increase the size of one of your sides of your bounding box.
}
\seealso{
\code{\link{GInteractions-class}}
}
\author{
Aaron Lun
}
\examples{
example(GInteractions, echo=FALSE)
# Making up a sensible grouping.
gi <- sort(gi)
all.chrs <- as.character(seqnames(regions(gi)))
f <- paste0(all.chrs[anchors(gi, type="first", id=TRUE)], ".",
all.chrs[anchors(gi, type="second", id=TRUE)])
boundingBox(gi, f)
boundingBox(swapAnchors(gi), f)
# Fails for multiple chromosomes
try(out <- boundingBox(gi))
in.A <- f=="chrA.chrA"
out <- boundingBox(gi[in.A])
}
|
# Map a Spark JVM object to the name of its sparklyr constructor function.
#
# Walks the object's full JVM class hierarchy (most specific class first),
# translates each class name through the sparkml/class_mapping.json lookup
# (ml_map_class), prefixes each hit with "new_", keeps only names that
# actually exist as functions in the sparklyr namespace, and returns the
# first match.  The result is a length-one character vector, or a
# zero-length character vector when no constructor is registered.
ml_get_constructor <- function(jobj) {
  jobj %>%
    jobj_class(simple_name = FALSE) %>%
    purrr::map(ml_map_class) %>%          # JVM class -> mapped name (NULL if unmapped)
    purrr::compact() %>%                  # drop unmapped classes
    purrr::map(~ paste0("new_", .x)) %>%
    purrr::keep(~ exists(.x, where = asNamespace("sparklyr"), mode = "function")) %>%
    rlang::flatten_chr() %>%
    head(1)
}
#' Wrap a Spark ML JVM object
#'
#' Looks up the sparklyr ML constructor associated with a JVM object by
#' inspecting the object's class, then invokes it. The lookup table is
#' specified by the `sparkml/class_mapping.json` files of sparklyr and the
#' loaded extensions.
#'
#' @param jobj The jobj for the pipeline stage.
#'
#' @keywords internal
#' @export
ml_call_constructor <- function(jobj) {
  constructor <- ml_get_constructor(jobj)
  do.call(constructor, list(jobj = jobj))
}
# Internal constructor for ml_pipeline_stage objects: bundles the stage's
# uid, its parameter map, any subclass-specific fields passed via `...`,
# and the underlying jobj, then tags the result with the subclass vector
# followed by the base class "ml_pipeline_stage".
new_ml_pipeline_stage <- function(jobj, ..., class = character()) {
  stage <- list(
    uid = invoke(jobj, "uid"),
    param_map = ml_get_param_map(jobj),
    ...,
    .jobj = jobj
  )
  class(stage) <- c(class, "ml_pipeline_stage")
  stage
}
| /R/ml_constructor_utils.R | permissive | mtoto/sparklyr | R | false | false | 1,030 | r | ml_get_constructor <- function(jobj) {
jobj %>%
jobj_class(simple_name = FALSE) %>%
purrr::map(ml_map_class) %>%
purrr::compact() %>%
purrr::map(~ paste0("new_", .x)) %>%
purrr::keep(~ exists(.x, where = asNamespace("sparklyr"), mode = "function")) %>%
rlang::flatten_chr() %>%
head(1)
}
#' Wrap a Spark ML JVM object
#'
#' Identifies the associated sparklyr ML constructor for the JVM object by inspecting its
#' class and performing a lookup. The lookup table is specified by the
#' `sparkml/class_mapping.json` files of sparklyr and the loaded extensions.
#'
#' @param jobj The jobj for the pipeline stage.
#'
#' @keywords internal
#' @export
ml_call_constructor <- function(jobj) {
  do.call(ml_get_constructor(jobj), list(jobj = jobj))
}
# Internal constructor for ml_pipeline_stage objects: bundles the stage's
# uid, its parameter map, any subclass-specific fields passed via `...`,
# and the underlying jobj, classed as c(class, "ml_pipeline_stage").
new_ml_pipeline_stage <- function(jobj, ..., class = character()) {
  structure(
    list(
      uid = invoke(jobj, "uid"),
      param_map = ml_get_param_map(jobj),
      ...,
      .jobj = jobj
    ),
    class = c(class, "ml_pipeline_stage")
  )
}
|
# Copyright (C) 2008-2009 - INRIA - Michael Baudin
# Copyright (C) 2009-2010 - DIGITEO - Michael Baudin
# Copyright (C) 2010-$year$ - Sebastien Bihorel
#
# This file must be used under the terms of the CeCILL.
# This source file is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at
# http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
#
# This source code is a R port of the optimsimplex component
# originally written by Michael Baudin for Scilab.
# Print a log message when the object's verbose flag is enabled.
#
# Args:
#   this: an optimsimplex object (a list); only its $verbose flag is read.
#   msg:  character string to print (one line).
#
# Returns the object unchanged, so the result can be reassigned/chained.
optimsimplex.log <- function(this=NULL,msg=NULL){
  # isTRUE() is robust where `this$verbose == TRUE` is not: it evaluates
  # to FALSE (instead of raising an error or yielding NA) when verbose
  # is NULL, NA, or missing from the list.
  if (isTRUE(this$verbose)) {
    cat(sprintf('%s\n',msg))
  }
  return(this)
}
| /R/optimsimplex.log.R | no_license | sbihorel/optimsimplex | R | false | false | 675 | r | # Copyright (C) 2008-2009 - INRIA - Michael Baudin
# Copyright (C) 2009-2010 - DIGITEO - Michael Baudin
# Copyright (C) 2010-$year$ - Sebastien Bihorel
#
# This file must be used under the terms of the CeCILL.
# This source file is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at
# http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
#
# This source code is a R port of the optimsimplex component
# originally written by Michael Baudin for Scilab.
# Print a log message when the object's verbose flag is enabled.
#
# Args:
#   this: an optimsimplex object (a list); only its $verbose flag is read.
#   msg:  character string to print (one line).
#
# Returns the object unchanged, so the result can be reassigned/chained.
optimsimplex.log <- function(this=NULL,msg=NULL){
  # isTRUE() is robust where `this$verbose == TRUE` is not: it evaluates
  # to FALSE (instead of raising an error or yielding NA) when verbose
  # is NULL, NA, or missing from the list.
  if (isTRUE(this$verbose)) {
    cat(sprintf('%s\n',msg))
  }
  return(this)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_fun.R
\name{nc}
\alias{nc}
\title{function to convert to character then numeric}
\usage{
nc(x)
}
\arguments{
\item{x}{any factor to convert to numeric}
}
\value{
Returns supplied parameter as numeric
}
\description{
The function is a shorthand for converting factors to numeric
}
\examples{
num <- nc(test_df$power)
}
| /man/nc.Rd | no_license | romainfrancois/PVplr | R | false | true | 425 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_fun.R
\name{nc}
\alias{nc}
\title{function to convert to character then numeric}
\usage{
nc(x)
}
\arguments{
\item{x}{any factor to convert to numeric}
}
\value{
Returns supplied parameter as numeric
}
\description{
The function is a shorthand for converting factors to numeric
}
\examples{
num <- nc(test_df$power)
}
|
# Return the article index for `package`: NULL package means the package
# currently being built (taken from the build context); in-development
# packages use their live vignette sources; anything else uses the
# installed/published metadata.
article_index <- function(package) {
  if (is.null(package)) {
    return(context_get("article_index"))
  }
  if (is_devtools_package(package)) {
    # Use live docs for in-development packages
    return(article_index_local(package))
  }
  article_index_remote(package)
}
# Build an article index from a package's vignette sources on disk:
# a character vector of output .html paths named by vignette name.
# Returns an empty character vector when the package is not installed.
article_index_local <- function(package, path = find.package(package)) {
  if (!is_installed(package)) {
    return(character())
  }
  rmd_paths <- dir(
    file.path(path, "vignettes"),
    pattern = "\\.[rR]md$",
    recursive = TRUE
  )
  article_names <- gsub("\\.[rR]md$", "", basename(rmd_paths))
  html_paths <- gsub("\\.[rR]md$", ".html", rmd_paths)
  set_names(html_paths, article_names)
}
# Build an article index for an installed/released package.
# Published pkgdown metadata is preferred because it lists all articles,
# not just vignettes; otherwise fall back to the installed vignette index,
# returning NULL when the package has none.
article_index_remote <- function(package) {
  metadata <- remote_metadata(package)
  if (!is.null(metadata)) {
    return(metadata$articles)
  }
  index_path <- system.file("Meta", "vignette.rds", package = package)
  if (identical(index_path, "")) {
    return(NULL)
  }
  vignette_meta <- readRDS(index_path)
  vignette_names <- tools::file_path_sans_ext(vignette_meta$File)
  set_names(vignette_meta$PDF, vignette_names)
}
# Look up a single article's output path by name; NULL when the
# package's article index has no entry for `name`.
find_article <- function(package, name) {
  index <- article_index(package)
  if (!has_name(index, name)) {
    return(NULL)
  }
  index[[name]]
}
| /R/link-article-index.R | no_license | jimhester/pkgdown | R | false | false | 1,307 | r | article_index <- function(package) {
if (is.null(package)) {
context_get("article_index")
} else if (is_devtools_package(package)) {
# Use live docs for in-development packages
article_index_local(package)
} else {
article_index_remote(package)
}
}
# Build an article index from a package's vignette sources on disk:
# a character vector of output .html paths named by vignette name.
# Returns an empty character vector when the package is not installed.
article_index_local <- function(package, path = find.package(package)) {
  if (!is_installed(package)) {
    return(character())
  }
  vig_path <- dir(
    file.path(path, "vignettes"),
    pattern = "\\.[rR]md$",
    recursive = TRUE
  )
  out_path <- gsub("\\.[rR]md$", ".html", vig_path)
  vig_name <- gsub("\\.[rR]md$", "", basename(vig_path))
  set_names(out_path, vig_name)
}
# Build an article index for an installed/released package, preferring
# published pkgdown metadata and falling back to the vignette index.
article_index_remote <- function(package) {
  # Ideally will use published metadata because that includes all articles
  # not just vignettes
  metadata <- remote_metadata(package)
  if (!is.null(metadata)) {
    return(metadata$articles)
  }
  # Otherwise, fallback to vignette index; NULL when the package has none
  path <- system.file("Meta", "vignette.rds", package = package)
  if (path == "") {
    return(NULL)
  }
  meta <- readRDS(path)
  name <- tools::file_path_sans_ext(meta$File)
  set_names(meta$PDF, name)
}
# Look up a single article's output path by name; NULL when absent.
find_article <- function(package, name) {
  index <- article_index(package)
  if (has_name(index, name)) {
    index[[name]]
  } else {
    NULL
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corrupt_data.R
\name{corrupt.character}
\alias{corrupt.character}
\title{corrupt.string}
\usage{
corrupt.character(val, error_rate = 0.2, ...)
}
\arguments{
\item{val}{The value to be corrupted. Should be string}
\item{error_rate}{The probability of the value being corrupted. Default is 0.2.}
\item{scale}{The scale applied to the error.}
}
\value{
The value passed in, or a corrupted value with probability error rate.
}
\description{
A general string corruption function. Either adds, deletes, or substitutes single characters in the string.
The number of insertions, deletions, and replacements is generated from the normal, scaled by scale.
A built in corruption function, ready to be passed to corrupt data. Does not need to be passed to corrupt.factory.
}
\author{
Sam Murray<slmurray@andrew.cmu.edu>
}
| /man/corrupt.character.Rd | no_license | Sam-Murray/RecordLinkUtil | R | false | true | 888 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corrupt_data.R
\name{corrupt.character}
\alias{corrupt.character}
\title{corrupt.string}
\usage{
corrupt.character(val, error_rate = 0.2, ...)
}
\arguments{
\item{val}{The value to be corrupted. Should be string}
\item{error_rate}{The probability of the value being corrupted. Default is 0.2.}
\item{scale}{The scale applied to the error.}
}
\value{
The value passed in, or a corrupted value with probability error rate.
}
\description{
A general string corruption function. Either adds, deletes, or substitutes single characters in the string.
The number of insertions, deletions, and replacements is generated from the normal, scaled by scale.
A built in corruption function, ready to be passed to corrupt data. Does not need to be passed to corrupt.factory.
}
\author{
Sam Murray<slmurray@andrew.cmu.edu>
}
|
# Updated October 15, 2018
# SOM (self-organizing map) tutorial script: reads error-code event data
# and trains/visualizes a kohonen SOM.
# set working directory
setwd("C:/Users/Zara/Repositories/ISSRE2018/Tutorial Files")
# modify this to the path for your directory
# NOTE(review): setwd() with an absolute machine-specific path makes the
# script non-portable; consider a project-relative path instead.
# Download kohonen package documentation from:
# https://cran.r-project.org/web/packages/kohonen/kohonen.pdf
# load packages
library(rmarkdown)
library(knitr)
library(dplyr)
library(kohonen)
library(dummies)
library(ggplot2)
library(sp)
library(reshape2)
library(RColorBrewer)
library(magrittr)
# read data (expects SR-example-data.csv in the working directory)
SR1 <- read.csv(file = "SR-example-data.csv", head=TRUE, sep =",")
# exploratory analysis
summary(SR1)
SR1_SOM <- SR1[, -c(1)] # drop the first column -- presumably an index/ID; confirm
summary(SR1_SOM)
str(SR1_SOM)
names(SR1_SOM)
hist(SR1_SOM$EventDur)
select(SR1_SOM, EventDur) %>% filter(EventDur <= 1000) -> d # keep values <= 1000 for a readable histogram
hist(d$EventDur, breaks = 15)
hist(d$EventDur, breaks = 15)
#create frequency tables of error codes by minute
# EC1
EC1_table <- table(SR1_SOM$Time, SR1_SOM$EC1)
head(EC1_table)
margin.table(EC1_table, 1) #summed over time, by minute
margin.table(EC1_table, 2) #summed over EC1 = 1 or 2
barplot(margin.table(EC1_table, 1),
col = c("darkblue", "maroon"), xlab = "Time", ylab = "Count of Total Records") #summed over Time
# EC2
EC2_table <- table(SR1_SOM$Time, SR1_SOM$EC2)
head(EC2_table)
margin.table(EC2_table, 2) #summed over EC2 values
EC2_counts <- table(SR1_SOM$EC2, SR1_SOM$Time) #summed over EC2 values by minute
head(EC2_counts)
barplot(EC2_counts, main="Distribution of EC2 by Minute",
xlab="Time", col = rainbow(7),
legend = rownames(EC2_counts), beside=TRUE)
# EC3
EC3_table <- table(SR1_SOM$Time, SR1_SOM$EC3)
head(EC3_table)
margin.table(EC3_table, 2) #summed over EC3 values
EC3_counts <- table(SR1_SOM$EC3, SR1_SOM$Time) #summed over EC3 values by minute
head(EC3_counts)
barplot(EC3_counts, main="Distribution of EC3 by Minute",
xlab="Time", col = rainbow(10),
legend = rownames(EC3_counts), beside=TRUE)
################
# Colour palette definition
display.brewer.all() # shows all RColorBrewer palettes (interactive aid only)
# blue colours for the large cluster
colors <- brewer.pal(10, "Paired") #first 2 colors are blue
pal <- colorRampPalette(colors)
my_palette <- c(pal(10))
# Palette function in the style expected by the kohonen plotting routines
# Reversed rainbow palette (cool blue -> hot red) for the kohonen plots.
#
# Args:
#   n:     number of colours to generate.
#   alpha: opacity in [0, 1], forwarded to rainbow().
#
# Returns a character vector of n hex colours, hottest first.
coolBlueHotRed <- function(n, alpha = 1) {
  # rev(seq_len(n)) instead of n:1 -- the latter yields 0:1 when n == 0
  # and would return a spurious NA instead of an empty vector.
  rainbow(n, end = 4/6, alpha = alpha)[rev(seq_len(n))]
}
# SOM Model #################################################
# scale the data
# center = TRUE: centering subtracts the column means (omitting NAs) of x
# from the corresponding columns.
# scale = TRUE: scaling divides the (centered) columns of x by their
# standard deviations (or by the root mean square if center is FALSE).
SR1_SOM.sc <- scale(SR1_SOM, center = TRUE, scale = TRUE)
summary(SR1_SOM.sc)
set.seed(3) # fixed seed so the SOM training is reproducible
# you can experiment with other grid sizes
som_grid <- somgrid(xdim = 15, ydim=10, topo="hexagonal")
som_model <- som(SR1_SOM.sc, grid = som_grid, rlen = 100)
# rlen is the number of times the complete data set is presented to the network
summary(som_model)
print(som_model)
# Changes by iteration (specified with rlen, try different values)
plot(som_model, type = "changes", main = "SR1: SOM")
# plot SOMs
# counts per node - empty nodes shown in gray
plot(som_model, type = "counts", main="SR1: Node Counts")
# shows the sum of the distances to all immediate neighbours,
# also known as a U-matrix plot.
# Units near a class boundary tend to have higher average distances to their neighbours.
plot(som_model, type="dist.neighbours", main = "SR1: SOM neighbour distances", palette.name=grey.colors)
# code spread
plot(som_model, type = "codes", main = "SR1: Codebook Vectors")
# shows the mean distance of objects mapped to a unit to that unit's codebook vector.
# The smaller the distances, the better the objects are represented by the codebook vectors.
plot(som_model, type = "quality", main="SR1: Node Quality/Distance")
# Plot the original scale heatmap for one variable
# (it would come from the training set if one had been split off;
# here all the data was used for the model)
var <- 1 #column number 1 is EventDuration
# NOTE(review): som_model$SR1_SOM is not set anywhere above, so
# names(som_model$SR1_SOM)[var] is likely NULL; names(SR1_SOM)[var]
# was probably intended -- confirm.
plot(som_model, type = "property", property = as.data.frame(som_model$codes)[,var],
     main=names(som_model$SR1_SOM)[var],palette.name=coolBlueHotRed )
# Plot the original scale heatmap for all variables
par(mfrow=c(2,3)) #6 plots per page
for (i in 1:6) {
var <- i #define the variable to plot
var_unscaled <- aggregate(as.numeric(SR1_SOM[,var]),
by=list(som_model$unit.classif), FUN=mean, simplify=TRUE)[,2]
plot(som_model, type = "property", property=var_unscaled, main=names(SR1_SOM)[var],
palette.name=coolBlueHotRed)
}
par(mfrow=c(1,1))
#Create plots by minute
# try on your own
######################################################################
# ------------------ Clustering SOM results -------------------
# Show the WCSS (within cluster sum of squares) metric for kmeans
# for different clustering sizes.
# Can be used as a "rough" indicator of the ideal number of clusters
# --> have to convert som_model$codes from list to dataframe, else gives error
mySR1_SOM <- as.data.frame((som_model$codes))
wss <- ((nrow(mySR1_SOM))-1)*sum(apply(mySR1_SOM,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(mySR1_SOM,
centers=i)$withinss)
par(mar=c(5.1,4.1,4.1,2.1))
plot(1:15, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares", main="Within cluster sum of squares (WCSS)")
# Form clusters on grid
## use hierarchical clustering to cluster the codebook vectors;
## last parameter is number of clusters
som_cluster <- cutree(hclust(dist(as.data.frame((som_model$codes)))), 8)
som_cluster #this shows which cluster each SOM node belongs to
# Show the map with different colours for every cluster
# See plot.kohonen documentation for details
plot(som_model, type="mapping", #shows where objects are mapped
pch = ",",
#labels = as.integer(SR1_SOM$EC1), col = as.integer(SR1_SOM$EC1),
#labels = as.integer(SR1_SOM$Time), col = as.integer(SR1_SOM$Time),
bgcol = my_palette[som_cluster], main = "SR1 - 8 Clusters")
add.cluster.boundaries(som_model, som_cluster, lwd = 5, col = "maroon")
#identify.kohonen
#show the same plot with the codes instead of colours and points
plot(som_model, type="codes", codeRendering = "segments", bgcol = my_palette[som_cluster],
main = "SR1 - 8 Clusters")
add.cluster.boundaries(som_model, som_cluster, lwd = 3, col = "brown")
identify(som_model, som_cluster)
#######################################################################
# Reference: full argument lists of plot.kohonen and add.cluster.boundaries,
# copied from the kohonen package documentation. These are SIGNATURES, not
# runnable calls -- `x` and `clustering` are undefined here, so the final
# line errors when the script is sourced, and the plot() line passes the
# undefined name `property`. They are therefore kept commented out.
# plot(som_model, type = c("codes", "changes", "counts",
#                          "dist.neighbours", "mapping", "property", "quality"),
#      whatmap = NULL, classif = NULL, labels = NULL,
#      pchs = NULL, main = NULL, palette.name = NULL,
#      ncolors = 20, bgcol = NULL, zlim = NULL,
#      heatkey = TRUE, property, codeRendering = NULL,
#      keepMargins = FALSE, heatkeywidth = .2,
#      shape = c("round", "straight"), border = "black")
# add.cluster.boundaries(x, clustering, lwd = 5)
| /Tutorial Files/SOM.R | no_license | zahrakhoshmanesh/ISSRE2018 | R | false | false | 7,074 | r | # Updated October 15, 2018
# set working directory
setwd("C:/Users/Zara/Repositories/ISSRE2018/Tutorial Files")
# modify this to the path for your directory
# Download kohonen package documentation from:
#https://cran.r-project.org/web/packages/kohonen/kohonen.pdf
# load packages
library(rmarkdown)
library(knitr)
library(dplyr)
library(kohonen)
library(dummies)
library(ggplot2)
library(sp)
library(reshape2)
library(RColorBrewer)
library(magrittr)
#read data
SR1 <- read.csv(file = "SR-example-data.csv", head=TRUE, sep =",")
#exploratory analysis
summary(SR1)
SR1_SOM <- SR1[, -c(1)] #remove first column
summary(SR1_SOM)
str(SR1_SOM)
names(SR1_SOM)
hist(SR1_SOM$EventDur)
select(SR1_SOM, EventDur) %>% filter(EventDur <= 1000) -> d #select values less tha 1000
hist(d$EventDur, breaks = 15)
#create frequency tables of error codes by minute
# EC1
EC1_table <- table(SR1_SOM$Time, SR1_SOM$EC1)
head(EC1_table)
margin.table(EC1_table, 1) #summed over time, by minute
margin.table(EC1_table, 2) #summed over EC1 = 1 or 2
barplot(margin.table(EC1_table, 1),
col = c("darkblue", "maroon"), xlab = "Time", ylab = "Count of Total Records") #summed over Time
# EC2
EC2_table <- table(SR1_SOM$Time, SR1_SOM$EC2)
head(EC2_table)
margin.table(EC2_table, 2) #summed over EC2 values
EC2_counts <- table(SR1_SOM$EC2, SR1_SOM$Time) #summed over EC2 values by minute
head(EC2_counts)
barplot(EC2_counts, main="Distribution of EC2 by Minute",
xlab="Time", col = rainbow(7),
legend = rownames(EC2_counts), beside=TRUE)
# EC3
EC3_table <- table(SR1_SOM$Time, SR1_SOM$EC3)
head(EC3_table)
margin.table(EC3_table, 2) #summed over EC3 values
EC3_counts <- table(SR1_SOM$EC3, SR1_SOM$Time) #summed over EC3 values by minute
head(EC3_counts)
barplot(EC3_counts, main="Distribution of EC3 by Minute",
xlab="Time", col = rainbow(10),
legend = rownames(EC3_counts), beside=TRUE)
################
# Colour palette definition
display.brewer.all()
#blue color for large cluster
colors <- brewer.pal(10, "Paired") #first 2 colors are blue
pal <- colorRampPalette(colors)
my_palette <- c(pal(10))
# Palette defined by kohonen package
# Palette helper for the kohonen plots: n colours running from cool blue
# (low values) to hot red (high values), with optional transparency.
coolBlueHotRed <- function(n, alpha = 1) {
  # rainbow() runs red -> blue over hues [0, 4/6]; reversing the result
  # puts blue at the low end of the scale.
  spectrum <- rainbow(n, end = 4/6, alpha = alpha)
  spectrum[n:1]
}
# SOM Model #################################################
# scale the data
#center is TRUE then centering done by subtracting the column means (omitting NAs) of x from their
#corresponding columns
#scale is TRUE then scaling done by dividing the (centered) columns of x by their standard deviations
#if center is TRUE, and the root mean square otherwise.
SR1_SOM.sc <- scale(SR1_SOM, center = TRUE, scale = TRUE)
summary(SR1_SOM.sc)
set.seed(3)
# you can experiment with other grid sizes
som_grid <- somgrid(xdim = 15, ydim=10, topo="hexagonal")
som_model <- som(SR1_SOM.sc, grid = som_grid, rlen = 100)
#rlen is the number of times the complete data set will be presented to the network
summary(som_model)
print(som_model)
# Changes by iteration (specified with rlen, try different values)
plot(som_model, type = "changes", main = "SR1: SOM")
#plot SOMs
#counts per node - empty nodes shown in gray
plot(som_model, type = "counts", main="SR1: Node Counts")
#shows the sum of the distances to all immediate neighbours.
#also known as a U-matrix plot.
#Units near a class boundary likely to have higher average distances to their neighbours
plot(som_model, type="dist.neighbours", main = "SR1: SOM neighbour distances", palette.name=grey.colors)
#code spread
plot(som_model, type = "codes", main = "SR1: Codebook Vectors")
#shows the mean distance of objects mapped to a unit to the codebook vector of that unit.
#The smaller the distances, the better the objects are represented by the codebook vectors
plot(som_model, type = "quality", main="SR1: Node Quality/Distance")
# Plot the original scale heatmap for all variables
# (it will be from training set if a training dataset was created;
# we did not do that, used all the variables for the model)
# NOTE(review): `som_model$SR1_SOM` is not a component of a kohonen object,
# so `names(som_model$SR1_SOM)[var]` likely evaluates to NULL and this plot
# gets no title; `names(SR1_SOM)[var]` (as in the loop below) is probably intended.
var <- 1 #column number 1 is EventDuration
plot(som_model, type = "property", property = as.data.frame(som_model$codes)[,var],
main=names(som_model$SR1_SOM)[var],palette.name=coolBlueHotRed )
# Plot the original scale heatmap for all variables
par(mfrow=c(2,3)) #6 plots per page
for (i in 1:6) {
var <- i #define the variable to plot
# per-node mean of the (unscaled) variable, grouped by the winning unit
var_unscaled <- aggregate(as.numeric(SR1_SOM[,var]),
by=list(som_model$unit.classif), FUN=mean, simplify=TRUE)[,2]
plot(som_model, type = "property", property=var_unscaled, main=names(SR1_SOM)[var],
palette.name=coolBlueHotRed)
}
par(mfrow=c(1,1))
#Create plots by minute
# try on your own
######################################################################
# ------------------ Clustering SOM results -------------------
# Show the WCSS (within cluster sum of squares) metric for kmeans
# for different clustering sizes.
# Can be used as a "rough" indicator of the ideal number of clusters
# --> have to convert som_model$codes from list to dataframe, else gives error
mySR1_SOM <- as.data.frame((som_model$codes))
wss <- ((nrow(mySR1_SOM))-1)*sum(apply(mySR1_SOM,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(mySR1_SOM,
centers=i)$withinss)
par(mar=c(5.1,4.1,4.1,2.1))
plot(1:15, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares", main="Within cluster sum of squares (WCSS)")
# Form clusters on grid
## use hierarchical clustering to cluster the codebook vectors;
## last parameter is number of clusters
som_cluster <- cutree(hclust(dist(as.data.frame((som_model$codes)))), 8)
som_cluster #this shows which cluster each SOM node belongs to
# Show the map with different colours for every cluster
# See plot.kohonen documentation for details
plot(som_model, type="mapping", #shows where objects are mapped
pch = ",",
#labels = as.integer(SR1_SOM$EC1), col = as.integer(SR1_SOM$EC1),
#labels = as.integer(SR1_SOM$Time), col = as.integer(SR1_SOM$Time),
bgcol = my_palette[som_cluster], main = "SR1 - 8 Clusters")
add.cluster.boundaries(som_model, som_cluster, lwd = 5, col = "maroon")
#identify.kohonen
#show the same plot with the codes instead of colours and points
plot(som_model, type="codes", codeRendering = "segments", bgcol = my_palette[som_cluster],
main = "SR1 - 8 Clusters")
add.cluster.boundaries(som_model, som_cluster, lwd = 3, col = "brown")
identify(som_model, som_cluster)
#######################################################################
# Reference: full argument lists of plot.kohonen and add.cluster.boundaries,
# copied from the kohonen package documentation. These are SIGNATURES, not
# runnable calls -- `x` and `clustering` are undefined here, so the final
# line errors when the script is sourced, and the plot() line passes the
# undefined name `property`. They are therefore kept commented out.
# plot(som_model, type = c("codes", "changes", "counts",
#                          "dist.neighbours", "mapping", "property", "quality"),
#      whatmap = NULL, classif = NULL, labels = NULL,
#      pchs = NULL, main = NULL, palette.name = NULL,
#      ncolors = 20, bgcol = NULL, zlim = NULL,
#      heatkey = TRUE, property, codeRendering = NULL,
#      keepMargins = FALSE, heatkeywidth = .2,
#      shape = c("round", "straight"), border = "black")
# add.cluster.boundaries(x, clustering, lwd = 5)
|
# Insert an empty name attribute into recognized child element tags.
#
# Every line that begins with one of the child tags <class>, <association>,
# <composition> or <modelconstraints> has its closing ">" rewritten so the
# tag reads, e.g., `<class name = "">`. All other lines pass through
# unchanged.
createChildNames = function(modFileVector) {
  child_pattern <- "^<class>|^<association>|^<composition>|^<modelconstraints>"
  is_child <- grepl(child_pattern, modFileVector)
  # Turn `<tag>` into `<tag name = "` ...
  opened <- sub(">", " name = \"", modFileVector[is_child])
  # ... then close both the attribute quote and the tag.
  modFileVector[is_child] <- paste0(opened, "\">")
  modFileVector
}
# Wrap bare element keywords in angle brackets.
#
# Lines that consist exactly of "attributes", "operations" or "constraints"
# become "<attributes>", "<operations>" or "<constraints>"; all other lines
# are returned unchanged.
#
# Simplification vs. the original: the pattern is anchored on both ends, so
# the matched values are exactly fileVector[idx]; the separate
# grep(value = TRUE) pass was redundant and has been removed.
createElementTags = function(fileVector) {
  idx <- grepl("^attributes$|^operations$|^constraints$", fileVector)
  fileVector[idx] <- paste0("<", fileVector[idx], ">")
  fileVector
}
# Insert matching close tags (</attributes>, </operations>, </constraints>)
# into fileVector. Walking the lines in order: when a new section open tag,
# or an element-level close tag (</class>, </modelconstraints>, ...), is
# reached while a previous section is still open, that section's close tag
# is inserted into the result just before the current line.
#
# NOTE(review): the "</composition" alternative in closeElementTags is
# missing its closing ">", but grepl() matches by substring so
# "</composition>" lines are still recognized.
# NOTE(review): 1:length(fileVector) iterates c(1, 0) on empty input;
# seq_along() would be safer.
# NOTE(review): a section still open when the input ends is never closed --
# presumably the input always ends with an element close tag; confirm.
addTagsToElement = function(fileVector) {
tagRoots = c("attributes", "operations", "constraints")
openTags = paste0("<", tagRoots, ">")
closeTags = paste0("</", tagRoots, ">")
closeElementTags = "</class>|</modelconstraints>|</association>|</composition"
# result vector; grows as close tags are inserted
newFileVector = fileVector
# index (into tagRoots) of the currently open section, or NULL if none
priorTagIdx = NULL
# offset between positions in fileVector and newFileVector; starts at -1
# so the first insertion lands just before element i
addCount = -1
for(i in 1:length(fileVector)) {
tag = fileVector[i]
tagIdx = which(openTags == tag)
if(length(tagIdx) > 0) {
# a new section opens: close the previous one first, if any
if(!is.null(priorTagIdx)) {
newFileVector = append(newFileVector, closeTags[priorTagIdx], i + addCount)
addCount =addCount +1
}
priorTagIdx = tagIdx
}
else if (grepl(closeElementTags, tag) == TRUE){
# the enclosing element closes: close any open section before it
if(!is.null(priorTagIdx)) {
newFileVector = append(newFileVector, closeTags[priorTagIdx], i + addCount)
addCount =addCount +1
}
priorTagIdx = NULL
}
}
return(newFileVector)
}
| /R/fileVector.R | no_license | FluvialLandscapeLab/USEgangsta | R | false | false | 1,839 | r | createChildNames = function(modFileVector) {
# create children tags
childTags = "^<class>|^<association>|^<composition>|^<modelconstraints>"
# identify locations where child tags are present
childTagsIdx = grepl(childTags, modFileVector)
# add attribute "name =" to tag
modFileVector[childTagsIdx] = sub(">", paste0(" name = ",'"'), modFileVector[childTagsIdx])
# close tag
modFileVector[childTagsIdx] = paste0(modFileVector[childTagsIdx], paste0('"', ">"))
return(modFileVector)
}
# Wrap bare element keywords in angle brackets.
#
# Lines that consist exactly of "attributes", "operations" or "constraints"
# become "<attributes>", "<operations>" or "<constraints>"; all other lines
# are returned unchanged.
#
# Simplification vs. the original: the pattern is anchored on both ends, so
# the matched values are exactly fileVector[idx]; the separate
# grep(value = TRUE) pass was redundant and has been removed.
createElementTags = function(fileVector) {
  idx <- grepl("^attributes$|^operations$|^constraints$", fileVector)
  fileVector[idx] <- paste0("<", fileVector[idx], ">")
  fileVector
}
# Insert matching close tags (</attributes>, </operations>, </constraints>)
# into fileVector. Walking the lines in order: when a new section open tag,
# or an element-level close tag (</class>, </modelconstraints>, ...), is
# reached while a previous section is still open, that section's close tag
# is inserted into the result just before the current line.
#
# NOTE(review): the "</composition" alternative in closeElementTags is
# missing its closing ">", but grepl() matches by substring so
# "</composition>" lines are still recognized.
# NOTE(review): 1:length(fileVector) iterates c(1, 0) on empty input;
# seq_along() would be safer.
# NOTE(review): a section still open when the input ends is never closed --
# presumably the input always ends with an element close tag; confirm.
addTagsToElement = function(fileVector) {
tagRoots = c("attributes", "operations", "constraints")
openTags = paste0("<", tagRoots, ">")
closeTags = paste0("</", tagRoots, ">")
closeElementTags = "</class>|</modelconstraints>|</association>|</composition"
# result vector; grows as close tags are inserted
newFileVector = fileVector
# index (into tagRoots) of the currently open section, or NULL if none
priorTagIdx = NULL
# offset between positions in fileVector and newFileVector; starts at -1
# so the first insertion lands just before element i
addCount = -1
for(i in 1:length(fileVector)) {
tag = fileVector[i]
tagIdx = which(openTags == tag)
if(length(tagIdx) > 0) {
# a new section opens: close the previous one first, if any
if(!is.null(priorTagIdx)) {
newFileVector = append(newFileVector, closeTags[priorTagIdx], i + addCount)
addCount =addCount +1
}
priorTagIdx = tagIdx
}
else if (grepl(closeElementTags, tag) == TRUE){
# the enclosing element closes: close any open section before it
if(!is.null(priorTagIdx)) {
newFileVector = append(newFileVector, closeTags[priorTagIdx], i + addCount)
addCount =addCount +1
}
priorTagIdx = NULL
}
}
return(newFileVector)
}
|
#' balancePMV7730
#'
#' Calculate the thermal balance of the body in W/mq from the Predicted
#' Mean Vote. The computation is delegated element-wise to the JavaScript
#' function \code{balancePMV7730} loaded in the package's V8 context
#' \code{ct}.
#'
#' @param pmv numeric Predicted Mean Vote (Fanger, following ISO 7730).
#' @param M numeric Metabolism of the person (met); expected to have the
#'   same length as \code{pmv} (the JS loop pairs both by position).
#' @return numeric vector of thermal balance values in W/mq; results equal
#'   to the sentinel value 9999 are returned as \code{NA}.
#'
#' @author Istituto per la Bioeconomia CNR Firenze Italy Alfonso Crisci \email{alfonso.crisci@@ibe.cnr.it}
#' @keywords balancePMV7730
#'
#' @export
#'
#'
#'
#'
balancePMV7730=function(pmv,M) {
# push both inputs into the embedded JS engine as arrays
ct$assign("pmv", as.array(pmv))
ct$assign("M", as.array(M))
# element-wise call of the JS balancePMV7730 over paired pmv/M values
ct$eval("var res=[]; for(var i=0, len=pmv.length; i < len; i++){ res[i]=balancePMV7730(pmv[i],M[i])};")
res=ct$get("res")
# 9999 marks invalid results -> map them to NA
return(ifelse(res==9999,NA,res))
}
| /R/balancePMV7730.r | permissive | alfcrisci/rBiometeo | R | false | false | 787 | r | #' balancePMV7730
#'
#' Calculate the thermal balance of the body in W/mq.
#'
#' @param pmv numeric Predicted Mean Vote Fanger following ISO 7730.
#' @param M numeric Metabolism of the person (met).
#' @return balancePMV7730 in W/mq
#'
#'
#' @author Istituto per la Bioeconomia CNR Firenze Italy Alfonso Crisci \email{alfonso.crisci@@ibe.cnr.it}
#' @keywords balancePMV7730
#'
#' @export
#'
#'
#'
#'
balancePMV7730=function(pmv,M) {
ct$assign("pmv", as.array(pmv))
ct$assign("M", as.array(M))
ct$eval("var res=[]; for(var i=0, len=pmv.length; i < len; i++){ res[i]=balancePMV7730(pmv[i],M[i])};")
res=ct$get("res")
return(ifelse(res==9999,NA,res))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomial.R
\name{bin_skewness}
\alias{bin_skewness}
\title{bin_skewness}
\usage{
bin_skewness(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{parameter p to the binomial distribution}
}
\value{
skewness of binomial
}
\description{
computes binomial skewness
}
\examples{
# skewness
bin_skewness(10, 0.3)
}
| /binomial/man/bin_skewness.Rd | no_license | stat133-sp19/hw-stat133-GavrilM | R | false | true | 411 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomial.R
\name{bin_skewness}
\alias{bin_skewness}
\title{bin_skewness}
\usage{
bin_skewness(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{parameter p to the binomial distribution}
}
\value{
skewness of binomial
}
\description{
computes binomial skewness
}
\examples{
# skewness
bin_skewness(10, 0.3)
}
|
library(dplyr)
# run.sintef.task() is presumably defined in fun.R -- confirm.
source(file = "fun.R")
cell.lines = c("A498", "AGS", "Colo205", "DU145", "MDA-MB-468", "SF295", "SW620", "UACC62")
# Calculate best task's synergies per cell line (highest average F1-Score)
best.list = list()
for (cell.line in cell.lines) {
best.list[[cell.line]] = run.sintef.task(task = "ConExcess", cell.line = cell.line, ci_method = "hsa", consecutive_excess_count = 2.0, excess_threshold = -0.17)
}
# Persist the per-cell-line results for downstream analysis.
saveRDS(best.list, file = "best_data.rds")
| /run_best_task.R | permissive | druglogics/sintef-obs-synergies | R | false | false | 467 | r | library(dplyr)
source(file = "fun.R")
cell.lines = c("A498", "AGS", "Colo205", "DU145", "MDA-MB-468", "SF295", "SW620", "UACC62")
# Calculate best task's synergies per cell line (highest average F1-Score)
best.list = list()
for (cell.line in cell.lines) {
best.list[[cell.line]] = run.sintef.task(task = "ConExcess", cell.line = cell.line, ci_method = "hsa", consecutive_excess_count = 2.0, excess_threshold = -0.17)
}
saveRDS(best.list, file = "best_data.rds")
|
## Class unions so slots may hold a value of the given type or NULL.
setClassUnion("characterOrNULL", c("character", "NULL"))
setClassUnion("sdcProblemOrNULL", c("sdcProblem", "NULL"))
setClassUnion("listOrNull", c("list", "NULL"))
## S4 container describing one tau-argus batch run: file locations, table
## specification, safety rules, solver configuration and the underlying
## sdcProblem object.
tau_BatchObj <- setClass("tau_BatchObj",
slots = c(
path="characterOrNULL",
id="characterOrNULL",
logbook="characterOrNULL",
microdata="characterOrNULL",
metadata="characterOrNULL",
table="characterOrNULL",
safetyrules="characterOrNULL",
readInput="characterOrNULL",
solver="listOrNull",
suppress="characterOrNULL",
writetable="characterOrNULL",
is_table="logical",
obj="sdcProblemOrNULL"
),
# Set the default values for the slots. (optional)
prototype=list(
path=NULL,
id=NULL,
logbook=NULL,
microdata=NULL,
metadata=NULL,
table=NULL,
safetyrules=NULL,
readInput=NULL,
solver=NULL,
suppress=NULL,
writetable=NULL,
is_table=FALSE,
obj=NULL
),
## Invariants: is_table must be a single logical; when a CPLEX solver is
## configured, its license file must exist on disk. A NULL solver is valid.
validity=function(object) {
if (length(object@is_table)!=1) {
stop("length(is_table)!=1\n")
}
if (!is.null(object@solver)) {
if (object@solver$solver=="CPLEX" && !file.exists(object@solver$license)) {
stop("No valid licensefile given!\n")
}
}
return(TRUE)
}
)
## define generics
setGeneric(name="setPath", def=function(obj, f) { standardGeneric("setPath") })
setGeneric(name="setId", def=function(obj, f) { standardGeneric("setId") })
setGeneric(name="setLogbook", def=function(obj, f) { standardGeneric("setLogbook") })
setGeneric(name="setMicrodata", def=function(obj, f) { standardGeneric("setMicrodata") })
setGeneric(name="setMetadata", def=function(obj, f) { standardGeneric("setMetadata") })
setGeneric(name="setTable", def=function(obj, f) { standardGeneric("setTable") })
setGeneric(name="setSafetyrules", def=function(obj, f) { standardGeneric("setSafetyrules") })
setGeneric(name="setReadInput", def=function(obj, f) { standardGeneric("setReadInput") })
setGeneric(name="setSolver", def=function(obj, f) { standardGeneric("setSolver") })
setGeneric(name="setSuppress", def=function(obj, f) { standardGeneric("setSuppress") })
setGeneric(name="setWritetable", def=function(obj, f) { standardGeneric("setWritetable") })
setGeneric(name="setIs_table", def=function(obj, f) { standardGeneric("setIs_table") })
setGeneric(name="setObj", def=function(obj, f) { standardGeneric("setObj") })
## Slot setters: each replaces one slot, re-runs validObject(), and returns
## the modified copy (S4 value semantics -- callers must reassign the result).
setMethod(f="setPath", signature="tau_BatchObj",
definition=function(obj, f) {
obj@path <- f
validObject(obj)
return(obj)
})
setMethod(f="setId", signature="tau_BatchObj",
definition=function(obj, f) {
obj@id <- f
validObject(obj)
return(obj)
})
setMethod(f="setLogbook", signature="tau_BatchObj",
definition=function(obj, f) {
obj@logbook <- f
validObject(obj)
return(obj)
})
setMethod(f="setMicrodata", signature="tau_BatchObj",
definition=function(obj, f) {
obj@microdata <- f
validObject(obj)
return(obj)
})
setMethod(f="setMetadata", signature="tau_BatchObj",
definition=function(obj, f) {
obj@metadata <- f
validObject(obj)
return(obj)
})
setMethod(f="setTable", signature="tau_BatchObj",
definition=function(obj, f) {
obj@table <- f
validObject(obj)
return(obj)
})
setMethod(f="setSafetyrules", signature="tau_BatchObj",
definition=function(obj, f) {
obj@safetyrules <- f
validObject(obj)
return(obj)
})
setMethod(f="setReadInput", signature="tau_BatchObj",
definition=function(obj, f) {
obj@readInput <- f
validObject(obj)
return(obj)
})
setMethod(f="setSolver", signature="tau_BatchObj",
definition=function(obj, f) {
obj@solver <- f
validObject(obj)
return(obj)
})
setMethod(f="setSuppress", signature="tau_BatchObj",
definition=function(obj, f) {
obj@suppress <- f
validObject(obj)
return(obj)
})
setMethod(f="setWritetable", signature="tau_BatchObj",
definition=function(obj, f) {
obj@writetable <- f
validObject(obj)
return(obj)
})
setMethod(f="setIs_table", signature="tau_BatchObj",
definition=function(obj, f) {
obj@is_table <- f
validObject(obj)
return(obj)
})
setMethod(f="setObj", signature="tau_BatchObj",
definition=function(obj, f) {
obj@obj <- f
validObject(obj)
return(obj)
})
setGeneric(name="writeBatchFile", def=function(obj) { standardGeneric("writeBatchFile") })
## Write a tau-argus batch (.arb) file describing the protection problem.
##
## Builds the batch commands (logbook, input data, metadata, table
## specification, safety rules, solver, suppression and output table) from
## the slots of `obj` and writes them, CRLF-terminated, to a standardized
## file name inside obj@path. Returns the path of the written batch file,
## invisibly.
##
## Fix vs. original: the class validity explicitly allows obj@solver to be
## NULL, but `if (obj@solver$solver=="CPLEX")` then raised a zero-length
## condition error; a NULL solver now simply omits the <SOLVER> command.
setMethod(f="writeBatchFile", signature=c("tau_BatchObj"),
definition=function(obj) {
is_table <- obj@is_table
path <- obj@path
cmds <- list()
## header comments identifying the generator
cmds <- append(cmds, "//This batch file was generated by sdcTable")
cmds <- append(cmds, paste("//Date:", Sys.time()))
cmds <- append(cmds, "//")
## resolve file paths; the input data and metadata files must exist
f_log <- normalizePath(file.path(path, obj@logbook), winslash="/", mustWork=FALSE)
f_data <- normalizePath(file.path(path, obj@microdata), winslash="/", mustWork=TRUE)
f_metadata <- normalizePath(file.path(path, obj@metadata), winslash="/", mustWork=TRUE)
cmds <- append(cmds, paste("<LOGBOOK>", dQuote(f_log)))
if (is_table) {
cmds <- append(cmds, paste("<OPENTABLEDATA>", dQuote(f_data)))
} else {
cmds <- append(cmds, paste("<OPENMICRODATA>", dQuote(f_data)))
}
cmds <- append(cmds, paste("<OPENMETADATA>", dQuote(f_metadata)))
cmds <- append(cmds, paste("<SPECIFYTABLE>", obj@table))
cmds <- append(cmds, paste("<SAFETYRULE>", obj@safetyrules))
cmds <- append(cmds, obj@readInput)
## obj@solver may be NULL (permitted by the validity check); only emit a
## <SOLVER> command when a solver was actually configured
if (!is.null(obj@solver)) {
if (obj@solver$solver=="CPLEX") {
f_license <- normalizePath(obj@solver$license, winslash="/", mustWork=TRUE)
cmds <- append(cmds, paste0("<SOLVER> ", obj@solver$solver,",", dQuote(f_license)))
} else {
cmds <- append(cmds, paste("<SOLVER>", obj@solver$solver))
}
}
cmds <- append(cmds, paste("<SUPPRESS>", obj@suppress))
cmds <- append(cmds, paste("<WRITETABLE>", obj@writetable))
fBatch <- generateStandardizedNames(path=obj@path, lab=paste0("batch_",obj@id), ext=".arb")
cmds <- unlist(cmds)
## tau-argus expects CRLF line endings; append a final "\r" so the last
## line is terminated like the rest once cat() inserts "\r\n" separators
cmds[length(cmds)] <- paste0(cmds[length(cmds)],"\r")
cat(cmds, sep="\r\n", file=fBatch)
invisible(fBatch)
})
| /R/classes_tauBatch.r | no_license | mattdowle/sdcTable | R | false | false | 6,011 | r | setClassUnion("characterOrNULL", c("character", "NULL"))
## Class unions so slots may hold a value of the given type or NULL.
setClassUnion("characterOrNULL", c("character", "NULL"))
setClassUnion("sdcProblemOrNULL", c("sdcProblem", "NULL"))
setClassUnion("listOrNull", c("list", "NULL"))
## S4 container describing one tau-argus batch run: file locations, table
## specification, safety rules, solver configuration and the underlying
## sdcProblem object.
tau_BatchObj <- setClass("tau_BatchObj",
slots = c(
path="characterOrNULL",
id="characterOrNULL",
logbook="characterOrNULL",
microdata="characterOrNULL",
metadata="characterOrNULL",
table="characterOrNULL",
safetyrules="characterOrNULL",
readInput="characterOrNULL",
solver="listOrNull",
suppress="characterOrNULL",
writetable="characterOrNULL",
is_table="logical",
obj="sdcProblemOrNULL"
),
# Set the default values for the slots. (optional)
prototype=list(
path=NULL,
id=NULL,
logbook=NULL,
microdata=NULL,
metadata=NULL,
table=NULL,
safetyrules=NULL,
readInput=NULL,
solver=NULL,
suppress=NULL,
writetable=NULL,
is_table=FALSE,
obj=NULL
),
## Invariants: is_table must be a single logical; when a CPLEX solver is
## configured, its license file must exist on disk. A NULL solver is valid.
validity=function(object) {
if (length(object@is_table)!=1) {
stop("length(is_table)!=1\n")
}
if (!is.null(object@solver)) {
if (object@solver$solver=="CPLEX" && !file.exists(object@solver$license)) {
stop("No valid licensefile given!\n")
}
}
return(TRUE)
}
)
## define generics
setGeneric(name="setPath", def=function(obj, f) { standardGeneric("setPath") })
setGeneric(name="setId", def=function(obj, f) { standardGeneric("setId") })
setGeneric(name="setLogbook", def=function(obj, f) { standardGeneric("setLogbook") })
setGeneric(name="setMicrodata", def=function(obj, f) { standardGeneric("setMicrodata") })
setGeneric(name="setMetadata", def=function(obj, f) { standardGeneric("setMetadata") })
setGeneric(name="setTable", def=function(obj, f) { standardGeneric("setTable") })
setGeneric(name="setSafetyrules", def=function(obj, f) { standardGeneric("setSafetyrules") })
setGeneric(name="setReadInput", def=function(obj, f) { standardGeneric("setReadInput") })
setGeneric(name="setSolver", def=function(obj, f) { standardGeneric("setSolver") })
setGeneric(name="setSuppress", def=function(obj, f) { standardGeneric("setSuppress") })
setGeneric(name="setWritetable", def=function(obj, f) { standardGeneric("setWritetable") })
setGeneric(name="setIs_table", def=function(obj, f) { standardGeneric("setIs_table") })
setGeneric(name="setObj", def=function(obj, f) { standardGeneric("setObj") })
## Slot setters: each replaces one slot, re-runs validObject(), and returns
## the modified copy (S4 value semantics -- callers must reassign the result).
setMethod(f="setPath", signature="tau_BatchObj",
definition=function(obj, f) {
obj@path <- f
validObject(obj)
return(obj)
})
setMethod(f="setId", signature="tau_BatchObj",
definition=function(obj, f) {
obj@id <- f
validObject(obj)
return(obj)
})
setMethod(f="setLogbook", signature="tau_BatchObj",
definition=function(obj, f) {
obj@logbook <- f
validObject(obj)
return(obj)
})
setMethod(f="setMicrodata", signature="tau_BatchObj",
definition=function(obj, f) {
obj@microdata <- f
validObject(obj)
return(obj)
})
setMethod(f="setMetadata", signature="tau_BatchObj",
definition=function(obj, f) {
obj@metadata <- f
validObject(obj)
return(obj)
})
setMethod(f="setTable", signature="tau_BatchObj",
definition=function(obj, f) {
obj@table <- f
validObject(obj)
return(obj)
})
setMethod(f="setSafetyrules", signature="tau_BatchObj",
definition=function(obj, f) {
obj@safetyrules <- f
validObject(obj)
return(obj)
})
setMethod(f="setReadInput", signature="tau_BatchObj",
definition=function(obj, f) {
obj@readInput <- f
validObject(obj)
return(obj)
})
setMethod(f="setSolver", signature="tau_BatchObj",
definition=function(obj, f) {
obj@solver <- f
validObject(obj)
return(obj)
})
setMethod(f="setSuppress", signature="tau_BatchObj",
definition=function(obj, f) {
obj@suppress <- f
validObject(obj)
return(obj)
})
setMethod(f="setWritetable", signature="tau_BatchObj",
definition=function(obj, f) {
obj@writetable <- f
validObject(obj)
return(obj)
})
setMethod(f="setIs_table", signature="tau_BatchObj",
definition=function(obj, f) {
obj@is_table <- f
validObject(obj)
return(obj)
})
setMethod(f="setObj", signature="tau_BatchObj",
definition=function(obj, f) {
obj@obj <- f
validObject(obj)
return(obj)
})
setGeneric(name="writeBatchFile", def=function(obj) { standardGeneric("writeBatchFile") })
## Write a tau-argus batch (.arb) file describing the protection problem.
##
## Builds the batch commands (logbook, input data, metadata, table
## specification, safety rules, solver, suppression and output table) from
## the slots of `obj` and writes them, CRLF-terminated, to a standardized
## file name inside obj@path. Returns the path of the written batch file,
## invisibly.
##
## Fix vs. original: the class validity explicitly allows obj@solver to be
## NULL, but `if (obj@solver$solver=="CPLEX")` then raised a zero-length
## condition error; a NULL solver now simply omits the <SOLVER> command.
setMethod(f="writeBatchFile", signature=c("tau_BatchObj"),
definition=function(obj) {
is_table <- obj@is_table
path <- obj@path
cmds <- list()
## header comments identifying the generator
cmds <- append(cmds, "//This batch file was generated by sdcTable")
cmds <- append(cmds, paste("//Date:", Sys.time()))
cmds <- append(cmds, "//")
## resolve file paths; the input data and metadata files must exist
f_log <- normalizePath(file.path(path, obj@logbook), winslash="/", mustWork=FALSE)
f_data <- normalizePath(file.path(path, obj@microdata), winslash="/", mustWork=TRUE)
f_metadata <- normalizePath(file.path(path, obj@metadata), winslash="/", mustWork=TRUE)
cmds <- append(cmds, paste("<LOGBOOK>", dQuote(f_log)))
if (is_table) {
cmds <- append(cmds, paste("<OPENTABLEDATA>", dQuote(f_data)))
} else {
cmds <- append(cmds, paste("<OPENMICRODATA>", dQuote(f_data)))
}
cmds <- append(cmds, paste("<OPENMETADATA>", dQuote(f_metadata)))
cmds <- append(cmds, paste("<SPECIFYTABLE>", obj@table))
cmds <- append(cmds, paste("<SAFETYRULE>", obj@safetyrules))
cmds <- append(cmds, obj@readInput)
## obj@solver may be NULL (permitted by the validity check); only emit a
## <SOLVER> command when a solver was actually configured
if (!is.null(obj@solver)) {
if (obj@solver$solver=="CPLEX") {
f_license <- normalizePath(obj@solver$license, winslash="/", mustWork=TRUE)
cmds <- append(cmds, paste0("<SOLVER> ", obj@solver$solver,",", dQuote(f_license)))
} else {
cmds <- append(cmds, paste("<SOLVER>", obj@solver$solver))
}
}
cmds <- append(cmds, paste("<SUPPRESS>", obj@suppress))
cmds <- append(cmds, paste("<WRITETABLE>", obj@writetable))
fBatch <- generateStandardizedNames(path=obj@path, lab=paste0("batch_",obj@id), ext=".arb")
cmds <- unlist(cmds)
## tau-argus expects CRLF line endings; append a final "\r" so the last
## line is terminated like the rest once cat() inserts "\r\n" separators
cmds[length(cmds)] <- paste0(cmds[length(cmds)],"\r")
cat(cmds, sep="\r\n", file=fBatch)
invisible(fBatch)
})
|
# Load required packages.
# NOTE(review): library(readxl) is attached twice below (harmless, but redundant).
library(readxl)
library(shiny)
library(shinydashboard)
library(tidyverse)
library(ggplot2)
library(dplyr)
library(timetk)
library(readxl)
library(readr)
# Import the COVID-19 Senegal data set used by all render functions below.
labase = read_excel("BaseCovidSN.xlsx")
# Optional date reformatting, kept commented out for reference:
#labase$Date <- format(as.Date(labase$Date), "%d-%m-%Y")
# The Shiny server
# Shiny server: renders the raw data table and one time-series plot per
# indicator from the COVID-19 Senegal data set (`labase`).
#
# Fix vs. original: `output$evolution` was registered twice (first with
# `taux de postivité`, then with `Cas positifs`); Shiny only uses the last
# registration, so the dead first one has been removed.
shinyServer(function(input, output) {
  # Raw data table
  output$base <- renderTable({
    data.frame(labase)
  })
  # Daily positive cases over time
  output$evolution <- renderPlot({
    ggplot(labase) + geom_line(aes(Date, `Cas positifs`))
  })
  output$cas_positif <- renderPlot({
    ggplot(labase, aes(Date, `Cas positifs`)) + geom_line()
  })
  # NOTE(review): the output id says "gueri" (recovered) but the column
  # plotted is `Cas testes` (tested) -- confirm the intended column.
  output$cas_gueri <- renderPlot({
    ggplot(labase, aes(Date, `Cas testes`)) + geom_line()
  })
  output$cas_deces <- renderPlot({
    ggplot(labase, aes(Date, `Deces`)) + geom_line()
  })
  output$cas_com <- renderPlot({
    ggplot(labase, aes(Date, `Cas communautaires`)) + geom_line()
  })
  output$cas_import <- renderPlot({
    ggplot(labase, aes(Date, `Cas importes`)) + geom_line()
  })
  output$cas_contact <- renderPlot({
    ggplot(labase, aes(Date, `Cas contact`)) + geom_line()
  })
})
| /server.R | no_license | MamePendaLeye/ProjetCovid | R | false | false | 1,292 | r | #Chargement des library
library(readxl)
library(shiny)
library(shinydashboard)
library(tidyverse)
library(ggplot2)
library(dplyr)
library(timetk)
library(readxl)
library(readr)
#Importation de la base de données
labase = read_excel("BaseCovidSN.xlsx")
#labase$Date <- format(as.Date(labase$Date), "%d-%m-%Y")
#le serveur
shinyServer(function(input,output){
output$evolution <- renderPlot({
ggplot(labase)+geom_line(aes(Date, `taux de postivité`))
})
#Pour afficher la base de donnés
output$base <- renderTable({
data.frame(labase)}
)
#Pour afficher les courbes
output$evolution <- renderPlot({
ggplot(labase)+geom_line(aes(Date, `Cas positifs`))
})
output$cas_positif <- renderPlot({
ggplot(labase, aes(Date, `Cas positifs`)) + geom_line()
})
output$cas_gueri <- renderPlot({
ggplot(labase, aes(Date, `Cas testes`)) + geom_line()
})
output$cas_deces <- renderPlot({
ggplot(labase, aes(Date, `Deces`)) + geom_line()
})
output$cas_com <- renderPlot({
ggplot(labase, aes(Date, `Cas communautaires`)) + geom_line()
})
output$cas_import <- renderPlot({
ggplot(labase, aes(Date, `Cas importes`)) + geom_line()
})
output$cas_contact <- renderPlot({
ggplot(labase, aes(Date, `Cas contact`)) + geom_line()
})
})
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{calcTwoProtSeqSim}
\alias{calcTwoProtSeqSim}
\title{Protein Sequence Alignment for Two Protein Sequences}
\usage{
calcTwoProtSeqSim(seq1, seq2, type = "local", submat = "BLOSUM62")
}
\arguments{
\item{seq1}{A character string, containing one protein sequence.}
\item{seq2}{A character string, containing another protein sequence.}
\item{type}{Type of alignment, default is \code{'local'},
could be \code{'global'} or \code{'local'},
where \code{'global'} represents Needleman-Wunsch global alignment;
\code{'local'} represents Smith-Waterman local alignment.}
\item{submat}{Substitution matrix, default is \code{'BLOSUM62'},
could be one of \code{'BLOSUM45'}, \code{'BLOSUM50'}, \code{'BLOSUM62'},
\code{'BLOSUM80'}, \code{'BLOSUM100'}, \code{'PAM30'}, \code{'PAM40'},
\code{'PAM70'}, \code{'PAM120'}, \code{'PAM250'}.}
}
\value{
A Biostrings object containing the scores and other
alignment information.
}
\description{
Protein Sequence Alignment for Two Protein Sequences
}
\details{
This function implements the sequence alignment between two protein sequences.
}
\examples{
\donttest{
s1 = readFASTA(system.file('protseq/P00750.fasta', package = 'Rcpi'))[[1]]
s2 = readFASTA(system.file('protseq/P10323.fasta', package = 'Rcpi'))[[1]]
seqalign = calcTwoProtSeqSim(s1, s2)
summary(seqalign)
print(seqalign@score) }
}
\author{
Nan Xiao <\url{http://r2s.name}>
}
\seealso{
See \code{\link{calcParProtSeqSim}} for paralleled pairwise
protein similarity calculation based on sequence alignment.
See \code{\link{calcTwoProtGOSim}} for calculating the
GO semantic similarity between two groups of GO terms or two Entrez gene IDs.
}
\keyword{Needleman-Wunsch}
\keyword{Smith-Waterman}
\keyword{alignment}
\keyword{calcTwoProtSeqSim}
\keyword{global}
\keyword{local}
\keyword{parallel}
\keyword{sequence}
\keyword{similarity}
| /man/calcTwoProtSeqSim.Rd | no_license | MaythaNaif/Rcpi | R | false | false | 1,885 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{calcTwoProtSeqSim}
\alias{calcTwoProtSeqSim}
\title{Protein Sequence Alignment for Two Protein Sequences}
\usage{
calcTwoProtSeqSim(seq1, seq2, type = "local", submat = "BLOSUM62")
}
\arguments{
\item{seq1}{A character string, containing one protein sequence.}
\item{seq2}{A character string, containing another protein sequence.}
\item{type}{Type of alignment, default is \code{'local'},
could be \code{'global'} or \code{'local'},
where \code{'global'} represents Needleman-Wunsch global alignment;
\code{'local'} represents Smith-Waterman local alignment.}
\item{submat}{Substitution matrix, default is \code{'BLOSUM62'},
could be one of \code{'BLOSUM45'}, \code{'BLOSUM50'}, \code{'BLOSUM62'},
\code{'BLOSUM80'}, \code{'BLOSUM100'}, \code{'PAM30'}, \code{'PAM40'},
\code{'PAM70'}, \code{'PAM120'}, \code{'PAM250'}.}
}
\value{
A Biostrings object containing the scores and other
alignment information.
}
\description{
Protein Sequence Alignment for Two Protein Sequences
}
\details{
This function implements the sequence alignment between two protein sequences.
}
\examples{
\donttest{
s1 = readFASTA(system.file('protseq/P00750.fasta', package = 'Rcpi'))[[1]]
s2 = readFASTA(system.file('protseq/P10323.fasta', package = 'Rcpi'))[[1]]
seqalign = calcTwoProtSeqSim(s1, s2)
summary(seqalign)
print(seqalign@score) }
}
\author{
Nan Xiao <\url{http://r2s.name}>
}
\seealso{
See \code{\link{calcParProtSeqSim}} for paralleled pairwise
protein similarity calculation based on sequence alignment.
See \code{\link{calcTwoProtGOSim}} for calculating the
GO semantic similarity between two groups of GO terms or two Entrez gene IDs.
}
\keyword{Needleman-Wunsch}
\keyword{Smith-Waterman}
\keyword{alignment}
\keyword{calcTwoProtSeqSim}
\keyword{global}
\keyword{local}
\keyword{parallel}
\keyword{sequence}
\keyword{similarity}
|
# setwd("~/R/src/qe/R/one-off/高信百诺/")
# load("./data/nav_stats.RData")
# 参数 ----------------------------------------------------------------------
end_date <- "2018-05-31"
universe <- tribble(
~code,
"XT102034.XT",# 产品
"000480.OF", # 基准 1
"H00300.CSI" # 基准 2
)
universe <- universe %>%
mutate(
name = mem_wsd(code, "sec_name", end_date, end_date)$Data$SEC_NAME,
inception_date = wind_num_to_date(
mem_wsd(code, "fund_setupdate", end_date, end_date)$Data$FUND_SETUPDATE
))
start_date <-
universe$inception_date[1]
# "2014-01-30"
# "2017-12-29"
# 导入净值数据 ------------------------------------------------------------------
codes <- pull(universe[c(1,3), "code"])
nav <- wsd_price(universe$code, universe[[1, "inception_date"]], end_date)
nav_ts <- nav %>% select(-field) %>% spread(code, value) %>% df_to_xts()
ret <- nav_ts %>%
Return.calculate() %>%
`[`(, universe$code)
# 年化收益 ~ 年化波动 -------------------------------------------------------------
ret_intersect <- ret %>%
xts_to_df() %>%
{dplyr::filter_if(., is.numeric, dplyr::all_vars(!is.na(.)))} %>%
df_to_xts()
# Risk/return metrics for one product against the benchmark.
#
# Shaped for purrr::imap(): `name` receives the element value (a product
# name, unused here) and `i` the element index, which doubles as the column
# index of that product in `ret_intersect` (read from the enclosing
# environment); column 3 holds the benchmark series.
# NOTE(review): restricted to calendar year 2017 -- confirm this is intended.
f <- function(name, i){
  # nav_metrics_alpha_beta(ret_intersect) %>%
  nav_metrics_risk_return(ret_intersect["2017", c(i,3)], "tibble")
}
product_names <- universe$name[1:2]
stats <- product_names %>%
imap(., f) %>%
setNames(product_names) %>%
bind_rows(.id = "产品")
stats %>%
write.csv(file = "clipboard", fileEncoding = "native.enc", row.names = FALSE)
stats[] %>% select(
"产品",
"年化收益",
"年化波动"
) %>%
ggplot() +
geom_point(aes(年化波动, 年化收益, color = 产品), size = 2) +
ggtitle("风险收益分布——与指数比较") +
labs(
color = "") +
scale_x_continuous(labels = scales::percent) +
scale_y_continuous(labels = scales::percent) +
coord_cartesian(xlim = c(0.1, 0.3), ylim = c(0, 0.3)) +
ggrepel::geom_text_repel(aes(年化波动, 年化收益, label = 产品)) +
plot_theme_no_legend()
ggsave("plots/风险收益分布——与指数比较.jpg")
# 年化收益 ~ 年化波动 - 3个品种 ------------------------------------------------------
stats[] %>% select(
"产品",
"年化收益",
"年化波动"
) %>%
ggplot() +
geom_point(aes(年化波动, 年化收益, color = 产品), size = 2) +
ggtitle("风险收益分布——与东方红新动力比较") +
labs(
caption = str_c("数据日期: ", str_c(c(start(Ra), end(Ra)), collapse = " 至 ")),
color = "") +
scale_x_continuous(labels = scales::percent) +
scale_y_continuous(labels = scales::percent) +
coord_cartesian(xlim = c(0.1, 0.3), ylim = c(0, 0.3)) +
ggrepel::geom_text_repel(aes(年化波动, 年化收益, label = 产品)) +
plot_theme_no_legend()
ggsave("plots/风险收益分布——与东方红新动力比较.jpg")
# 年化收益 ~ 年化波动 - 2017年 ------------------------------------------------------
Ra <- Return.calculate(nav %>% na.omit())["2017"]
Rb <- Ra[, ncol(Ra)]
stats <- as.list(Ra) %>%
map(~ pu_return_stats(., Rb)) %>%
setNames(universe$name) %>%
bind_rows(.id = "产品")
stats[] %>% select(
"产品",
"年化收益",
"年化波动"
) %>%
ggplot() +
geom_point(aes(年化波动, 年化收益, color = 产品), size = 2) +
ggtitle("风险收益分布——2017年") +
labs(
caption = str_c("数据日期: ", str_c(c(start(Ra), end(Ra)), collapse = " 至 ")),
color = "") +
scale_x_continuous(labels = scales::percent) +
scale_y_continuous(labels = scales::percent) +
coord_cartesian(xlim = c(0, 0.3), ylim = c(0, 0.8)) +
ggrepel::geom_text_repel(aes(年化波动, 年化收益, label = 产品)) +
plot_theme_no_legend()
ggsave("plots/风险收益分布——2017年.jpg")
# 3年滚动年化收益 ----------------------------------------------------------------
Ra_log <- Return.calculate(nav, "log")
coredata(Ra_log)[is.nan(Ra_log)] <- NA
rolling_1y_ret_annual <- exp(rollapply(Ra_log, 52, mean) * 52) - 1
rolling_3y_ret_annual <- exp(rollapply(Ra_log, 52*3, mean, align = "left")
* 52) - 1
# Trim an xts series to the date span over which its first column has data.
# Builds a "start/end" range string and subsets with it, so rows outside the
# non-NA span of column 1 are dropped for every column.
f_subset_xts <- function(ts){
  non_missing <- na.omit(ts[, 1])
  date_range <- range(index(non_missing))
  ts[str_c(date_range, collapse = "/")]
}
f_subset_xts(rolling_3y_ret_annual) %>%
setNames(universe$name) %>%
xts_to_df() %>%
mutate(DATETIME = as.Date(DATETIME)) %>%
gather("key", "value", -DATETIME) %>%
ggplot() +
ggtitle("未来3年滚动年化收益") +
geom_line(aes(DATETIME, value, color = key), size = 1) +
labs(
# caption = str_c("数据日期: ", str_c(c(start_date, end_date), collapse = " 至 ")),
# caption = str_c("数据日期: 成立日", str_c(c(start_date, end_date), collapse = " 至 ")),
color = "") +
scale_y_continuous(labels = scales::percent) +
plot_theme_no_axis_title()
ggsave("plots/3年滚动年化收益.jpg")
f_subset_xts(rolling_3y_ret_annual) %>%
dygraph(main = "") %>%
dyRangeSelector()
# 滚动风险 --------------------------------------------------------------------
rolling_1y_vol_annual <- rollapply(Ra_log, 52, sd) * sqrt(52)
f_subset_xts(rolling_1y_vol_annual) %>%
setNames(universe$name) %>%
xts_to_df() %>%
mutate(DATETIME = as.Date(DATETIME)) %>%
gather("key", "value", -DATETIME) %>%
ggplot() +
ggtitle("历史风险") +
geom_line(aes(DATETIME, value, color = key), size = 1) +
labs(
caption = "指标说明:滚动1年期收益标准差,周频率",
# caption = str_c("数据日期: 成立日", str_c(c(start_date, end_date), collapse = " 至 ")),
color = "") +
scale_y_continuous(labels = scales::percent) +
plot_theme_no_axis_title()
ggsave("plots/历史风险.jpg")
bm_vol <- rolling_1y_vol_annual[, ncol(rolling_1y_vol_annual)]
rolling_1y_vol_annual_rel <- rolling_1y_vol_annual %>%
as.list %>%
map(~ ./bm_vol) %>%
bind_cols() %>%
xts(order.by = index(rolling_1y_vol_annual))
f_subset_xts(rolling_1y_vol_annual_rel) %>%
setNames(universe$name) %>%
xts_to_df() %>%
mutate(DATETIME = as.Date(DATETIME)) %>%
gather("key", "value", -DATETIME) %>%
ggplot() +
ggtitle("历史风险—相对市场") +
geom_line(aes(DATETIME, value, color = key), size = 1) +
labs(
caption = "指标说明:滚动1年期收益标准差,产品除以沪深300指数,周频率",
# caption = str_c("数据日期: 成立日", str_c(c(start_date, end_date), collapse = " 至 ")),
color = "") +
scale_y_continuous(labels = scales::percent) +
plot_theme_no_axis_title()
ggsave("plots/历史风险—相对市场.jpg")
f_subset_xts(rolling_1y_vol_annual) %>%
dygraph() %>%
dyRangeSelector()
save.image("./data/nav_stats.RData")
| /R/nav-analysis/plot.R | no_license | dfyj/wm | R | false | false | 6,722 | r | # setwd("~/R/src/qe/R/one-off/高信百诺/")
# load("./data/nav_stats.RData")
# 参数 ----------------------------------------------------------------------
end_date <- "2018-05-31"
universe <- tribble(
~code,
"XT102034.XT",# 产品
"000480.OF", # 基准 1
"H00300.CSI" # 基准 2
)
universe <- universe %>%
mutate(
name = mem_wsd(code, "sec_name", end_date, end_date)$Data$SEC_NAME,
inception_date = wind_num_to_date(
mem_wsd(code, "fund_setupdate", end_date, end_date)$Data$FUND_SETUPDATE
))
start_date <-
universe$inception_date[1]
# "2014-01-30"
# "2017-12-29"
# 导入净值数据 ------------------------------------------------------------------
codes <- pull(universe[c(1,3), "code"])
nav <- wsd_price(universe$code, universe[[1, "inception_date"]], end_date)
nav_ts <- nav %>% select(-field) %>% spread(code, value) %>% df_to_xts()
ret <- nav_ts %>%
Return.calculate() %>%
`[`(, universe$code)
# 年化收益 ~ 年化波动 -------------------------------------------------------------
ret_intersect <- ret %>%
xts_to_df() %>%
{dplyr::filter_if(., is.numeric, dplyr::all_vars(!is.na(.)))} %>%
df_to_xts()
# Risk/return metrics for one product against the benchmark.
#
# Shaped for purrr::imap(): `name` receives the element value (a product
# name, unused here) and `i` the element index, which doubles as the column
# index of that product in `ret_intersect` (read from the enclosing
# environment); column 3 holds the benchmark series.
# NOTE(review): restricted to calendar year 2017 -- confirm this is intended.
f <- function(name, i){
  # nav_metrics_alpha_beta(ret_intersect) %>%
  nav_metrics_risk_return(ret_intersect["2017", c(i,3)], "tibble")
}
product_names <- universe$name[1:2]
stats <- product_names %>%
imap(., f) %>%
setNames(product_names) %>%
bind_rows(.id = "产品")
stats %>%
write.csv(file = "clipboard", fileEncoding = "native.enc", row.names = FALSE)
stats[] %>% select(
"产品",
"年化收益",
"年化波动"
) %>%
ggplot() +
geom_point(aes(年化波动, 年化收益, color = 产品), size = 2) +
ggtitle("风险收益分布——与指数比较") +
labs(
color = "") +
scale_x_continuous(labels = scales::percent) +
scale_y_continuous(labels = scales::percent) +
coord_cartesian(xlim = c(0.1, 0.3), ylim = c(0, 0.3)) +
ggrepel::geom_text_repel(aes(年化波动, 年化收益, label = 产品)) +
plot_theme_no_legend()
ggsave("plots/风险收益分布——与指数比较.jpg")
# 年化收益 ~ 年化波动 - 3个品种 ------------------------------------------------------
stats[] %>% select(
"产品",
"年化收益",
"年化波动"
) %>%
ggplot() +
geom_point(aes(年化波动, 年化收益, color = 产品), size = 2) +
ggtitle("风险收益分布——与东方红新动力比较") +
labs(
caption = str_c("数据日期: ", str_c(c(start(Ra), end(Ra)), collapse = " 至 ")),
color = "") +
scale_x_continuous(labels = scales::percent) +
scale_y_continuous(labels = scales::percent) +
coord_cartesian(xlim = c(0.1, 0.3), ylim = c(0, 0.3)) +
ggrepel::geom_text_repel(aes(年化波动, 年化收益, label = 产品)) +
plot_theme_no_legend()
ggsave("plots/风险收益分布——与东方红新动力比较.jpg")
# 年化收益 ~ 年化波动 - 2017年 ------------------------------------------------------
Ra <- Return.calculate(nav %>% na.omit())["2017"]
Rb <- Ra[, ncol(Ra)]
stats <- as.list(Ra) %>%
map(~ pu_return_stats(., Rb)) %>%
setNames(universe$name) %>%
bind_rows(.id = "产品")
stats[] %>% select(
"产品",
"年化收益",
"年化波动"
) %>%
ggplot() +
geom_point(aes(年化波动, 年化收益, color = 产品), size = 2) +
ggtitle("风险收益分布——2017年") +
labs(
caption = str_c("数据日期: ", str_c(c(start(Ra), end(Ra)), collapse = " 至 ")),
color = "") +
scale_x_continuous(labels = scales::percent) +
scale_y_continuous(labels = scales::percent) +
coord_cartesian(xlim = c(0, 0.3), ylim = c(0, 0.8)) +
ggrepel::geom_text_repel(aes(年化波动, 年化收益, label = 产品)) +
plot_theme_no_legend()
ggsave("plots/风险收益分布——2017年.jpg")
# 3年滚动年化收益 ----------------------------------------------------------------
Ra_log <- Return.calculate(nav, "log")
coredata(Ra_log)[is.nan(Ra_log)] <- NA
rolling_1y_ret_annual <- exp(rollapply(Ra_log, 52, mean) * 52) - 1
rolling_3y_ret_annual <- exp(rollapply(Ra_log, 52*3, mean, align = "left")
* 52) - 1
# Trim an xts series to the date span over which its first column has data.
# Builds a "start/end" range string and subsets with it, so rows outside the
# non-NA span of column 1 are dropped for every column.
f_subset_xts <- function(ts){
  non_missing <- na.omit(ts[, 1])
  date_range <- range(index(non_missing))
  ts[str_c(date_range, collapse = "/")]
}
f_subset_xts(rolling_3y_ret_annual) %>%
setNames(universe$name) %>%
xts_to_df() %>%
mutate(DATETIME = as.Date(DATETIME)) %>%
gather("key", "value", -DATETIME) %>%
ggplot() +
ggtitle("未来3年滚动年化收益") +
geom_line(aes(DATETIME, value, color = key), size = 1) +
labs(
# caption = str_c("数据日期: ", str_c(c(start_date, end_date), collapse = " 至 ")),
# caption = str_c("数据日期: 成立日", str_c(c(start_date, end_date), collapse = " 至 ")),
color = "") +
scale_y_continuous(labels = scales::percent) +
plot_theme_no_axis_title()
ggsave("plots/3年滚动年化收益.jpg")
f_subset_xts(rolling_3y_ret_annual) %>%
dygraph(main = "") %>%
dyRangeSelector()
# 滚动风险 --------------------------------------------------------------------
rolling_1y_vol_annual <- rollapply(Ra_log, 52, sd) * sqrt(52)
f_subset_xts(rolling_1y_vol_annual) %>%
setNames(universe$name) %>%
xts_to_df() %>%
mutate(DATETIME = as.Date(DATETIME)) %>%
gather("key", "value", -DATETIME) %>%
ggplot() +
ggtitle("历史风险") +
geom_line(aes(DATETIME, value, color = key), size = 1) +
labs(
caption = "指标说明:滚动1年期收益标准差,周频率",
# caption = str_c("数据日期: 成立日", str_c(c(start_date, end_date), collapse = " 至 ")),
color = "") +
scale_y_continuous(labels = scales::percent) +
plot_theme_no_axis_title()
ggsave("plots/历史风险.jpg")
bm_vol <- rolling_1y_vol_annual[, ncol(rolling_1y_vol_annual)]
rolling_1y_vol_annual_rel <- rolling_1y_vol_annual %>%
as.list %>%
map(~ ./bm_vol) %>%
bind_cols() %>%
xts(order.by = index(rolling_1y_vol_annual))
f_subset_xts(rolling_1y_vol_annual_rel) %>%
setNames(universe$name) %>%
xts_to_df() %>%
mutate(DATETIME = as.Date(DATETIME)) %>%
gather("key", "value", -DATETIME) %>%
ggplot() +
ggtitle("历史风险—相对市场") +
geom_line(aes(DATETIME, value, color = key), size = 1) +
labs(
caption = "指标说明:滚动1年期收益标准差,产品除以沪深300指数,周频率",
# caption = str_c("数据日期: 成立日", str_c(c(start_date, end_date), collapse = " 至 ")),
color = "") +
scale_y_continuous(labels = scales::percent) +
plot_theme_no_axis_title()
ggsave("plots/历史风险—相对市场.jpg")
f_subset_xts(rolling_1y_vol_annual) %>%
dygraph() %>%
dyRangeSelector()
save.image("./data/nav_stats.RData")
|
#' Put Strips on the Boundary of a Lattice Display
#'
#' Try to update a "trellis" object so that strips are only shown on the
#' top and left boundaries when printed, instead of in every panel as is
#' usual. This version extend the version in LatticeExtra to allow more than
#' two conditioning variables.
#'
#' @param x An object of class "trellis".
#' @param strip A function, character string, or logical that would
#' be appropriate strip and strip.left arguments respectively in a high
#' level lattice function call (see xyplot). Note, however, that the strip
#' function must be of the form of strip.default2.
#' The equivalent of strip.custom here is strip.custom2.
#' @param strip.left As strip, for the strips down the left.
#' @param top Determines the number of strips that are drawn along the top
#' of the plot. Conditioning variables 1, ..., top will be displayed along
#' the top of the plot (using strip), and variables top + 1, ..., dim(x)
#' will be displayed along the left hand side of the plot.
#' @param strip.lines height of strips in number of lines; helpful for
#' multi-line text or mathematical annotation in strips.
#' @param strip.left.lines As strip.lines, for strips down the left.
#' @param horizontal ...
#' @return An object of class "trellis", essentially the same as x, but with
#' certain properties modified.
#' @export
useOuterStrips2 <-
  function(x,
           strip = strip.default2,
           strip.left = strip.custom2(horizontal = horizontal),
           top = 1,
           strip.lines = 1,
           strip.left.lines = strip.lines,
           horizontal = FALSE){  # was `F`: TRUE/FALSE cannot be reassigned
    stopifnot(inherits(x, "trellis"))  # validate before touching dim(x)
    dimx <- dim(x)
    topSeq <- seq_len(top)
    topdimx <- dimx[topSeq]    # level counts of variables shown along the top
    leftdimx <- dimx[-topSeq]  # level counts of variables shown down the left
    opar <- if (is.null(x$par.settings)) list() else x$par.settings
    ## Collapse strip space everywhere except the outer row/column of panels.
    par.settings <-
      modifyList(opar,
                 list(layout.heights =
                        if (x$as.table){
                          list(strip = c(strip.lines * top,
                                         rep(0, prod(leftdimx) - 1)))
                        }
                        else {
                          list(strip = c(rep(0, prod(leftdimx) - 1),
                                         strip.lines * top))
                        },
                      layout.widths =
                        list(strip.left = c(strip.left.lines * length(leftdimx),
                                            rep(0, prod(topdimx) - 1)))))
    ## Accept `strip` given as a function, its name, or a logical flag.
    if (is.character(strip)){
      strip <- get(strip)
    }
    if (is.logical(strip) && strip){
      strip <- strip.default2
    }
    new.strip <-
      if (is.function(strip))
      {
        function(which.given, which.panel, var.name, ...) {
          given.top <- which.given <= top
          if (given.top){
            ## self-assignment forces the promises so the closure captures
            ## the values computed above
            topdimx <- topdimx
            leftdimx <- leftdimx
            prev.dim <- topdimx[seq_len(which.given - 1)]
            is.top.row <- current.row() == prod(leftdimx)
            is.level.change <- current.column() %% prod(prev.dim) == 1
            is.first.dim <- which.given == 1
            ## draw only in the outermost row, and only at level boundaries
            if (is.top.row && (is.level.change || is.first.dim)){
              strip(which.given = which.given,
                    which.panel = which.panel[topSeq],
                    var.name = var.name[topSeq],
                    dimx = topdimx,
                    ...)
            }
          }
        }
      }
    else {
      strip
    }
    if (is.character(strip.left)){
      strip.left <- get(strip.left)
    }
    if (is.logical(strip.left) && strip.left){
      strip.left <- strip.custom2(horizontal = FALSE)
    }
    new.strip.left <-
      if (is.function(strip.left)){
        function(which.given, which.panel, var.name, ...) {
          which.given <- which.given - top  # re-index into the left-hand vars
          given.left <- which.given >= 1
          if (given.left){
            leftdimx <- leftdimx  # force the promise (see above)
            prev.dim <- leftdimx[seq_len(which.given - 1)]
            is.left.col <- current.column() == 1
            is.level.change <- current.row() %% prod(prev.dim) == 1
            is.first.dim <- which.given == 1
            ## draw only in the outermost column, at level boundaries
            if (is.left.col && (is.level.change || is.first.dim)){
              strip.left(which.given = which.given,
                         which.panel = which.panel[-topSeq],
                         dimx = leftdimx,
                         var.name = var.name[-topSeq],
                         ...)
            }
          }
        }
      }
    else {
      strip.left
    }
    update(x,
           par.settings = par.settings,
           strip = new.strip,
           strip.left = new.strip.left,
           par.strip.text = list(lines = 0.5),
           layout = c(prod(topdimx), prod(leftdimx)))
  }
#' Build a customised strip function
#'
#' Factory analogous to \code{lattice::strip.custom}: returns a strip
#' function that forwards to \code{strip.default2}, with the arguments given
#' here taking precedence over those supplied at draw time.
#'
#' @param ... Arguments to pre-set in the returned strip function.
strip.custom2 <-
  function(...)
{
  preset <- list(...)
  function(...)
  {
    at_draw <- list(...)
    ## preset values override whatever lattice passes in at draw time
    do.call("strip.default2",
            lattice:::updateList(at_draw, preset))
  }
}
#' Strip drawing function supporting nested outer strips
#'
#' Drop-in variant of \code{lattice::strip.default} that additionally takes
#' \code{dimx}, the level counts of the conditioning variables handled by this
#' strip function, so the strip for variable k can span the combined extent of
#' variables 1..k-1. Intended to be installed via \code{useOuterStrips2}.
#'
#' @param which.given Index of the conditioning variable being drawn.
#' @param which.panel Integer vector of current levels, one per conditioning
#'   variable.
#' @param var.name Character vector of conditioning-variable names.
#' @param factor.levels Labels for the levels of the current variable.
#' @param shingle.intervals Interval matrix when the variable is a shingle;
#'   \code{NULL} for factors.
#' @param strip.names,strip.levels Length-2 logicals: show the variable
#'   name / level text for (factors, shingles) respectively.
#' @param sep Separator drawn between name and level.
#' @param style Integer strip style (1-4), as in
#'   \code{lattice::strip.default}.
#' @param horizontal Draw the strip horizontally (top) rather than
#'   vertically (left)?
#' @param bg Strip background colour.
#' @param fg Strip foreground (shingle / highlighted level) colour.
#' @param par.strip.text Graphical parameters for strip text.
#' @param dimx ADDED relative to strip.default: integer vector of level
#'   counts used to size the strip viewport (\code{extent}).
strip.default2 <-
    function(which.given,
             which.panel,
             ## packet.number,
             ## panel.number,
             var.name,
             factor.levels,
             shingle.intervals = NULL,
             strip.names = c(FALSE, TRUE),
             strip.levels = c(TRUE, FALSE),
             sep = " : ",
             style = 1,
             horizontal = TRUE,
             ## FIXME: not sure how to incorporate alpha in strip colors
             bg = trellis.par.get("strip.background")$col[which.given],
             fg = trellis.par.get("strip.shingle")$col[which.given],
             par.strip.text = trellis.par.get("add.text"),
             dimx)
{
    ## extent = product of the level counts of all preceding variables;
    ## this is what lets an outer strip span several panel columns/rows.
    prev.dim <- dimx[seq_len(which.given - 1)]
    extent <- prod(prev.dim)
    ## Push a clipped viewport sized `extent` x (1/#vars) for this strip.
    if (horizontal)
        lattice:::pushViewport(lattice:::viewport(y = (which.given-0.5)/length(which.panel),
                              height = 1/length(which.panel),
                              width = extent,
                              x = extent/2,
                              clip = trellis.par.get("clip")$strip,
                              name = paste("strip.default", which.given,
                              sep = ".")))
    else
        lattice:::pushViewport(lattice:::viewport(x = 1 - (which.given-0.5)/length(which.panel),
                              width = 1/length(which.panel),
                              height = extent,
                              y = extent/2,
                              clip = trellis.par.get("clip")$strip,
                              name = paste("strip.default", which.given,
                              sep = ".")))
    gp.text <-
        lattice:::gpar(col = par.strip.text$col,
             alpha = par.strip.text$alpha,
             lineheight = par.strip.text$lineheight,
             fontfamily = par.strip.text$fontfamily,
             fontface = lattice:::chooseFace(par.strip.text$fontface,
             par.strip.text$font),
             cex = par.strip.text$cex)
    name <- var.name[which.given]
    level <- which.panel[which.given]
    strip.names <- rep(strip.names, length.out = 2)
    strip.levels <- rep(strip.levels, length.out = 2)
    ## str(shingle.intervals)
    ## Optionally abbreviate level labels per par.strip.text$abbr settings.
    formatLabel <-
        function(s,
                 abbreviate = par.strip.text$abbr,
                 minlength = par.strip.text$minl,
                 dot = par.strip.text$dot)
    {
        if (is.null(abbreviate)) abbreviate <- FALSE
        if (is.null(minlength)) minlength <- 4
        if (is.null(dot)) dot <- FALSE
        if (abbreviate) abbreviate(s, minlength = minlength, dot = dot)
        else s
    }
    factor.levels <- formatLabel(factor.levels)
    if (!is.null(shingle.intervals))
    {
        ## This usually indicates shingles, as opposed to factors.
        ## 'style' will be completely ignored, and shingle.intervals
        ## encoded using bg and fg. Names and levels are both game.
        lattice:::grid.rect(gp = lattice:::gpar(fill = bg, col = bg))
        t <- range(shingle.intervals)
        r <- (range(shingle.intervals[level,]) - t[1]) / diff(t)
        if (horizontal)
            lattice:::grid.rect(x = lattice:::unit(r %*% c(.5,.5),"npc"),
                      width = max(lattice:::unit(c(diff(r), 1), c("npc", "mm"))),
                      gp = lattice:::gpar(col = fg, fill = fg))
        else
            lattice:::grid.rect(y = lattice:::unit(r %*% c(.5,.5),"npc"),
                      height = max(lattice:::unit( c(diff(r), 1), c("npc", "mm"))),
                      gp = lattice:::gpar(col = fg, fill = fg))
        lattice:::paste.and.draw(name, factor.levels[level],
                       sep = sep,
                       horizontal = horizontal,
                       showl = strip.names[2],
                       showr = strip.levels[2],
                       gp = gp.text)
    }
    else
    {
        ## Behaviour depends on 'style'. Will separate out coloring
        ## and text based on 'style'.
        num <- length(factor.levels)
        ## coloring:
        ## background: all except style = 2
        if (style != 2) lattice:::grid.rect(gp = lattice:::gpar(fill = bg, col = bg))
        ## foreground: needed only for style = 2, 3 and 4
        if (num > 0 && style %in% c(2, 3, 4))
        {
            if (horizontal)
            {
                lattice:::grid.rect(x = lattice:::unit((2*level-1)/(2*num), "npc"),
                          width = lattice:::unit(1/num, "npc"),
                          gp = lattice:::gpar(fill = fg, col = fg))
            }
            else
            {
                lattice:::grid.rect(y = lattice:::unit((2*level-1)/(2*num), "npc"),
                          height = lattice:::unit(1/num, "npc"),
                          gp = lattice:::gpar(fill = fg, col = fg))
            }
        }
        ## text: [names|levels] centered only if style = 1 or 3
        if (style %in% c(1, 3))
        {
            ## NOTE(review): `horizontal` is hard-coded to T here, ignoring
            ## the function argument -- confirm this is intentional (it keeps
            ## left-strip text upright together with paste.and.draw2's
            ## rot = 0 handling).
            paste.and.draw2(name, factor.levels[level],
                           sep = sep,
                           horizontal = T,
                           showl = strip.names[1],
                           showr = strip.levels[1],
                           gp = gp.text)
        }
        ## remaining cases
        else if (num > 0)
        {
            ## either all levels or only one
            lid <- if (style %in% c(2, 4)) 1:num else level
            if (horizontal)
            {
                lattice:::grid.text(label = factor.levels[lid],
                          x = (2 * lid - 1) / (2 * num),
                          gp = gp.text)
            }
            else
            {
                lattice:::grid.text(label = factor.levels[lid],
                          y = (2 * lid - 1) / (2 * num),
                          gp = gp.text)
            }
        }
    }
    lattice:::upViewport()
    ## border is drawn with clipping off
    if (horizontal)
        lattice:::pushViewport(lattice:::viewport(y = (which.given-0.5)/length(which.panel),
                              height = 1/length(which.panel),
                              width = extent,
                              x = extent/2,
                              clip = "off",
                              name = paste("strip.default.off",
                              which.given, sep = ".")))
    else
        lattice:::pushViewport(lattice:::viewport(x = 1 - (which.given-0.5)/length(which.panel),
                              width = 1/length(which.panel),
                              height = extent,
                              y = extent/2,
                              clip = "off",
                              name = paste("strip.default.off",
                              which.given, sep = ".")))
    strip.border <- trellis.par.get("strip.border")
    ## draw border for strip
    lattice:::grid.rect(gp =
              lattice:::gpar(col = rep(strip.border$col,
                   length.out = which.given)[which.given],
                   lty = rep(strip.border$lty,
                   length.out = which.given)[which.given],
                   lwd = rep(strip.border$lwd,
                   length.out = which.given)[which.given],
                   alpha = rep(strip.border$alpha,
                   length.out = which.given)[which.given],
                   fill = "transparent"))
    lattice:::upViewport()
}
#' Draw "name : level" text inside the current strip viewport
#'
#' Grid-level helper used by \code{strip.default2} (styles 1 and 3) to draw
#' the variable name and/or level label, optionally separated by \code{sep}
#' and centred as a unit. Unlike \code{lattice:::paste.and.draw}, vertical
#' (\code{horizontal = FALSE}) text is drawn with \code{rot = 0}, i.e. kept
#' upright.
#'
#' @param left Text drawn before the separator (usually the variable name).
#' @param right Text drawn after the separator (usually the level label).
#' @param sep Separator string, drawn only when both pieces are shown.
#' @param horizontal Lay the text out along x (TRUE) or along y (FALSE).
#' @param center Shift the pieces so the combined string is centred.
#' @param showl,showr Whether to draw the left/right piece.
#' @param gp Graphical parameters passed to \code{grid.text}.
paste.and.draw2 <- function(left, right, sep = " : ", horizontal = TRUE,
    center = TRUE, showl = TRUE, showr = TRUE, gp = lattice:::gpar())
{
    if (showl || showr) {
        shows <- showl && showr
        ## half the separator width; collapses to zero when only one piece
        ## is drawn (shows == FALSE)
        wsep <- lattice:::unit(0.5 * shows, "strwidth", list(sep))
        offset <- lattice:::unit(0.5, "npc")
        if (center)
            offset <- offset + (if (showl)
                lattice:::unit(0.5, "strwidth", list(left))
            else lattice:::unit(0, "mm")) - (if (showr)
                lattice:::unit(0.5 * showr, "strwidth", list(right))
            else lattice:::unit(0, "mm"))
        if (horizontal) {
            if (shows)
                lattice:::grid.text(sep, x = offset, gp = gp)
            if (showl)
                lattice:::grid.text(left, x = offset - wsep, gp = gp, just = "right")
            if (showr)
                lattice:::grid.text(right, x = offset + wsep, gp = gp,
                    just = "left")
        }
        else {
            if (shows)
                lattice:::grid.text(sep, y = offset, gp = gp, rot = 0)
            if (showl)
                lattice:::grid.text(left, y = offset - wsep, gp = gp, just = "right",
                    rot = 0)
            if (showr)
                lattice:::grid.text(right, y = offset + wsep, gp = gp,
                    just = "left", rot = 0)
        }
    }
}
| /R/useOuterStrips2.R | no_license | rjbgoudie/utils.rjbg | R | false | false | 14,652 | r | #' Put Strips on the Boundary of a Lattice Display
#'
#' Try to update a "trellis" object so that strips are only shown on the
#' top and left boundaries when printed, instead of in every panel as is
#' usual. This version extend the version in LatticeExtra to allow more than
#' two conditioning variables.
#'
#' @param x An object of class "trellis".
#' @param strip A function, character string, or logical that would
#' be appropriate strip and strip.left arguments respectively in a high
#' level lattice function call (see xyplot). Note, however, that the strip
#' function must be of the form of strip.default2.
#' The equivalent of strip.custom here is strip.custom2.
#' @param strip.left As strip, for the strips down the left.
#' @param top Determines the number of strips that are drawn along the top
#' of the plot. Conditioning variables 1, ..., top will be displayed along
#' the top of the plot (using strip), and variables top + 1, ..., dim(x)
#' will be displayed along the left hand side of the plot.
#' @param strip.lines height of strips in number of lines; helpful for
#' multi-line text or mathematical annotation in strips.
#' @param strip.left.lines As strip.lines, for strips down the left.
#' @param horizontal ...
#' @return An object of class "trellis", essentially the same as x, but with
#' certain properties modified.
#' @export
useOuterStrips2 <-
  function(x,
           strip = strip.default2,
           strip.left = strip.custom2(horizontal = horizontal),
           top = 1,
           strip.lines = 1,
           strip.left.lines = strip.lines,
           horizontal = FALSE){  # was `F`: TRUE/FALSE cannot be reassigned
    stopifnot(inherits(x, "trellis"))  # validate before touching dim(x)
    dimx <- dim(x)
    topSeq <- seq_len(top)
    topdimx <- dimx[topSeq]    # level counts of variables shown along the top
    leftdimx <- dimx[-topSeq]  # level counts of variables shown down the left
    opar <- if (is.null(x$par.settings)) list() else x$par.settings
    ## Collapse strip space everywhere except the outer row/column of panels.
    par.settings <-
      modifyList(opar,
                 list(layout.heights =
                        if (x$as.table){
                          list(strip = c(strip.lines * top,
                                         rep(0, prod(leftdimx) - 1)))
                        }
                        else {
                          list(strip = c(rep(0, prod(leftdimx) - 1),
                                         strip.lines * top))
                        },
                      layout.widths =
                        list(strip.left = c(strip.left.lines * length(leftdimx),
                                            rep(0, prod(topdimx) - 1)))))
    ## Accept `strip` given as a function, its name, or a logical flag.
    if (is.character(strip)){
      strip <- get(strip)
    }
    if (is.logical(strip) && strip){
      strip <- strip.default2
    }
    new.strip <-
      if (is.function(strip))
      {
        function(which.given, which.panel, var.name, ...) {
          given.top <- which.given <= top
          if (given.top){
            ## self-assignment forces the promises so the closure captures
            ## the values computed above
            topdimx <- topdimx
            leftdimx <- leftdimx
            prev.dim <- topdimx[seq_len(which.given - 1)]
            is.top.row <- current.row() == prod(leftdimx)
            is.level.change <- current.column() %% prod(prev.dim) == 1
            is.first.dim <- which.given == 1
            ## draw only in the outermost row, and only at level boundaries
            if (is.top.row && (is.level.change || is.first.dim)){
              strip(which.given = which.given,
                    which.panel = which.panel[topSeq],
                    var.name = var.name[topSeq],
                    dimx = topdimx,
                    ...)
            }
          }
        }
      }
    else {
      strip
    }
    if (is.character(strip.left)){
      strip.left <- get(strip.left)
    }
    if (is.logical(strip.left) && strip.left){
      strip.left <- strip.custom2(horizontal = FALSE)
    }
    new.strip.left <-
      if (is.function(strip.left)){
        function(which.given, which.panel, var.name, ...) {
          which.given <- which.given - top  # re-index into the left-hand vars
          given.left <- which.given >= 1
          if (given.left){
            leftdimx <- leftdimx  # force the promise (see above)
            prev.dim <- leftdimx[seq_len(which.given - 1)]
            is.left.col <- current.column() == 1
            is.level.change <- current.row() %% prod(prev.dim) == 1
            is.first.dim <- which.given == 1
            ## draw only in the outermost column, at level boundaries
            if (is.left.col && (is.level.change || is.first.dim)){
              strip.left(which.given = which.given,
                         which.panel = which.panel[-topSeq],
                         dimx = leftdimx,
                         var.name = var.name[-topSeq],
                         ...)
            }
          }
        }
      }
    else {
      strip.left
    }
    update(x,
           par.settings = par.settings,
           strip = new.strip,
           strip.left = new.strip.left,
           par.strip.text = list(lines = 0.5),
           layout = c(prod(topdimx), prod(leftdimx)))
  }
#' Build a customised strip function
#'
#' Factory analogous to \code{lattice::strip.custom}: returns a strip
#' function that forwards to \code{strip.default2}, with the arguments given
#' here taking precedence over those supplied at draw time.
#'
#' @param ... Arguments to pre-set in the returned strip function.
strip.custom2 <-
  function(...)
{
  preset <- list(...)
  function(...)
  {
    at_draw <- list(...)
    ## preset values override whatever lattice passes in at draw time
    do.call("strip.default2",
            lattice:::updateList(at_draw, preset))
  }
}
#' Modified lattice strip function with shrunken strip extent
#'
#' A variant of \code{lattice::strip.default} that shrinks the strip
#' viewport for the \code{which.given}-th conditioning variable to the
#' product of the dimensions of the preceding conditioning variables
#' (supplied through the added \code{dimx} argument), and that draws
#' "name : level" text via \code{\link{paste.and.draw2}}.  All grid
#' drawing functions are taken from the \pkg{lattice} namespace.
#'
#' @param which.given integer; which of the conditioning variables this
#'   strip corresponds to.
#' @param which.panel integer vector of current levels, one per
#'   conditioning variable.
#' @param var.name character vector of conditioning-variable names.
#' @param factor.levels character vector of levels of the current
#'   conditioning variable.
#' @param shingle.intervals interval matrix when the variable is a
#'   shingle, or \code{NULL} for a factor.
#' @param strip.names length-2 logical: show the variable name for
#'   (factor, shingle) strips?
#' @param strip.levels length-2 logical: show the level label for
#'   (factor, shingle) strips?
#' @param sep separator string drawn between name and level.
#' @param style integer display style for factor strips, as in
#'   \code{lattice::strip.default}.
#' @param horizontal logical; \code{TRUE} for strips along the top,
#'   \code{FALSE} for strips along the left.
#' @param bg strip background colour.
#' @param fg strip foreground (shingle/highlight) colour.
#' @param par.strip.text graphical parameters (col, cex, font, ...) for
#'   the strip text.
#' @param dimx ADDED: dimensions (numbers of levels) of the conditioning
#'   variables; used to compute the shrunken strip extent -- TODO confirm
#'   exact expected form against the caller.
strip.default2 <-
    function(which.given,
             which.panel,
             ## packet.number,
             ## panel.number,
             var.name,
             factor.levels,
             shingle.intervals = NULL,
             strip.names = c(FALSE, TRUE),
             strip.levels = c(TRUE, FALSE),
             sep = " : ",
             style = 1,
             horizontal = TRUE,
             ## FIXME: not sure how to incorporate alpha in strip colors
             bg = trellis.par.get("strip.background")$col[which.given],
             fg = trellis.par.get("strip.shingle")$col[which.given],
             par.strip.text = trellis.par.get("add.text"),
             dimx)
{
    ## The strip spans only the combined extent of the conditioning
    ## variables preceding this one (the difference from
    ## lattice::strip.default, which spans the full panel width).
    ## prod(numeric(0)) == 1, so the first variable gets full extent.
    prev.dim <- dimx[seq_len(which.given - 1)]
    extent <- prod(prev.dim)
    ## Clipped viewport used for the strip fill and text.
    if (horizontal)
        lattice:::pushViewport(lattice:::viewport(y = (which.given-0.5)/length(which.panel),
                                                  height = 1/length(which.panel),
                                                  width = extent,
                                                  x = extent/2,
                                                  clip = trellis.par.get("clip")$strip,
                                                  name = paste("strip.default", which.given,
                                                               sep = ".")))
    else
        lattice:::pushViewport(lattice:::viewport(x = 1 - (which.given-0.5)/length(which.panel),
                                                  width = 1/length(which.panel),
                                                  height = extent,
                                                  y = extent/2,
                                                  clip = trellis.par.get("clip")$strip,
                                                  name = paste("strip.default", which.given,
                                                               sep = ".")))
    gp.text <-
        lattice:::gpar(col = par.strip.text$col,
                       alpha = par.strip.text$alpha,
                       lineheight = par.strip.text$lineheight,
                       fontfamily = par.strip.text$fontfamily,
                       fontface = lattice:::chooseFace(par.strip.text$fontface,
                                                       par.strip.text$font),
                       cex = par.strip.text$cex)
    name <- var.name[which.given]
    level <- which.panel[which.given]
    strip.names <- rep(strip.names, length.out = 2)
    strip.levels <- rep(strip.levels, length.out = 2)
    ## str(shingle.intervals)
    ## Optionally abbreviate the strip labels, controlled by the abbr,
    ## minl and dot entries of par.strip.text.
    formatLabel <-
        function(s,
                 abbreviate = par.strip.text$abbr,
                 minlength = par.strip.text$minl,
                 dot = par.strip.text$dot)
    {
        if (is.null(abbreviate)) abbreviate <- FALSE
        if (is.null(minlength)) minlength <- 4
        if (is.null(dot)) dot <- FALSE
        if (abbreviate) abbreviate(s, minlength = minlength, dot = dot)
        else s
    }
    factor.levels <- formatLabel(factor.levels)
    if (!is.null(shingle.intervals))
    {
        ## This usually indicates shingles, as opposed to factors.
        ## 'style' will be completely ignored, and shingle.intervals
        ## encoded using bg and fg. Names and levels are both game.
        lattice:::grid.rect(gp = lattice:::gpar(fill = bg, col = bg))
        t <- range(shingle.intervals)
        ## r = current interval rescaled to [0, 1] within the full range.
        r <- (range(shingle.intervals[level,]) - t[1]) / diff(t)
        if (horizontal)
            lattice:::grid.rect(x = lattice:::unit(r %*% c(.5,.5),"npc"),
                                width = max(lattice:::unit(c(diff(r), 1), c("npc", "mm"))),
                                gp = lattice:::gpar(col = fg, fill = fg))
        else
            lattice:::grid.rect(y = lattice:::unit(r %*% c(.5,.5),"npc"),
                                height = max(lattice:::unit( c(diff(r), 1), c("npc", "mm"))),
                                gp = lattice:::gpar(col = fg, fill = fg))
        lattice:::paste.and.draw(name, factor.levels[level],
                                 sep = sep,
                                 horizontal = horizontal,
                                 showl = strip.names[2],
                                 showr = strip.levels[2],
                                 gp = gp.text)
    }
    else
    {
        ## Behaviour depends on 'style'. Will separate out coloring
        ## and text based on 'style'.
        num <- length(factor.levels)
        ## coloring:
        ## background: all except style = 2
        if (style != 2) lattice:::grid.rect(gp = lattice:::gpar(fill = bg, col = bg))
        ## foreground: needed only for style = 2, 3 and 4
        if (num > 0 && style %in% c(2, 3, 4))
        {
            if (horizontal)
            {
                lattice:::grid.rect(x = lattice:::unit((2*level-1)/(2*num), "npc"),
                                    width = lattice:::unit(1/num, "npc"),
                                    gp = lattice:::gpar(fill = fg, col = fg))
            }
            else
            {
                lattice:::grid.rect(y = lattice:::unit((2*level-1)/(2*num), "npc"),
                                    height = lattice:::unit(1/num, "npc"),
                                    gp = lattice:::gpar(fill = fg, col = fg))
            }
        }
        ## text: [names|levels] centered only if style = 1 or 3
        if (style %in% c(1, 3))
        {
            ## Was `horizontal = T`; T is a reassignable alias, so TRUE is
            ## spelled out.  NOTE(review): lattice::strip.default passes
            ## horizontal = horizontal here; hard-coding TRUE keeps the
            ## label text horizontal even for left-hand strips -- confirm
            ## this is the intended difference of the "2" variant.
            paste.and.draw2(name, factor.levels[level],
                            sep = sep,
                            horizontal = TRUE,
                            showl = strip.names[1],
                            showr = strip.levels[1],
                            gp = gp.text)
        }
        ## remaining cases
        else if (num > 0)
        {
            ## either all levels or only one
            lid <- if (style %in% c(2, 4)) 1:num else level
            if (horizontal)
            {
                lattice:::grid.text(label = factor.levels[lid],
                                    x = (2 * lid - 1) / (2 * num),
                                    gp = gp.text)
            }
            else
            {
                lattice:::grid.text(label = factor.levels[lid],
                                    y = (2 * lid - 1) / (2 * num),
                                    gp = gp.text)
            }
        }
    }
    lattice:::upViewport()
    ## border is drawn with clipping off
    if (horizontal)
        lattice:::pushViewport(lattice:::viewport(y = (which.given-0.5)/length(which.panel),
                                                  height = 1/length(which.panel),
                                                  width = extent,
                                                  x = extent/2,
                                                  clip = "off",
                                                  name = paste("strip.default.off",
                                                               which.given, sep = ".")))
    else
        lattice:::pushViewport(lattice:::viewport(x = 1 - (which.given-0.5)/length(which.panel),
                                                  width = 1/length(which.panel),
                                                  height = extent,
                                                  y = extent/2,
                                                  clip = "off",
                                                  name = paste("strip.default.off",
                                                               which.given, sep = ".")))
    strip.border <- trellis.par.get("strip.border")
    ## draw border for strip; border parameters are recycled and the
    ## which.given-th entry selected for this conditioning variable
    lattice:::grid.rect(gp =
                        lattice:::gpar(col = rep(strip.border$col,
                                                 length.out = which.given)[which.given],
                                       lty = rep(strip.border$lty,
                                                 length.out = which.given)[which.given],
                                       lwd = rep(strip.border$lwd,
                                                 length.out = which.given)[which.given],
                                       alpha = rep(strip.border$alpha,
                                                   length.out = which.given)[which.given],
                                       fill = "transparent"))
    lattice:::upViewport()
}
#' Draw a strip label of the form "left sep right"
#'
#' Writes the left text, the right text, and (when both are shown) the
#' separator into the current grid viewport.  Mirrors
#' \code{lattice:::paste.and.draw}, except that text in vertical strips
#' is drawn unrotated (\code{rot = 0}).
#'
#' @param left text drawn before the separator (typically a variable name).
#' @param right text drawn after the separator (typically a level label).
#' @param sep separator string; drawn only when both sides are shown.
#' @param horizontal logical; offsets are applied along x if \code{TRUE},
#'   along y otherwise.
#' @param center logical; if \code{TRUE}, centre the combined label
#'   rather than the separator.
#' @param showl logical; draw \code{left}?
#' @param showr logical; draw \code{right}?
#' @param gp graphical parameters passed on to \code{grid.text}.
paste.and.draw2 <- function(left, right, sep = " : ", horizontal = TRUE,
    center = TRUE, showl = TRUE, showr = TRUE, gp = lattice:::gpar())
{
    ## Nothing to draw when both halves are suppressed.
    if (!(showl || showr))
        return(invisible(NULL))
    both <- showl && showr
    ## Half the separator width; collapses to zero width when only one
    ## half is drawn (0.5 * FALSE == 0).
    half.sep <- lattice:::unit(0.5 * both, "strwidth", list(sep))
    anchor <- lattice:::unit(0.5, "npc")
    if (center) {
        ## Shift the anchor so the whole "left sep right" string is
        ## centred in the viewport instead of the separator itself.
        zero <- lattice:::unit(0, "mm")
        left.half <- if (showl) lattice:::unit(0.5, "strwidth", list(left)) else zero
        right.half <- if (showr) lattice:::unit(0.5 * showr, "strwidth", list(right)) else zero
        anchor <- anchor + left.half - right.half
    }
    if (horizontal) {
        if (both)
            lattice:::grid.text(sep, x = anchor, gp = gp)
        if (showl)
            lattice:::grid.text(left, x = anchor - half.sep, gp = gp, just = "right")
        if (showr)
            lattice:::grid.text(right, x = anchor + half.sep, gp = gp, just = "left")
    } else {
        if (both)
            lattice:::grid.text(sep, y = anchor, gp = gp, rot = 0)
        if (showl)
            lattice:::grid.text(left, y = anchor - half.sep, gp = gp, just = "right", rot = 0)
        if (showr)
            lattice:::grid.text(right, y = anchor + half.sep, gp = gp, just = "left", rot = 0)
    }
}
|
## Fit a 10-fold cross-validated elastic-net regression (alpha = 0.7) of
## the first column of the breast data set on columns 4..ncol, and append
## the fitted glmnet path to a log file.
library(glmnet)

## `head = T` relied on partial matching of read.table's `header`
## argument and on the reassignable alias T; both are spelled out.
mydata <- read.table("./TrainingSet/LassoBIC/breast.csv", header = TRUE, sep = ",")
## Response is column 1; predictors start at column 4 (columns 2-3 are
## skipped -- presumably identifiers, TODO confirm against the CSV schema).
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # make the CV fold assignment reproducible
## Renamed from `glm`, which masked stats::glm.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.7,
                    family = "gaussian", standardize = FALSE)
sink("./Model/EN/Lasso/breast/breast_074.txt", append = TRUE)
print(cv_fit$glmnet.fit)
sink()
| /Model/EN/Lasso/breast/breast_074.R | no_license | leon1003/QSMART | R | false | false | 351 | r | library(glmnet)
## Fit a 10-fold cross-validated elastic-net regression (alpha = 0.7) of
## the first column of the breast data set on columns 4..ncol, and append
## the fitted glmnet path to a log file (glmnet is loaded above).
## `head = T` relied on partial matching of read.table's `header`
## argument and on the reassignable alias T; both are spelled out.
mydata <- read.table("./TrainingSet/LassoBIC/breast.csv", header = TRUE, sep = ",")
## Response is column 1; predictors start at column 4 (columns 2-3 are
## skipped -- presumably identifiers, TODO confirm against the CSV schema).
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # make the CV fold assignment reproducible
## Renamed from `glm`, which masked stats::glm.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.7,
                    family = "gaussian", standardize = FALSE)
sink("./Model/EN/Lasso/breast/breast_074.txt", append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
# Shared fixtures for the LabeledScatter tests below.
context("Labeled scatter plot")
# 12 x 2 numeric matrix of (X, Y) coordinates with arbitrary string row
# names; the main input for the LabeledScatter() tests.
dat <- structure(c(-0.378331991098098, -0.81165552109611, 0.217951487400431,
1.5286107300616, -1.8772553732612, -0.427440736452652, 1.81260348695547,
0.0210023953600193, 0.00559155278839169, 0.784822531228735, -1.46479949238719,
-1.68259776871402, -1.51650839628927, 0.955458580527818, -1.34834878068353,
0.876829966842232, -0.114809313591938, -0.293554423897848, -0.789860399916284,
-0.609446296323656, -0.141567243052079, -0.453809127391586, 0.39578896667589,
-0.37481125870847), .Dim = c(12L, 2L), .Dimnames = list(c("nLAGph",
"7hVoOM", "iBnrwa", "o3Dw2c", "uMZoXB", "C1IrLI", "G3ueGs", "TveROQ",
"QCBe1q", "z0bGc5", "zsakBj", "z60tz8"), c("X", "Y")))
# Repeating 3-level grouping (A, B, C, A, B, ...) used as a categorical
# colour variable -- one entry per row of `dat`.
zgroup <- rep(LETTERS[1:3], 4)
# URLs of 12 SVG logo images, one per row of `dat`.
logos <- sprintf("https://displayrcors.azureedge.net/images/%s_grey.svg",
        c("apple", "soup", "bread", "car", "chicken", "rocket",
        "sickperson", "thumbsup", "elephant", "tree", "weight", "tools"))
# Requesting fewer labels than points should hide some labels and warn.
test_that("Max labels", {
    expect_warning(pp <- LabeledScatter(dat, scatter.max.labels = 5), "Some labels have been hidden")
})
# Logos supplied as a character vector, as a comma-separated string, and
# with one invalid entry must all be accepted.  Note that
# expect_error(expr, NA) asserts that NO error is raised.
test_that("Logos", {
    expect_error(pp <- LabeledScatter(dat, logos = logos), NA)
    expect_error(pp <- LabeledScatter(dat, logos = paste(logos, collapse=","), logo.size = 0.2), NA)
    expect_error(pp <- LabeledScatter(dat, logos = c("Typo", logos[-1]), logo.size = 0.2), NA)
})
# Trend lines: single table, grouped colours, and lists of tables.
test_that("Trend lines", {
    expect_error(pp <- LabeledScatter(dat, trend.line = TRUE), NA)
    expect_error(pp <- LabeledScatter(dat, scatter.colors = zgroup,
                scatter.colors.as.categorical = T, trend.line = TRUE), NA)
    expect_error(pp <- LabeledScatter(list(dat, dat+0.5, dat+1), trend.line = TRUE), NA)
    # Unnamed lists of tables should be auto-named with a warning.
    expect_warning(pp <- LabeledScatter(list(dat, dat+0.5, dat+1), trend.line = FALSE), "Tables have been automatically assigned names")
    expect_error(pp <- LabeledScatter(list(dat, dat+rnorm(24)), trend.line = TRUE, logos = logos, logo.size = 0.2), NA)
    # DS-1658
    # Two tables with identical row names/values should warn.
    tab3 <- structure(c(1, 2, 3, 4), .Dim = c(4L, 1L), .Dimnames = list(c("Apple", "Microsoft", "Google", "Yahoo"), "Price"))
    tab4 <- structure(c(1, 2, 3, 4), .Dim = c(4L, 1L), .Dimnames = list(c("Apple","Microsoft", "Google", "Yahoo"), "Price"))
    expect_warning(LabeledScatter(list(tab3, tab4)))
})
#z <- cbind(1:5, 1:5)
#rownames(z) <- letters[1:5]
#test_that("LabeledScatter called from Scatter", {
#    expect_error(Scatter(z, scatter.labels.as.hovertext = TRUE), NA)
#    expect_warning(Scatter(list(z, z+1, z+2)))
#    expect_error(Scatter(z, scatter.labels.as.hovertext = TRUE, logos=sprintf("https://displayrcors.azureedge.net/images/%s_grey.svg", c("apple", #"elephant", "cow", "chicken", "stickman"))), NA)
#})
# Extra/unused arguments (e.g. fit.type) must be tolerated by Scatter.
# NOTE(review): `F` below should be spelled FALSE (T/F are reassignable).
test_that("Labeled Scatter accepts unused arguments",
{
    x <- structure(1:10, .Names = c("a", "b", "c", "d", "e", "f", "g", "h", "i", "j"))
    expect_error(Scatter(x, scatter.labels.as.hovertext = F, fit.type = "None"), NA)
})
# A numeric colour variable combined with a qualitative palette warns.
test_that("Warning is given for numeric color with qualitative palette",
{
    expect_warning(Scatter(1:10, 1:10, scatter.colors = 1:10, scatter.colors.as.categorical = FALSE),
                   "For a numeric 'colors' variable, a qualitative palette should not be used")
})
| /tests/testthat/test-labeledscatterplot.R | no_license | cedchin/flipStandardCharts | R | false | false | 3,211 | r | context("Labeled scatter plot")
dat <- structure(c(-0.378331991098098, -0.81165552109611, 0.217951487400431,
1.5286107300616, -1.8772553732612, -0.427440736452652, 1.81260348695547,
0.0210023953600193, 0.00559155278839169, 0.784822531228735, -1.46479949238719,
-1.68259776871402, -1.51650839628927, 0.955458580527818, -1.34834878068353,
0.876829966842232, -0.114809313591938, -0.293554423897848, -0.789860399916284,
-0.609446296323656, -0.141567243052079, -0.453809127391586, 0.39578896667589,
-0.37481125870847), .Dim = c(12L, 2L), .Dimnames = list(c("nLAGph",
"7hVoOM", "iBnrwa", "o3Dw2c", "uMZoXB", "C1IrLI", "G3ueGs", "TveROQ",
"QCBe1q", "z0bGc5", "zsakBj", "z60tz8"), c("X", "Y")))
zgroup <- rep(LETTERS[1:3], 4)
logos <- sprintf("https://displayrcors.azureedge.net/images/%s_grey.svg",
c("apple", "soup", "bread", "car", "chicken", "rocket",
"sickperson", "thumbsup", "elephant", "tree", "weight", "tools"))
test_that("Max labels", {
expect_warning(pp <- LabeledScatter(dat, scatter.max.labels = 5), "Some labels have been hidden")
})
test_that("Logos", {
expect_error(pp <- LabeledScatter(dat, logos = logos), NA)
expect_error(pp <- LabeledScatter(dat, logos = paste(logos, collapse=","), logo.size = 0.2), NA)
expect_error(pp <- LabeledScatter(dat, logos = c("Typo", logos[-1]), logo.size = 0.2), NA)
})
test_that("Trend lines", {
expect_error(pp <- LabeledScatter(dat, trend.line = TRUE), NA)
expect_error(pp <- LabeledScatter(dat, scatter.colors = zgroup,
scatter.colors.as.categorical = T, trend.line = TRUE), NA)
expect_error(pp <- LabeledScatter(list(dat, dat+0.5, dat+1), trend.line = TRUE), NA)
expect_warning(pp <- LabeledScatter(list(dat, dat+0.5, dat+1), trend.line = FALSE), "Tables have been automatically assigned names")
expect_error(pp <- LabeledScatter(list(dat, dat+rnorm(24)), trend.line = TRUE, logos = logos, logo.size = 0.2), NA)
# DS-1658
tab3 <- structure(c(1, 2, 3, 4), .Dim = c(4L, 1L), .Dimnames = list(c("Apple", "Microsoft", "Google", "Yahoo"), "Price"))
tab4 <- structure(c(1, 2, 3, 4), .Dim = c(4L, 1L), .Dimnames = list(c("Apple","Microsoft", "Google", "Yahoo"), "Price"))
expect_warning(LabeledScatter(list(tab3, tab4)))
})
#z <- cbind(1:5, 1:5)
#rownames(z) <- letters[1:5]
#test_that("LabeledScatter called from Scatter", {
# expect_error(Scatter(z, scatter.labels.as.hovertext = TRUE), NA)
# expect_warning(Scatter(list(z, z+1, z+2)))
# expect_error(Scatter(z, scatter.labels.as.hovertext = TRUE, logos=sprintf("https://displayrcors.azureedge.net/images/%s_grey.svg", c("apple", #"elephant", "cow", "chicken", "stickman"))), NA)
#})
test_that("Labeled Scatter accepts unused arguments",
{
x <- structure(1:10, .Names = c("a", "b", "c", "d", "e", "f", "g", "h", "i", "j"))
expect_error(Scatter(x, scatter.labels.as.hovertext = F, fit.type = "None"), NA)
})
test_that("Warning is given for numeric color with qualitative palette",
{
expect_warning(Scatter(1:10, 1:10, scatter.colors = 1:10, scatter.colors.as.categorical = FALSE),
"For a numeric 'colors' variable, a qualitative palette should not be used")
})
|
\name{bfslice_eqp_u}
\alias{bfslice_eqp_u}
\docType{package}
\title{
Dependency detection between a level \eqn{k} (\eqn{k > 1}) categorical variable and a continuous variable via Bayes factor with given size of each group.
}
\description{
Dependency detection between a level \eqn{k} (\eqn{k > 1}) categorical variable \code{x} and a continuous variable \code{y} via Bayes factor with \eqn{O(n^{1/2})}-resolution. The basic idea is almost the same as \code{\link{bfslice_u}}. The only difference is that \code{\link{bfslice_eqp_u}} groups the samples into approximately \eqn{O(n^{1/2})} groups, each containing approximately \eqn{O(n^{1/2})} samples, and treats the groups as samples when calculating the Bayes factor.
}
\usage{
bfslice_eqp_u(x, dim, lambda, alpha)
}
\arguments{
\item{x}{Vector: observations of categorical variable, \eqn{0,1,\ldots,k-1} for level \eqn{k} categorical variable, should be ranked according to values of continuous variable \code{y}, either ascending or descending.}
\item{dim}{Level of \code{x}, equals \eqn{k}.}
\item{lambda}{\code{lambda} corresponds to the probability that makes slice in each possible position. \code{lambda} should be greater than 0.}
\item{alpha}{\code{alpha} is the hyper-parameter of the prior distribution of the frequency in each slice. \code{alpha} should be greater than 0 and less than or equal to \eqn{k}.}
}
\value{
Value of the Bayes factor (nonnegative). The Bayes factor can be treated as a statistic: one can choose a threshold and then calculate the corresponding Type I error rate. One can also use the value of the Bayes factor directly for judgement.
}
\seealso{
\code{\link{bfslice_u}, \link{bfslice_eqp_c}}.
}
\references{
Jiang, B., Ye, C. and Liu, J.S. Bayesian nonparametric tests via sliced inverse modeling. \emph{Bayesian Analysis}, 12(1): 89-112, 2017.
}
\examples{
n <- 1000
mu <- 0.2
y <- c(rnorm(n, -mu, 1), rnorm(n, mu, 1))
x <- c(rep(0, n), rep(1, n))
x <- x[order(y)]
dim <- max(x) + 1
lambda <- 1.0
alpha <- 1.0
bfval <- bfslice_eqp_u(x, dim, lambda, alpha)
}
| /issuestests/dslice/man/bfslice_eqp_u.Rd | no_license | akhikolla/RcppDeepStateTest | R | false | false | 2,018 | rd | \name{bfslice_eqp_u}
\alias{bfslice_eqp_u}
\docType{package}
\title{
Dependency detection between a level \eqn{k} (\eqn{k > 1}) categorical variable and a continuous variable via Bayes factor with given size of each group.
}
\description{
Dependency detection between a level \eqn{k} (\eqn{k > 1}) categorical variable \code{x} and a continuous variable \code{y} via Bayes factor with \eqn{O(n^{1/2})}-resolution. The basic idea is almost the same as \code{\link{bfslice_u}}. The only difference is that \code{\link{bfslice_eqp_u}} groups the samples into approximately \eqn{O(n^{1/2})} groups, each containing approximately \eqn{O(n^{1/2})} samples, and treats the groups as samples when calculating the Bayes factor.
}
\usage{
bfslice_eqp_u(x, dim, lambda, alpha)
}
\arguments{
\item{x}{Vector: observations of categorical variable, \eqn{0,1,\ldots,k-1} for level \eqn{k} categorical variable, should be ranked according to values of continuous variable \code{y}, either ascending or descending.}
\item{dim}{Level of \code{x}, equals \eqn{k}.}
\item{lambda}{\code{lambda} corresponds to the probability that makes slice in each possible position. \code{lambda} should be greater than 0.}
\item{alpha}{\code{alpha} is the hyper-parameter of the prior distribution of the frequency in each slice. \code{alpha} should be greater than 0 and less than or equal to \eqn{k}.}
}
\value{
Value of the Bayes factor (nonnegative). The Bayes factor can be treated as a statistic: one can choose a threshold and then calculate the corresponding Type I error rate. One can also use the value of the Bayes factor directly for judgement.
}
\seealso{
\code{\link{bfslice_u}, \link{bfslice_eqp_c}}.
}
\references{
Jiang, B., Ye, C. and Liu, J.S. Bayesian nonparametric tests via sliced inverse modeling. \emph{Bayesian Analysis}, 12(1): 89-112, 2017.
}
\examples{
n <- 1000
mu <- 0.2
y <- c(rnorm(n, -mu, 1), rnorm(n, mu, 1))
x <- c(rep(0, n), rep(1, n))
x <- x[order(y)]
dim <- max(x) + 1
lambda <- 1.0
alpha <- 1.0
bfval <- bfslice_eqp_u(x, dim, lambda, alpha)
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/adapt.R
\name{adapt}
\alias{adapt}
\title{Adapt parameters}
\usage{
adapt(object, samples, total)
}
\arguments{
\item{object}{the node object.}
\item{samples}{the set of samples.}
\item{total}{the sum of the likelihood of the samples.}
}
\value{
the updated node
}
\description{
S3 generic function for adapting a node's parameters given a set of samples.
}
\keyword{internal}
| /man/adapt.Rd | permissive | mickash/Adaptive-Bayesian-Networks | R | false | false | 466 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/adapt.R
\name{adapt}
\alias{adapt}
\title{Adapt parameters}
\usage{
adapt(object, samples, total)
}
\arguments{
\item{object}{the node object.}
\item{samples}{the set of samples.}
\item{total}{the sum of the likelihood of the samples.}
}
\value{
the updated node
}
\description{
S3 generic function for adapting a node's parameters given a set of samples.
}
\keyword{internal}
|
plot.mc.pixel.R<-function(mc.object, spp.string, quants=c(0.2, 0.5, 0.8), plot.trials = TRUE, annual.plots = TRUE, connector = "sum", page.layout = c(4, 4), Rylimits = c(0, 3), color.tags = NULL)
{
  # Plot per-pixel (relative) recruitment trajectories from Monte Carlo trials.
  # George Watters; code last edited 18 July 2006
  #
  # Arguments:
  #   mc.object    result object with components $R$R[[spp.string]] (an
  #                ntimes x npixels x ntrials array), $setup and,
  #                optionally, $color.tags
  #   spp.string   name of the species element inside mc.object$R$R
  #   quants       up to 3 quantiles drawn in red (middle solid, outer
  #                dashed); NULL suppresses them and forces plot.trials=TRUE
  #   plot.trials  draw every individual trial trajectory?
  #   annual.plots aggregate seasons into years (TRUE) or plot all seasons?
  #   connector    "sum" (or any character) aggregates within years; a
  #                season number connects that season's values across years
  #   page.layout  mfrow layout (rows, cols) of each plot page
  #   Rylimits     y-axis limits for every panel
  #   color.tags   NOTE(review): accepted but never used -- the function
  #                reads mc.object$color.tags instead; confirm intended.
  #
  # was max(mc.object$color.tags>15); any() states the intent directly
  if(!is.null(mc.object$color.tags)&&any(mc.object$color.tags>15)){
    stop("FAULT -- software not designed to deal with plotting more than 15 colors in a single panel.\nIf you're sure you want to do this we can easily edit the color table.")
  }
  if(!is.null(quants)&&length(quants) > 3){stop("FAULT: Sorry, you can only plot 3 quantiles.")}
  #
  # Direct list indexing replaces the original eval(parse(text = ...));
  # equivalent, but [[ uses exact name matching and cannot evaluate
  # arbitrary code.
  tt.data1 <- mc.object$R$R[[spp.string]]
  ntrials <- mc.object$setup$ntrials
  nssmus <- mc.object$setup$nssmus
  nyears <- mc.object$setup$nyears
  nseasons <- mc.object$setup$nseasons
  ntimes <- mc.object$setup$ntimes
  #
  # get the desired data as determined by the arguments annual.plots and connector
  # then standardize these data as appropriate
  if(annual.plots){
    season.vector <- rep(1:nseasons, length.out = ntimes)
    year.vector <- rep(1:(ntimes/nseasons),each = nseasons)
    time.label <- "year"
    if(is.character(connector)){
      plot.time <- unique(year.vector)
      tt.data2 <- array(0,dim=c(length(unique(year.vector)),nssmus,ntrials))
      for(j in 1:ntrials){
        for(i in 1:nssmus){
          tt.denom <- sum(tt.data1[1:nseasons,i,j])
          # if you have already used relative.mc() then the denominator should be 1
          if(!is.null(mc.object$setup$relative)){tt.denom<-1}
          # also if you have already used relative.mc() then means are probably more
          # sensible than sums
          if(!is.null(mc.object$setup$relative)){
            tt.y <- as.vector(tapply(tt.data1[,i,j],list(year.vector),mean,na.rm=TRUE))
          } else {
            tt.y <- as.vector(tapply(tt.data1[,i,j],list(year.vector),sum))
          }
          tt.data2[,i,j] <- tt.y/tt.denom
        }
      }
      title.prefix <- "sumR/sumR[yr1]"
      if(!is.null(mc.object$setup$relative)){title.prefix <- "avg(relative R)"}
    }
    if(is.numeric(connector)){
      # first check that the connector is a feasible season
      if(connector > nseasons){stop("FAULT: connector season > nseasons")}
      keepers <- (season.vector == connector)
      plot.time <- year.vector[keepers]
      tt.data2 <- array(0,dim=c(length(unique(year.vector)),nssmus,ntrials))
      for(j in 1:ntrials){
        for(i in 1:nssmus){
          tt.denom <- tt.data1[connector,i,j]
          # if you have already used relative.mc() then the denominator should be 1
          if(!is.null(mc.object$setup$relative)){tt.denom<-1}
          tt.y <- tt.data1[(1:ntimes)[keepers],i,j]
          tt.data2[,i,j] <- tt.y/tt.denom
        }
      }
      title.prefix <- paste("R",connector,"/R",connector,"[yr1]",sep="")
      if(!is.null(mc.object$setup$relative)){title.prefix <- paste("relative R",connector,sep="")}
    }
  }
  else {
    # no annual aggregation: plot every time step, standardized by the
    # first time step (or not at all when relative.mc() was already used)
    plot.time <- 1:ntimes
    tt.data2 <- array(0,dim=c(ntimes,nssmus,ntrials))
    for(j in 1:ntrials){
      for(i in 1:nssmus){
        tt.denom <- tt.data1[1,i,j]
        # if you have already used relative.mc() then the denominator should be 1
        if(!is.null(mc.object$setup$relative)){tt.denom<-1}
        tt.y <- tt.data1[,i,j]
        tt.data2[,i,j] <- tt.y/tt.denom
      }
    }
    title.prefix <- "R/R[1]"
    if(!is.null(mc.object$setup$relative)){title.prefix <- "relative R"}
    time.label<-"season"
  }
  #
  # now compute quantiles across trials (low, median, high) per time step
  if(is.null(quants)){
    quants <- rep(NA, 3)
    plot.trials <- TRUE
  }
  if(!is.na(quants[2])) {
    ttmed <- apply(tt.data2, 2, FUN = function(x, prob)
    {
      apply(x, 1, quantile, probs = prob, na.rm = TRUE)
    }
    , prob = quants[2])
  }
  if(!is.na(quants[1])) {
    ttlow <- apply(tt.data2, 2, FUN = function(x, prob)
    {
      apply(x, 1, quantile, probs = prob, na.rm = TRUE)
    }
    , prob = quants[1])
  }
  if(!is.na(quants[3])) {
    tthigh <- apply(tt.data2, 2, FUN = function(x, prob)
    {
      apply(x, 1, quantile, probs = prob, na.rm = TRUE)
    }
    , prob = quants[3])
  }
  title.suffix <- paste("quantiles = ", deparse(quants), sep="")
  title.string <- paste(spp.string, title.prefix, title.suffix, sep = " -- ")
  # quantile lines are drawn thicker when individual trials are also shown
  red.width <- ifelse(plot.trials,2,1)
  #
  # set up the color table
  # now actually turn the color.tags into colors that are interpretable by the plot functions
  # black, blue, green, yellow, magenta, orange, cyan, lightgoldenrod, blueviolet, springgreen, gray47, aquamarine3, orange4, purple, yellow4
  if(!is.null(mc.object$color.tags)){
    tt.colors <- colors()[c(24,26,254,652,450,498,68,410,31,610,200,11,502,547,656)]
    tt.colors <- tt.colors[match(mc.object$color.tags,1:15)]
  }
  else{
    tt.colors <- rep("black",ntrials)
  }
  #
  # create list for plot titles ("SSMU.pixel", e.g. "3.2")
  ssmu.orig <- mc.object$setup$orig.SSMU
  pxl.list <- mc.object$setup$pixel.list
  title.list <- NULL
  for (k in 1:ssmu.orig){
    no.pixel <- pxl.list[k]
    for (m in 1:no.pixel){
      title.list <- c(title.list, c(paste0(k, ".", m)))
    }
  }
  #
  # now do the plotting
  # NOTE(review): windows() only works on MS Windows; use dev.new() for a
  # cross-platform device -- confirm before changing.
  windows()
  origpar <- par(no.readonly=TRUE)
  #par(oma = c(0, 0, 2, 0), mfrow = page.layout)
  par(oma = c(4, 2, 4, 3), mar=c(4,4,1,0)+0.1, mgp=c(2,0.75,0), xpd=FALSE, mfrow = page.layout)
  panel.count <- 1
  # panels in the left column get a y-axis label; bottom-row panels get
  # the time-axis label
  left.col.panels <- seq(from=1,to=page.layout[1]*page.layout[2],by=page.layout[2])
  bottom.panels <- (1:(page.layout[1]*page.layout[2]))[max(left.col.panels):((page.layout[1]*page.layout[2]))]
  for(i in 1:nssmus) {
    # start a fresh page/device when the current one is full
    if(panel.count > (page.layout[1] * page.layout[2])) {
      panel.count <- 1
      par(origpar)
      windows()
      #par(oma = c(0, 0, 2, 0), mfrow = page.layout)
      par(oma = c(4, 2, 4, 3), mar=c(4,4,1,0)+0.1, mgp=c(2,0.75,0), xpd=FALSE, mfrow = page.layout)
    }
    if(is.element(panel.count,left.col.panels)){ylabel<-"relative recruitment"}else{ylabel<-""}
    if(is.element(panel.count,bottom.panels)){xlabel<-time.label}else{xlabel<-""}
    if(!all(is.na(tt.data2[, i, 1]))) {
      plot(plot.time, tt.data2[, i, 1], type = "n", ylim = Rylimits, ylab = ylabel,
        xlab = xlabel,axes=FALSE)
      box()
      axis(1,cex.axis=0.8)
      axis(2,cex.axis=0.8)
      if(plot.trials){
        for(j in 1:ntrials) {
          lines(plot.time, tt.data2[, i, j], col = tt.colors[j])
        }
      }
      if(!is.na(quants[2])){
        lines(plot.time, ttmed[, i], col = "red", lwd = red.width, lty = 1)
      }
      if(!is.na(quants[1])) {
        lines(plot.time, ttlow[, i], col = "red", lwd = red.width, lty = 2)
      }
      if(!is.na(quants[3])) {
        lines(plot.time, tthigh[, i], col = "red", lwd = red.width, lty = 2)
      }
    }
    else {
      # all-NA pixel: draw an empty frame so the page layout stays aligned
      plot(range(plot.time), Rylimits, type = "n", ylab = ylabel, xlab = xlabel, axes=FALSE)
      box()
      axis(1,cex.axis=0.8)
      axis(2,cex.axis=0.8)
    }
    title(main=paste("Pixel ", title.list[i], sep = ""), line = 0.5, outer = FALSE, cex.main = 0.9)
    panel.count <- panel.count + 1
    if(panel.count > (page.layout[1] * page.layout[2])) {
      mtext(title.string, line = 1, cex = 0.75, outer = TRUE)
    }
  }
  mtext(title.string, line = 1, cex = 0.75, outer = TRUE)
}
| /plot.mc.pixel.R.r | no_license | EmilyKlein/KPFM2_MPA_FBM | R | false | false | 7,363 | r | plot.mc.pixel.R<-function(mc.object, spp.string, quants=c(0.2, 0.5, 0.8), plot.trials = TRUE, annual.plots = TRUE, connector = "sum", page.layout = c(4, 4), Rylimits = c(0, 3), color.tags = NULL)
{
# auxiliary function to plot abundances from Monte Carlo trials
# George Watters
# code last edited 18 July 2006
#
if(!is.null(mc.object$color.tags)&&max(mc.object$color.tags>15)){
stop("FAULT -- software not designed to deal with plotting more than 15 colors in a single panel.\nIf you're sure you want to do this we can easily edit the color table.")
}
if(!is.null(quants)&&length(quants) > 3){stop("FAULT: Sorry, you can only plot 3 quantiles.")}
#
tt.data1 <- eval(parse(text = paste(as.character(quote(mc.object)),"$R$R$",spp.string,sep="")))
ntrials <- mc.object$setup$ntrials
nssmus <- mc.object$setup$nssmus
nyears <- mc.object$setup$nyears
nseasons <- mc.object$setup$nseasons
ntimes <- mc.object$setup$ntimes
#
# get the desired data as determined by the arguments annual.plots and connector
# then standardize these data as appropriate
if(annual.plots){
season.vector <- rep(1:nseasons, length.out = ntimes)
year.vector <- rep(1:(ntimes/nseasons),each = nseasons)
time.label <- "year"
if(is.character(connector)){
plot.time <- unique(year.vector)
tt.data2 <- array(0,dim=c(length(unique(year.vector)),nssmus,ntrials))
for(j in 1:ntrials){
for(i in 1:nssmus){
tt.denom <- sum(tt.data1[1:nseasons,i,j])
# if you have already used relative.mc() then the denominator should be 1
if(!is.null(mc.object$setup$relative)){tt.denom<-1}
# also if you have already used relative.mc() then means are probably more
# sensible than sums
if(!is.null(mc.object$setup$relative)){
tt.y <- as.vector(tapply(tt.data1[,i,j],list(year.vector),mean,na.rm=TRUE))
} else {
tt.y <- as.vector(tapply(tt.data1[,i,j],list(year.vector),sum))
}
tt.data2[,i,j] <- tt.y/tt.denom
}
}
title.prefix <- "sumR/sumR[yr1]"
if(!is.null(mc.object$setup$relative)){title.prefix <- "avg(relative R)"}
}
if(is.numeric(connector)){
# first check that the connector is a feasible season
if(connector > nseasons){stop("FAULT: connector season > nseasons")}
keepers <- (season.vector == connector)
plot.time <- year.vector[keepers]
tt.data2 <- array(0,dim=c(length(unique(year.vector)),nssmus,ntrials))
for(j in 1:ntrials){
for(i in 1:nssmus){
tt.denom <- tt.data1[connector,i,j]
# if you have already used relative.mc() then the denominator should be 1
if(!is.null(mc.object$setup$relative)){tt.denom<-1}
tt.y <- tt.data1[(1:ntimes)[keepers],i,j]
tt.data2[,i,j] <- tt.y/tt.denom
}
}
title.prefix <- paste("R",connector,"/R",connector,"[yr1]",sep="")
if(!is.null(mc.object$setup$relative)){title.prefix <- paste("relative R",connector,sep="")}
}
}
else {
plot.time <- 1:ntimes
tt.data2 <- array(0,dim=c(ntimes,nssmus,ntrials))
for(j in 1:ntrials){
for(i in 1:nssmus){
tt.denom <- tt.data1[1,i,j]
# if you have already used relative.mc() then the denominator should be 1
if(!is.null(mc.object$setup$relative)){tt.denom<-1}
tt.y <- tt.data1[,i,j]
tt.data2[,i,j] <- tt.y/tt.denom
}
}
title.prefix <- "R/R[1]"
if(!is.null(mc.object$setup$relative)){title.prefix <- "relative R"}
time.label<-"season"
}
#
# now compute quantiles
if(is.null(quants)){
quants <- rep(NA, 3)
plot.trials <- TRUE
}
if(!is.na(quants[2])) {
ttmed <- apply(tt.data2, 2, FUN = function(x, prob)
{
apply(x, 1, quantile, probs = prob, na.rm = TRUE)
}
, prob = quants[2])
}
if(!is.na(quants[1])) {
ttlow <- apply(tt.data2, 2, FUN = function(x, prob)
{
apply(x, 1, quantile, probs = prob, na.rm = TRUE)
}
, prob = quants[1])
}
if(!is.na(quants[3])) {
tthigh <- apply(tt.data2, 2, FUN = function(x, prob)
{
apply(x, 1, quantile, probs = prob, na.rm = TRUE)
}
, prob = quants[3])
}
title.suffix <- paste("quantiles = ", deparse(quants), sep="")
title.string <- paste(spp.string, title.prefix, title.suffix, sep = " -- ")
red.width <- ifelse(plot.trials,2,1)
#
# set up the color table
# now actually turn the color.tags into colors that are interpretable by the plot functions
# black, blue, green, yellow, magenta, orange, cyan, lightgoldenrod, blueviolet, springgreen, gray47, aquamarine3, orange4, purple, yellow4
if(!is.null(mc.object$color.tags)){
tt.colors <- colors()[c(24,26,254,652,450,498,68,410,31,610,200,11,502,547,656)]
tt.colors <- tt.colors[match(mc.object$color.tags,1:15)]
}
else{
tt.colors <- rep("black",ntrials)
}
#
# create list for plot titles
ssmu.orig <- mc.object$setup$orig.SSMU
pxl.list <- mc.object$setup$pixel.list
title.list <- NULL
for (k in 1:ssmu.orig){
no.pixel <- pxl.list[k]
for (m in 1:no.pixel){
title.list <- c(title.list, c(paste0(k, ".", m)))
}
}
#
# now do the plotting
windows()
origpar <- par(no.readonly=TRUE)
#par(oma = c(0, 0, 2, 0), mfrow = page.layout)
par(oma = c(4, 2, 4, 3), mar=c(4,4,1,0)+0.1, mgp=c(2,0.75,0), xpd=FALSE, mfrow = page.layout)
panel.count <- 1
left.col.panels <- seq(from=1,to=page.layout[1]*page.layout[2],by=page.layout[2])
bottom.panels <- (1:(page.layout[1]*page.layout[2]))[max(left.col.panels):((page.layout[1]*page.layout[2]))]
for(i in 1:nssmus) {
if(panel.count > (page.layout[1] * page.layout[2])) {
panel.count <- 1
par(origpar)
windows()
#par(oma = c(0, 0, 2, 0), mfrow = page.layout)
par(oma = c(4, 2, 4, 3), mar=c(4,4,1,0)+0.1, mgp=c(2,0.75,0), xpd=FALSE, mfrow = page.layout)
}
if(is.element(panel.count,left.col.panels)){ylabel<-"relative recruitment"}else{ylabel<-""}
if(is.element(panel.count,bottom.panels)){xlabel<-time.label}else{xlabel<-""}
if(!all(is.na(tt.data2[, i, 1]))) {
plot(plot.time, tt.data2[, i, 1], type = "n", ylim = Rylimits, ylab = ylabel,
xlab = xlabel,axes=FALSE)
box()
axis(1,cex.axis=0.8)
axis(2,cex.axis=0.8)
if(plot.trials){
for(j in 1:ntrials) {
lines(plot.time, tt.data2[, i, j], col = tt.colors[j])
}
}
if(!is.na(quants[2])){
lines(plot.time, ttmed[, i], col = "red", lwd = red.width, lty = 1)
}
if(!is.na(quants[1])) {
lines(plot.time, ttlow[, i], col = "red", lwd = red.width, lty = 2)
}
if(!is.na(quants[3])) {
lines(plot.time, tthigh[, i], col = "red", lwd = red.width, lty = 2)
}
}
else {
plot(range(plot.time), Rylimits, type = "n", ylab = ylabel, xlab = xlabel, axes=FALSE)
box()
axis(1,cex.axis=0.8)
axis(2,cex.axis=0.8)
}
title(main=paste("Pixel ", title.list[i], sep = ""), line = 0.5, outer = FALSE, cex.main = 0.9)
panel.count <- panel.count + 1
if(panel.count > (page.layout[1] * page.layout[2])) {
mtext(title.string, line = 1, cex = 0.75, outer = TRUE)
}
}
mtext(title.string, line = 1, cex = 0.75, outer = TRUE)
}
|
#' The number of segregating sites
#'
#' The density function of the total number of segregating sites
#'
#' The density of the total number of segregating sites can be obtained
#' by the aid of the block counting process together with the reward
#' transformation and the discretization. For more information on this topic see \code{vignette("PhaseTypeGenetics")} or
#' Hobolth et al. (2019): \emph{Phase-type distributions in population genetics}.
#'
#' @param n the sample size (n >= 1).
#' @param theta the mutation parameter (theta > 0).
#' @param k a non-negative number or a non-negative vector.
#' @param plot a logical value indicating whether the function should
#' plot the density of the total number of segregating sites for the
#' given values of k.
#'
#' @return The function returns the probabilities \eqn{P(S=k)} for all values
#' of \eqn{k}. Hence, the returned object is of the same length as \eqn{k}.
#' If \code{plot=TRUE}, the function also plots the densities as a function of
#' \eqn{k}.
#'
#' @source Mogens Bladt and Bo Friis Nielsen (2017):
#' \emph{ Matrix-Exponential Distributions in Applied Probability}.
#' Probability Theory and Stochastic Modelling (Springer), Volume 81.
#'
#' @source Asger Hobolth, Arno Siri-Jégousse, Mogens Bladt (2019):
#' \emph{Phase-type distributions in population genetics}.
#' Theoretical Population Biology, 127, pp. 16-32.
#'
#' @seealso \code{\link{SiteFrequencies}}, \code{\link{dphasetype}}.
#'
#' @importFrom expm %^%
#'
#' @examples
#'
#' ## Computing the density for a sample of n=5
#' dSegregatingSites(n=5,theta=2,k=5)
#' dSegregatingSites(n=5,theta=2,k=1:20, plot=TRUE)
#'
#' ## We apply the function for different sample sizes
#' ## and theta=2
#' k_vec <- 0:15
#' theta <- 2
#' ## Defining a matrix of results
#' Res_Mat <- dSegregatingSites(n = 1, theta = theta, k = k_vec)
#' ## And Applying the function for all n in {2,...,20}
#' for(n in 2:20){
#'
#' Res_Mat <- cbind(Res_Mat, dSegregatingSites(n = n, theta = theta, k = k_vec))
#' }
#'
#' ## We reproduce Figure 4.1 in John Wakeley (2009):
#' ## "Coalescent Theory: An Introduction",
#' ## Roberts and Company Publishers, Colorado.
#' ## by using the package plot3D.
#' plot3D::hist3D(x=k_vec, y=1:20, z=Res_Mat, col = "grey", border = "black",
#' xlab = "k", ylab = "n", zlab = "P(S=k)",
#' main = "The probability function of the number of segregating sites",
#' sub = expression(paste("The mutation parameter is ", theta,"= 2")),
#' cex.main = 0.9, colkey = FALSE, zlim = c(0,0.4))
#'
#' @export
dSegregatingSites <- function(n, theta, k, plot = FALSE){
  ## Density P(S = k) of the total number of segregating sites S for a
  ## sample of size n under the standard coalescent with mutation
  ## parameter theta (phase-type representation, Hobolth et al. 2019).
  ##
  ## Args:
  ##   n:     sample size (>= 1); non-integers are floored with a warning.
  ##   theta: mutation parameter (> 0).
  ##   k:     non-negative quantile(s).
  ##   plot:  if TRUE, additionally plot the density over k.
  ## Returns: numeric vector of probabilities, one per entry of k.
  if(n < 1) stop("Invalid sample size. n has to be greater than or equal to 1.")
  if(n != floor(n)){
    # typo "proviede" fixed; accidental embedded newline/indent removed
    warning(paste0("The provided sample size n is not a natural number. ",
                   "The function will use n = ", floor(n), " instead."))
  }
  n <- floor(n)
  if(theta <= 0) stop("Invalid mutation parameter. Theta must be greater than 0.")
  if(any(k < 0)) stop("Invalid vector of quantiles. k has to be nonnegative!")
  if(!is.logical(plot)) stop(" 'plot' must be a logical value")
  if(n == 1){
    ## A single lineage never coalesces, so no branch length accumulates.
    ## NOTE(review): arguably P(S = 0) = 1 here; the all-zero result is
    ## kept for backward compatibility with the documented examples -- confirm.
    res <- numeric(length(k))
  }else if(n == 2){
    ## For n = 2 the discretized phase-type distribution collapses to a
    ## scalar: S is geometric with success probability 1/(theta + 1).
    P.mat <- theta/(theta + 1)   # sub-transition "matrix" (scalar)
    p.vec <- 1/(theta + 1)       # exit probability
    res <- (P.mat^k)*p.vec
  }else{
    ## State space and rate matrix of the block counting process of the
    ## standard coalescent for n samples.
    bcp <- BlockCountProcess(n)
    Tmat <- bcp$Rate_Mat
    ## The process starts in the first state with probability one.
    pi.vec <- c(1, numeric(nrow(Tmat) - 1))
    ## Reward vector corresponding to xi_1 + ... + xi_{n-1}: number of
    ## branches carried by each state of the block counting process.
    r.vec <- rowSums(bcp$StateSpace_Mat)
    ## All rewards are positive, so the reward-transformed sub-intensity
    ## matrix is obtained by scaling row i with 1/r(i).
    Tmat <- diag(1/r.vec) %*% Tmat
    ## Discretization: sub-transition matrix of the distribution of the
    ## number of segregating sites.
    P.mat <- solve(diag(nrow = nrow(Tmat)) - (2/theta)*Tmat)
    exit.vec <- 1 - rowSums(P.mat)    # absorption (exit) probabilities
    res <- numeric(length(k))         # preallocate (was grown from NULL)
    for(i in seq_along(k)){
      if(k[i] %% 1 != 0){
        ## Non-integer quantiles carry no mass (discrete support).
        warning("One or more quantiles are not natural numbers. The corresponding probabilities are set to 0.")
        res[i] <- 0
      }else{
        res[i] <- pi.vec %*% (P.mat %^% k[i]) %*% exit.vec
      }
    }
  }
  if(plot){
    plot(x = k, y = res, type = "l", col = "darkgrey",
         xlab = "k", ylab = expression(paste("P(", S["Total"], "=k)")),
         main = "The density function of the number of segregating sites",
         sub = paste("The mutation parameter is equal to", theta),
         cex.main = 0.9, ylim = c(0, max(res)*1.2))
  }
  return(res)
}
| /R/DocumentationNumberOfSegregatingSites.R | no_license | aumath-advancedr2019/PhaseTypeGenetics | R | false | false | 4,994 | r | #' The number of segregating sites
#'
#' The density function of the total number of segregating sites
#'
#' The density of the total number of segregating sites can be obtained
#' by the aid of the block counting process together with the reward
#' transformation and the discretization. For more information on this topic see \code{vignette("PhaseTypeGenetics")} or
#' Hobolth et al. (2019): \emph{Phase-type distributions in population genetics}.
#'
#' @param n the sample size (n >= 1).
#' @param theta the mutation parameter (theta > 0).
#' @param k a non-negative number or a non-negative vector.
#' @param plot a logical value indicating whether the function should
#' plot the density of the total number of segregating sites for the
#' given values of k.
#'
#' @return The function returns the probabilities \eqn{P(S=k)} for all values
#' of \eqn{k}. Hence, the returned object is of the same length as \eqn{k}.
#' If \code{plot=TRUE}, the function also plots the densities as a function of
#' \eqn{k}.
#'
#' @source Mogens Bladt and Bo Friis Nielsen (2017):
#' \emph{ Matrix-Exponential Distributions in Applied Probability}.
#' Probability Theory and Stochastic Modelling (Springer), Volume 81.
#'
#' @source Asger Hobolth, Arno Siri-Jégousse, Mogens Bladt (2019):
#' \emph{Phase-type distributions in population genetics}.
#' Theoretical Population Biology, 127, pp. 16-32.
#'
#' @seealso \code{\link{SiteFrequencies}}, \code{\link{dphasetype}}.
#'
#' @importFrom expm %^%
#'
#' @examples
#'
#' ## Computing the density for a sample of n=5
#' dSegregatingSites(n=5,theta=2,k=5)
#' dSegregatingSites(n=5,theta=2,k=1:20, plot=TRUE)
#'
#' ## We apply the function for different sample sizes
#' ## and theta=2
#' k_vec <- 0:15
#' theta <- 2
#' ## Defining a matrix of results
#' Res_Mat <- dSegregatingSites(n = 1, theta = theta, k = k_vec)
#' ## And Applying the function for all n in {2,...,20}
#' for(n in 2:20){
#'
#' Res_Mat <- cbind(Res_Mat, dSegregatingSites(n = n, theta = theta, k = k_vec))
#' }
#'
#' ## We reproduce Figure 4.1 in John Wakeley (2009):
#' ## "Coalescent Theory: An Introduction",
#' ## Roberts and Company Publishers, Colorado.
#' ## by using the package plot3D.
#' plot3D::hist3D(x=k_vec, y=1:20, z=Res_Mat, col = "grey", border = "black",
#' xlab = "k", ylab = "n", zlab = "P(S=k)",
#' main = "The probability function of the number of segregating sites",
#' sub = expression(paste("The mutation parameter is ", theta,"= 2")),
#' cex.main = 0.9, colkey = FALSE, zlim = c(0,0.4))
#'
#' @export
dSegregatingSites <- function(n, theta, k, plot =FALSE){
if(n < 1) stop("Invalid sample size. n has to be greater than or equal to 1.")
if(n != floor(n)) warning(paste("The proviede sample size n is not a natural number.\n
The function will use n= ", floor(n), " instead."))
n = floor(n)
if(theta <=0 ) stop("Invalid mutation parameter. Theta must be greater than 0.")
if(sum(k<0)>0) stop("Invalid vector of quantiles. k has to be nonnegative!")
if(!is.logical(plot)) stop(" 'plot' must be a logical value")
if(n==1){
res <- replicate(length(k),0)
}else if(n==2){
## We define the reward transformed subtransition probability matrix
P.mat <- theta/(theta+1)
p.vec <- 1/(theta+1)
## We store the results in a matrix
res <- (P.mat^k)*p.vec
}else{
## For a given number n of samples, we find the state
## space and the corresponding rate matrix for the block
## counting process in the standard coalescent
res <- BlockCountProcess(n)
## The rate matrix
Tmat <- res$Rate_Mat
## and the corresponding inital distribution
pi.vec <- c(1,replicate(nrow(Tmat)-1,0))
## In order to find the distribution for the number
## of segregating sites, we need a reward vector that
## correpsonds to xi_1+...+_xi_n-1. Hence
r.vec <- rowSums(res$StateSpace_Mat)
## As all enties in the reward vector are positive, we
## can define the reward-transformed sub-intensity matrix
## by multiplying with the matrix that has 1/r(i) on its
## diagonal
Tmat <- diag(1/r.vec)%*%Tmat
## Now we can compute the distribution of the number of
## segregating sites by using the descretization:
P.mat <- solve(diag(nrow = nrow(Tmat))-(2/theta)*Tmat)
res <- NULL
for (i in k) {
if(i%%1!=0){
warning("One or more quantiles are not natural numbers.\n
The corresponding probabilities are set to 0.")
res[which(k==i)] <- 0
}else{
res[which(k==i)] <- pi.vec%*%(P.mat%^%i)%*%(1-rowSums(P.mat))
}
}
}
if(plot){
plot(x=k, y=res, type = "l", col = "darkgrey",
xlab = "k", ylab = expression(paste("P(", S["Total"], "=k)")),
main = "The density function of the number of segregating sites",
sub = paste("The mutation parameter is equal to", theta),
cex.main = 0.9, ylim = c(0, max(res)*1.2))
}
return(res)
}
|
# Copyright (C) 2020 Abdelmoneim Amer Desouki,
# Data Science Group, Paderborn University, Germany.
# All right reserved.
# Email: desouki@mail.upb.de
#
# This file is part of rBMF package
#
# rBMF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rBMF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rBMF. If not, see <http://www.gnu.org/licenses/>.
## this code is converted from MATLAB code provided by the authors
# 18/1/2019
GreConDPlus<-function( X, no_of_factors=NULL, w ,verbose=2){#[ A, B ] =
# Boolean matrix factorisation with the GreConD+ algorithm (converted from
# the authors' MATLAB implementation).  X is decomposed into Boolean factors
# A (objects x factors) and B (factors x attributes) so that the Boolean
# product of A and B approximates X.
#
# Args:
#   X:             binary input matrix (0/1 entries).
#   no_of_factors: optional cap on the number of factors; NULL means no cap.
#   w:             weight penalising over-cover (covering zeros of X).
#   verbose:       0-3; larger values print more progress output.
#
# Returns: list(A, B) of logical factor matrices.
#
# Placeholder so the name cpp_uzaver exists before Rcpp::cppFunction()
# below overwrites it with the compiled C++ closure operator.
cpp_uzaver <-function(D,y,U,M){
};
# C++ source of the "closure" operator: for the attribute set D extended by
# attribute y it derives the object set A (down-arrow), the attribute set B
# (up-arrow) and counts how many still-uncovered ones of U the candidate
# factor would cover ("cost").  Do not edit -- compiled verbatim below.
func <- 'List cpp_uzaver(Rcpp::NumericVector D, int y, NumericMatrix U, NumericMatrix M) {
int m=M.nrow();
int n=M.ncol();
int i,j;
int cost = 0;
//Rcout<<"n="<< n << ", m=" << m << "y=" << y << ", D[y-1]="<<D[y-1]<<std::endl;
D(y-1) = 1;
/*Rcout<<"U[0,0]"<< U[0,0] << ", U[0,1]" << U[0,1] << ",U[1,0]" << U[1,0] << std::endl;
for(i=0; i<m; i++){
for(j=0; j<n; j++) Rcout << U(i,j) << " ";
Rcout<<std::endl;
}
for(j=0; j<n; j++) Rcout<<D(j)<<" ";
*/
Rcpp::NumericVector A(m);
Rcpp::NumericVector B(n);
// sipka dolu
for(i=0; i<m; i++)
{
A(i) = 0;
for(j=0; j<n; j++)
{
if (M(i,j) >= D(j)) A(i) = 1;
else
{
A(i) = 0;
break;
}
}
}
for(j=0; j<n; j++)
{
B(j) = 0;
for(i=0; i<m; i++)
{
if (M(i,j) >= A(i)) B(j) = 1;
else
{
B(j) = 0;
break;
}
}
}
// spocitame plochu
for(i=0; i<m; i++)
{
for(j=0; j<n; j++)
{
if(A(i) && B(j) && U(i,j)) cost++;
}
}
D(y-1) = 0;
List ret;
ret["cost"] = cost;
ret["A"] = A;
ret["B"] = B;
return ret;
}'
Rcpp::cppFunction(func)
# Work on a logical copy of the data; U tracks the still-uncovered ones.
M = X==1
# M = logical(vstup);
# [m, n] = size(M);
m=nrow(M)
n=ncol(M)
# coverage counts, per cell, by how many factors it is covered.
coverage = matrix(0,nrow=m, ncol=n);
U = M;
k = 0;
# a = logical([]);
# b = logical([]);
# E = logical([]); % expansion of the factor
# F = logical([]); % column expansion of the factor
a=NULL
b=NULL
E=NULL
FF=NULL
# % run the calculation
# Greedy main loop: keep adding factors until every 1 of X is covered.
while(max(U)>0){
max_cost = -Inf;
D = rep(FALSE,n)#false(1,n);
D_old = rep(FALSE,n)#false(1,n);
flag = TRUE;# loop-continuation flag
D_between = rep(FALSE,n)#false(1,n);
# % select the GreConD help factor
# Grow the attribute set D greedily: repeatedly try every attribute j not
# yet in D and keep the closure with maximal cover until D stabilises.
while(flag){
for(j in 1:n){
if(!D[j]){
# tmp = r_uzaver(D,j,U,M);
tmp = cpp_uzaver(D,j,U+0,M+0);
# if(k==1) browser();
cost=tmp[[1]]; A=tmp[[2]]; B=tmp[[3]]
if(verbose>2) print(sprintf("k=%d,j=%d,cost=%d, |A|=%d, U=%d",k,j,cost,sum(A),sum(U)));
if (cost > max_cost){
max_cost = cost;
D_between = B;
C = A;
}
}
}
D = D_between;
if(min(D==D_old)){
flag = FALSE;
}else{
flag = TRUE;
D_old = D;
}
}
# % e, f represent the row/column expansion of the factor [C, D]
# browser()
tmp = expansion(C, D, U, M, w);
e=tmp[[1]]; f=tmp[[2]];
C = C | e #or(C, e);
D = D | f #or(D, f);
# Append the (expanded) factor as a new column of a / row of b.
if(is.null(a)){
a = matrix(C,ncol=1)
}else{
a = cbind(a,C)#[a, C];
}
# b = [b; D];
b=rbind(b,D)
# E / FF record which rows / columns were added by the expansion step.
if(is.null(E)){
E = matrix(e,ncol=1)
}else{
E = cbind(E,e)
}#E = [E, e];
# F = [F; f];
FF = rbind(FF,f)
k = k + 1;
if(verbose>=2) print(k);
# if k==pocet_faktoru && pocet_faktoru
# break;
# end
if (!is.null(no_of_factors) && k==no_of_factors){#NB:no expansion for last factor!!!!!
break;
}
# % remove from U and add to cover matrix
ix=as.matrix(expand.grid(which(C==1),which(D==1)),ncol=2)
U[ix] = FALSE;
# browser()
coverage[ix] = coverage[ix] + 1;
# % check whether it is possible to remove some factor (totally cancel):
# a factor whose cells are all covered at least twice is redundant.
for (i in 1:nrow(b)){
if (i >= nrow(b)){#size(b, 1)
break;
}
ix2=as.matrix(expand.grid(which(a[,i]==1),which(b[i,]==1)),ncol=2)
cc = coverage[ix2]#coverage(a(:,i), b(i,:));
if (min(cc)>=2){#min(min(c(M(a(:,i), b(i,:))))) >= 2
if(verbose>=2) print('taking the factor...');
a=a[,-i]#a(:,i) = [];
b=b[-i,]#b(i,:) = [];
E=E[,-i]#E(:,i) = []; #% delete expansion
FF=FF[-i,]#F(i,:) = [];
# coverage(a(:,i), b(i,:)) = coverage(a(:,i), b(i,:)) - 1;
coverage[ix2] = coverage[ix2] - 1
}
}
# % verify whether the overlap error can be mitigated
for(i in 1:nrow(b)){
# % loop over all columns in expansion
for( j in 1:n){
if( FF[i,j]){
z = rep(1,n)#false(1,n);
z[j] = 1;
ix3=as.matrix(expand.grid(which(a[,i]==1),which(z==1)),ncol=2)
cc = coverage[ix3]#coverage(a(:,i), z);
# % if the expansion can be removed (the ones in the expansion are
# % covered by other factors)
if(nrow(ix3)>0 && min(cc[M[ix3]]) >= 2){# min(min(c(M(a(:,i), z)))) >= 2
print('remove the expansion...');
b[i,j] = 0;
# coverage(a(:,i), z) = coverage(a(:,i), z) - 1;
coverage[ix3] = coverage[ix3] - 1;
FF[i,j] = 0;
}
}
}
# % loop over all lines in expansion
for( j in 1:m){
if (E[j,i]){
z = rep(0,m)#false(m,1);
z[j] = 1;
# cc = coverage(z, b(i,:));
ix4=as.matrix(expand.grid(which(z==1),which(b[i,]==1)),ncol=2)
cc = coverage[ix4]#coverage(a(:,i), z);
# browser()
# % if it is possible to remove the expansions (they are one in expansion covered by other factors)
if (nrow(ix4)>0 && min(cc[M[ix4]])>=2){#min(min(c(M(z, b(i,:))))) >= 2
print('removing the line from expansion...');
a[j,i] = 0;
# coverage(z, b(i,:)) = coverage(z, b(i,:)) - 1;
coverage[ix4] = coverage[ix4] - 1;
E[j,i] = 0;
}
}
}
}
}
# Return logical factor matrices.
A = a==1#logical(a);
B = b==1#logical(b);
return(list(A=A,B=B))
}
# Expand the concept (A, B) greedily by single rows / columns as long as the
# expansion increases cover(U) - w * overcover(M) ("cost").
#
# Args:
#   A: logical row-membership vector of the concept (length m).
#   B: logical column-membership vector of the concept (length n).
#   U: logical matrix of still-uncovered ones.
#   M: logical data matrix.
#   w: penalty weight for over-covering zeros of M.
#
# Returns: list(E, FF) flagging the rows (E) and columns (FF) that were
#   added to the concept during expansion.
expansion <- function(A, B, U, M, w){#[E, F]
m <- nrow(U)
n <- ncol(U)
E <- rep(FALSE, m)
FF <- rep(FALSE, n)
while( 1==1){
ix <- as.matrix(expand.grid(which(A==1), which(B==1)), ncol=2)
cover <- sum(U[ix])       # uncovered ones captured by the concept
overcover <- sum(!M[ix])  # zeros of M wrongly covered by the concept
cost <- cover - w * overcover
co <- 0  # 0: no expansion, 1: add a row, 2: add a column
# % the price of all columns, except for those in B
# BUGFIX: drop = FALSE keeps the subset a matrix when A selects a single
# row; previously colSums() failed on the resulting vector.
price <- cover + colSums(U[A==1, , drop=FALSE]) -
  w * (overcover + colSums(!M[A==1, , drop=FALSE]))
price[B==1] <- -Inf
colno <- which.max(price)
# % the price of the best column
if (cost < price[colno]){
co <- 2
}
# % the price of all lines except those in A (a row beats an equally
# priced column because it is evaluated last)
price <- cover + rowSums(U[, B==1, drop=FALSE]) -
  w * (overcover + rowSums(!M[, B==1, drop=FALSE]))
price[A==1] <- -Inf
rowno <- which.max(price)
# % the price of the best line
if (cost < price[rowno]){
co <- 1
}
if (co == 0 ){# % no expansion possible
break
}else{
if (co == 1){# % expansion by row
A[rowno] = 1
E[rowno] = 1
}else{
if( co == 2){# % expansion by column
B[colno] = 1
FF[colno] = 1
}
}
}
}#while
return(list(E=E,FF=FF))
}
# library(Rcpp)
# Module-level duplicate of the C++ closure-operator source that is also
# embedded (and compiled) inside GreConDPlus(); kept here, together with
# the commented-out calls below, only as a manual-test / reference snippet.
func <- 'List cpp_uzaver(Rcpp::NumericVector D, int y, NumericMatrix U, NumericMatrix M) {
int m=M.nrow();
int n=M.ncol();
int i,j;
int cost = 0;
//Rcout<<"n="<< n << ", m=" << m << "y=" << y << ", D[y-1]="<<D[y-1]<<std::endl;
D(y-1) = 1;
/*Rcout<<"U[0,0]"<< U[0,0] << ", U[0,1]" << U[0,1] << ",U[1,0]" << U[1,0] << std::endl;
for(i=0; i<m; i++){
for(j=0; j<n; j++) Rcout << U(i,j) << " ";
Rcout<<std::endl;
}
for(j=0; j<n; j++) Rcout<<D(j)<<" ";
*/
Rcpp::NumericVector A(m);
Rcpp::NumericVector B(n);
// sipka dolu
for(i=0; i<m; i++)
{
A(i) = 0;
for(j=0; j<n; j++)
{
if (M(i,j) >= D(j)) A(i) = 1;
else
{
A(i) = 0;
break;
}
}
}
for(j=0; j<n; j++)
{
B(j) = 0;
for(i=0; i<m; i++)
{
if (M(i,j) >= A(i)) B(j) = 1;
else
{
B(j) = 0;
break;
}
}
}
// spocitame plochu
for(i=0; i<m; i++)
{
for(j=0; j<n; j++)
{
if(A(i) && B(j) && U(i,j)) cost++;
}
}
D(y-1) = 0;
List ret;
ret["cost"] = cost;
ret["A"] = A;
ret["B"] = B;
return ret;
}'
# Manual smoke test (left commented out by the original author):
# cppFunction(func)
# U=X==1
# D=rep(0,ncol(U))
# tmp=cpp_uzaver(D,1,U,U);
| /R/GreConDPlus.R | no_license | cran/rBMF | R | false | false | 10,098 | r | # Copyright (C) 2020 Abdelmoneim Amer Desouki,
# Data Science Group, Paderborn University, Germany.
# All right reserved.
# Email: desouki@mail.upb.de
#
# This file is part of rBMF package
#
# rBMF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rBMF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rBMF. If not, see <http://www.gnu.org/licenses/>.
## this code is converted from MATLAB code provided by the authors
# 18/1/2019
GreConDPlus<-function( X, no_of_factors=NULL, w ,verbose=2){#[ A, B ] =
cpp_uzaver <-function(D,y,U,M){
};
func <- 'List cpp_uzaver(Rcpp::NumericVector D, int y, NumericMatrix U, NumericMatrix M) {
int m=M.nrow();
int n=M.ncol();
int i,j;
int cost = 0;
//Rcout<<"n="<< n << ", m=" << m << "y=" << y << ", D[y-1]="<<D[y-1]<<std::endl;
D(y-1) = 1;
/*Rcout<<"U[0,0]"<< U[0,0] << ", U[0,1]" << U[0,1] << ",U[1,0]" << U[1,0] << std::endl;
for(i=0; i<m; i++){
for(j=0; j<n; j++) Rcout << U(i,j) << " ";
Rcout<<std::endl;
}
for(j=0; j<n; j++) Rcout<<D(j)<<" ";
*/
Rcpp::NumericVector A(m);
Rcpp::NumericVector B(n);
// sipka dolu
for(i=0; i<m; i++)
{
A(i) = 0;
for(j=0; j<n; j++)
{
if (M(i,j) >= D(j)) A(i) = 1;
else
{
A(i) = 0;
break;
}
}
}
for(j=0; j<n; j++)
{
B(j) = 0;
for(i=0; i<m; i++)
{
if (M(i,j) >= A(i)) B(j) = 1;
else
{
B(j) = 0;
break;
}
}
}
// spocitame plochu
for(i=0; i<m; i++)
{
for(j=0; j<n; j++)
{
if(A(i) && B(j) && U(i,j)) cost++;
}
}
D(y-1) = 0;
List ret;
ret["cost"] = cost;
ret["A"] = A;
ret["B"] = B;
return ret;
}'
Rcpp::cppFunction(func)
M = X==1
# M = logical(vstup);
# [m, n] = size(M);
m=nrow(M)
n=ncol(M)
coverage = matrix(0,nrow=m, ncol=n);
U = M;
k = 0;
# a = logical([]);
# b = logical([]);
# E = logical([]); % expansion of the factor
# F = logical([]); % sloupcova expanze faktoru
a=NULL
b=NULL
E=NULL
FF=NULL
# % run the calculation
while(max(U)>0){
max_cost = -Inf;
D = rep(FALSE,n)#false(1,n);
D_old = rep(FALSE,n)#false(1,n);
flag = TRUE;#podminka
D_between = rep(FALSE,n)#false(1,n);
# % select the GreConD help factor
while(flag){
for(j in 1:n){
if(!D[j]){
# tmp = r_uzaver(D,j,U,M);
tmp = cpp_uzaver(D,j,U+0,M+0);
# if(k==1) browser();
cost=tmp[[1]]; A=tmp[[2]]; B=tmp[[3]]
if(verbose>2) print(sprintf("k=%d,j=%d,cost=%d, |A|=%d, U=%d",k,j,cost,sum(A),sum(U)));
if (cost > max_cost){
max_cost = cost;
D_between = B;
C = A;
}
}
}
D = D_between;
if(min(D==D_old)){
flag = FALSE;
}else{
flag = TRUE;
D_old = D;
}
}
# % e, f represents the expansion factor factor [C, D]
# browser()
tmp = expansion(C, D, U, M, w);
e=tmp[[1]]; f=tmp[[2]];
C = C | e #or(C, e);
D = D | f #or(D, f);
if(is.null(a)){
a = matrix(C,ncol=1)
}else{
a = cbind(a,C)#[a, C];
}
# b = [b; D];
b=rbind(b,D)
if(is.null(E)){
E = matrix(e,ncol=1)
}else{
E = cbind(E,e)
}#E = [E, e];
# F = [F; f];
FF = rbind(FF,f)
k = k + 1;
if(verbose>=2) print(k);
# if k==pocet_faktoru && pocet_faktoru
# break;
# end
if (!is.null(no_of_factors) && k==no_of_factors){#NB:no expansion for last factor!!!!!
break;
}
# % remove from U and add to cover matrix
ix=as.matrix(expand.grid(which(C==1),which(D==1)),ncol=2)
U[ix] = FALSE;
# browser()
coverage[ix] = coverage[ix] + 1;
# % overime whether it is possible to remove some factor (totally cancel)
for (i in 1:nrow(b)){
if (i >= nrow(b)){#size(b, 1)
break;
}
ix2=as.matrix(expand.grid(which(a[,i]==1),which(b[i,]==1)),ncol=2)
cc = coverage[ix2]#coverage(a(:,i), b(i,:));
if (min(cc)>=2){#min(min(c(M(a(:,i), b(i,:))))) >= 2
if(verbose>=2) print('taking the factor...');
a=a[,-i]#a(:,i) = [];
b=b[-i,]#b(i,:) = [];
E=E[,-i]#E(:,i) = []; #% delete expansion
FF=FF[-i,]#F(i,:) = [];
# coverage(a(:,i), b(i,:)) = coverage(a(:,i), b(i,:)) - 1;
coverage[ix2] = coverage[ix2] - 1
}
}
# % verify whether the overlap error can be mitigated
for(i in 1:nrow(b)){
# % loop over all columns in expansion
for( j in 1:n){
if( FF[i,j]){
z = rep(1,n)#false(1,n);
z[j] = 1;
ix3=as.matrix(expand.grid(which(a[,i]==1),which(z==1)),ncol=2)
cc = coverage[ix3]#coverage(a(:,i), z);
# % pokud je mozne odebrat expanzy (jednicky v expanzi jsou
# % pokryty jinymi faktory)
if(nrow(ix3)>0 && min(cc[M[ix3]]) >= 2){# min(min(c(M(a(:,i), z)))) >= 2
print('remove the expansion...');
b[i,j] = 0;
# coverage(a(:,i), z) = coverage(a(:,i), z) - 1;
coverage[ix3] = coverage[ix3] - 1;
FF[i,j] = 0;
}
}
}
# % loop over all lines in expansion
for( j in 1:m){
if (E[j,i]){
z = rep(0,m)#false(m,1);
z[j] = 1;
# cc = coverage(z, b(i,:));
ix4=as.matrix(expand.grid(which(z==1),which(b[i,]==1)),ncol=2)
cc = coverage[ix4]#coverage(a(:,i), z);
# browser()
# % if it is possible to remove the expansions (they are one in expansion covered by other factors)
if (nrow(ix4)>0 && min(cc[M[ix4]])>=2){#min(min(c(M(z, b(i,:))))) >= 2
print('removing the line from expansion...');
a[j,i] = 0;
# coverage(z, b(i,:)) = coverage(z, b(i,:)) - 1;
coverage[ix4] = coverage[ix4] - 1;
E[j,i] = 0;
}
}
}
}
}
A = a==1#logical(a);
B = b==1#logical(b);
return(list(A=A,B=B))
}
# % the function for expanding the concept by one row or column
expansion <- function(A, B, U, M, w){#[E, F]
# [m, n] = size(U);
m=nrow(U)
n=ncol(U)
E = rep(FALSE,m)#false(m,1);
FF = rep(FALSE,n)#false(1,n);
while( 1==1){
ix=as.matrix(expand.grid(which(A==1),which(B==1)),ncol=2)
cover = sum(U[ix])#sum(sum(U(A, B)));
overcover = sum(!M[ix])#sum(sum(~M(A, B)));
cost = cover - w * overcover;
co = 0; #% determines whether the column or row is a good one
# % the price of all columns, except for those in B
price = cover + colSums(U[A==1,]) - w * (overcover + colSums(!M[A==1,,drop=FALSE]))#sum(U(A,:)) - w * (overcover + sum(~M(A,:),1));
price[B==1] = -Inf;
colno = which.max(price);
# % the price of the best column
if (cost < price[colno]){
co = 2;
}
# % the price of all lines except those in A
# price = cover + sum(U(:,B),2) - w * (overcover + sum(~table(:,B),2));
price = cover + rowSums(U[,B==1,drop=FALSE]) - w * (overcover + rowSums(!M[,B==1,drop=FALSE]));
price[A==1] = -Inf;
rowno = which.max(price);
# % the price of the best line
if (cost < price[rowno]){
co = 1;
}
if (co == 0 ){#% zadna expanze
break;
}else{
if (co == 1){# % expanze o radek
A[rowno] = 1;
E[rowno] = 1;
}else{
if( co == 2){# % expansion by column
B[colno] = 1;
FF[colno] = 1;
}
}
}
}#while
return(list(E=E,FF=FF))
}
# library(Rcpp)
func <- 'List cpp_uzaver(Rcpp::NumericVector D, int y, NumericMatrix U, NumericMatrix M) {
int m=M.nrow();
int n=M.ncol();
int i,j;
int cost = 0;
//Rcout<<"n="<< n << ", m=" << m << "y=" << y << ", D[y-1]="<<D[y-1]<<std::endl;
D(y-1) = 1;
/*Rcout<<"U[0,0]"<< U[0,0] << ", U[0,1]" << U[0,1] << ",U[1,0]" << U[1,0] << std::endl;
for(i=0; i<m; i++){
for(j=0; j<n; j++) Rcout << U(i,j) << " ";
Rcout<<std::endl;
}
for(j=0; j<n; j++) Rcout<<D(j)<<" ";
*/
Rcpp::NumericVector A(m);
Rcpp::NumericVector B(n);
// sipka dolu
for(i=0; i<m; i++)
{
A(i) = 0;
for(j=0; j<n; j++)
{
if (M(i,j) >= D(j)) A(i) = 1;
else
{
A(i) = 0;
break;
}
}
}
for(j=0; j<n; j++)
{
B(j) = 0;
for(i=0; i<m; i++)
{
if (M(i,j) >= A(i)) B(j) = 1;
else
{
B(j) = 0;
break;
}
}
}
// spocitame plochu
for(i=0; i<m; i++)
{
for(j=0; j<n; j++)
{
if(A(i) && B(j) && U(i,j)) cost++;
}
}
D(y-1) = 0;
List ret;
ret["cost"] = cost;
ret["A"] = A;
ret["B"] = B;
return ret;
}'
# cppFunction(func)
# U=X==1
# D=rep(0,ncol(U))
# tmp=cpp_uzaver(D,1,U,U);
|
# {
# 'created_on' : '5 May 2017',
# 'aim_of_rscript' : 'Using logical operators in R',
# 'coded_by' : 'Rishikesh Agrawani',
# }
# Demo of elementwise (&, |, !) versus scalar (&&, ||) logical operators on
# numeric and complex vectors (any non-zero value is treated as TRUE).
integers.vect = c(4, 5, 6, 0, 1, 2)
evens.vect = c(0, 8, 4, 6, 10, 12)
# & and | operate elementwise over the full vectors.
cat(integers.vect, " & ",evens.vect, " = ", integers.vect & evens.vect)
cat('\n\n')
cat(integers.vect, " | ",evens.vect," = ", integers.vect | evens.vect)
cat("\n\n")
# NOTE(review): since R 4.3.0, && and || signal an error when an operand
# has length > 1, so the && / || demos below only run (using just the
# first elements) on older versions of R.
cat(integers.vect, " && ",evens.vect," = ", integers.vect && evens.vect)
cat("\n\n")
cat(integers.vect, " || ",evens.vect," = ", integers.vect || evens.vect)
cat("\n\n")
cat("!",evens.vect," = ",!evens.vect)
cat("\n\n")
# Mixed logical/numeric values are coerced to complex in these vectors; a
# complex value is treated as TRUE unless it equals 0+0i.
complex.vect.1 = c(67+0i, 0+0i, 9+3i, FALSE, 0, -6,TRUE)
complex.vect.2 = c(0+8i, TRUE, 89.3, 45, 1, -7, FALSE)
cat(complex.vect.1, " & ",complex.vect.2, " = ", complex.vect.1 & complex.vect.2)
cat('\n\n')
cat(complex.vect.1, " | ",complex.vect.2, " = ", complex.vect.1 | complex.vect.2)
cat('\n\n')
cat(complex.vect.1, " && ",complex.vect.2, " = ", complex.vect.1 && complex.vect.2)
cat('\n\n')
cat(complex.vect.1, " || ",complex.vect.2, " = ", complex.vect.1 || complex.vect.2)
cat('\n\n')
# Expected output (as produced on R < 4.3):
# 4 5 6 0 1 2 & 0 8 4 6 10 12 = FALSE TRUE TRUE FALSE TRUE TRUE
# 4 5 6 0 1 2 | 0 8 4 6 10 12 = TRUE TRUE TRUE TRUE TRUE TRUE
# 4 5 6 0 1 2 && 0 8 4 6 10 12 = FALSE
# 4 5 6 0 1 2 || 0 8 4 6 10 12 = TRUE
# ! 0 8 4 6 10 12 = TRUE FALSE FALSE FALSE FALSE FALSE
# 67+0i 0+0i 9+3i 0+0i 0+0i -6+0i 1+0i & 0+8i 1+0i 89.3+0i 45+0i 1+0i -7+0i 0+0i = TRUE FALSE TRUE FALSE FALSE TRUE FALSE
# 67+0i 0+0i 9+3i 0+0i 0+0i -6+0i 1+0i | 0+8i 1+0i 89.3+0i 45+0i 1+0i -7+0i 0+0i = TRUE TRUE TRUE TRUE TRUE TRUE TRUE
# 67+0i 0+0i 9+3i 0+0i 0+0i -6+0i 1+0i && 0+8i 1+0i 89.3+0i 45+0i 1+0i -7+0i 0+0i = TRUE
# 67+0i 0+0i 9+3i 0+0i 0+0i -6+0i 1+0i || 0+8i 1+0i 89.3+0i 45+0i 1+0i -7+0i 0+0i = TRUE
| /src/04_operators/logical.R | permissive | hygull/rscript | R | false | false | 1,815 | r | # {
# 'created_on' : '5 May 2017',
# 'aim_of_rscript' : 'Using logical operators in R',
# 'coded_by' : 'Rishikesh Agrawani',
# }
integers.vect = c(4, 5, 6, 0, 1, 2)
evens.vect = c(0, 8, 4, 6, 10, 12)
cat(integers.vect, " & ",evens.vect, " = ", integers.vect & evens.vect)
cat('\n\n')
cat(integers.vect, " | ",evens.vect," = ", integers.vect | evens.vect)
cat("\n\n")
cat(integers.vect, " && ",evens.vect," = ", integers.vect && evens.vect)
cat("\n\n")
cat(integers.vect, " || ",evens.vect," = ", integers.vect || evens.vect)
cat("\n\n")
cat("!",evens.vect," = ",!evens.vect)
cat("\n\n")
complex.vect.1 = c(67+0i, 0+0i, 9+3i, FALSE, 0, -6,TRUE)
complex.vect.2 = c(0+8i, TRUE, 89.3, 45, 1, -7, FALSE)
cat(complex.vect.1, " & ",complex.vect.2, " = ", complex.vect.1 & complex.vect.2)
cat('\n\n')
cat(complex.vect.1, " | ",complex.vect.2, " = ", complex.vect.1 | complex.vect.2)
cat('\n\n')
cat(complex.vect.1, " && ",complex.vect.2, " = ", complex.vect.1 && complex.vect.2)
cat('\n\n')
cat(complex.vect.1, " || ",complex.vect.2, " = ", complex.vect.1 || complex.vect.2)
cat('\n\n')
# 4 5 6 0 1 2 & 0 8 4 6 10 12 = FALSE TRUE TRUE FALSE TRUE TRUE
# 4 5 6 0 1 2 | 0 8 4 6 10 12 = TRUE TRUE TRUE TRUE TRUE TRUE
# 4 5 6 0 1 2 && 0 8 4 6 10 12 = FALSE
# 4 5 6 0 1 2 || 0 8 4 6 10 12 = TRUE
# ! 0 8 4 6 10 12 = TRUE FALSE FALSE FALSE FALSE FALSE
# 67+0i 0+0i 9+3i 0+0i 0+0i -6+0i 1+0i & 0+8i 1+0i 89.3+0i 45+0i 1+0i -7+0i 0+0i = TRUE FALSE TRUE FALSE FALSE TRUE FALSE
# 67+0i 0+0i 9+3i 0+0i 0+0i -6+0i 1+0i | 0+8i 1+0i 89.3+0i 45+0i 1+0i -7+0i 0+0i = TRUE TRUE TRUE TRUE TRUE TRUE TRUE
# 67+0i 0+0i 9+3i 0+0i 0+0i -6+0i 1+0i && 0+8i 1+0i 89.3+0i 45+0i 1+0i -7+0i 0+0i = TRUE
# 67+0i 0+0i 9+3i 0+0i 0+0i -6+0i 1+0i || 0+8i 1+0i 89.3+0i 45+0i 1+0i -7+0i 0+0i = TRUE
|
# Download the Natural Gas Acquisition Program workbook and read the
# relevant block of rows/columns (Getting and Cleaning Data, Quiz 1).
library(xlsx)
if(!file.exists("data")) {
# BUGFIX: dir.create, not file.create -- a plain file named "data" would
# make the download below fail, since "./data/Gas.xlsx" needs a directory.
dir.create("data")
}
URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FDATA.gov_NGAP.xlsx"
download.file(URL, "./data/Gas.xlsx", method="curl")
dateDownloaded <- date()  # record when the data were fetched
dat <- read.xlsx("./data/Gas.xlsx", sheetIndex = 1, startRow = 18, endRow = 23,colIndex = 7:15, header = TRUE)
sum(dat$Zip*dat$Ext,na.rm=T) | /Quiz 1/Quiz 1 - Question 3.R | no_license | sservaes/Getting-and-Cleaning-Data | R | false | false | 375 | r | library(xlsx)
if(!file.exists("data")) {
file.create("data")
}
URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FDATA.gov_NGAP.xlsx"
download.file(URL, "./data/Gas.xlsx", method="curl")
dateDownloaded <- date()
dat <- read.xlsx("./data/Gas.xlsx", sheetIndex = 1, startRow = 18, endRow = 23,colIndex = 7:15, header = TRUE)
sum(dat$Zip*dat$Ext,na.rm=T) |
# Clean a character vector of tweets for text mining / word clouds: strips
# retweet markers, @mentions, punctuation, digits and URL remnants,
# lower-cases everything and drops entries that become empty.
#
# Args:
#   some_txt: character vector of raw tweet texts.
# Returns: unnamed character vector of cleaned, lower-cased texts.
clean.text <- function(some_txt)
{
  #some_txt = gsub("&", "", some_txt)
  # BUGFIX: the word boundary must be written "\\b"; the original "\b" was
  # a literal backspace character, so the retweet pattern never matched.
  some_txt = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", some_txt)
  some_txt = gsub("RT\\w+", "", some_txt)
  some_txt = gsub("RT+", "", some_txt)
  some_txt = gsub("@\\w+", "", some_txt)        # remaining @mentions
  some_txt = gsub("[[:punct:]]", "", some_txt)  # punctuation
  some_txt = gsub("[[:digit:]]", "", some_txt)  # digits
  some_txt = gsub("http\\w+", "", some_txt)     # URL remnants (punctuation already gone)
  #some_txt = gsub("[ t]{2,}", "", some_txt)
  #some_txt = gsub("^\\s+|\\s+$", "", some_txt)
  # "tolower error handling": tolower() can fail on invalid multibyte
  # strings; return NA for those instead of aborting the whole vector.
  try.tolower = function(x)
  {
    y = NA
    try_error = tryCatch(tolower(x), error=function(e) e)
    if (!inherits(try_error, "error"))
      y = tolower(x)
    return(y)
  }
  some_txt = sapply(some_txt, try.tolower)
  some_txt = some_txt[some_txt != ""]
  names(some_txt) = NULL
  return(some_txt)
}
| /WordCloudCleaningFunction.R | no_license | alexzanidean/Reddit | R | false | false | 843 | r | clean.text <- function(some_txt)
{
#some_txt = gsub("&", "", some_txt)
some_txt = gsub("(RT|via)((?:\b\\W*@\\w+)+)", "", some_txt)
some_txt = gsub("RT\\w+", "", some_txt)
some_txt = gsub("RT+", "", some_txt)
some_txt = gsub("@\\w+", "", some_txt)
some_txt = gsub("[[:punct:]]", "", some_txt)
some_txt = gsub("[[:digit:]]", "", some_txt)
some_txt = gsub("http\\w+", "", some_txt)
#some_txt = gsub("[ t]{2,}", "", some_txt)
#some_txt = gsub("^\\s+|\\s+$", "", some_txt)
# define "tolower error handling" function
try.tolower = function(x)
{
y = NA
try_error = tryCatch(tolower(x), error=function(e) e)
if (!inherits(try_error, "error"))
y = tolower(x)
return(y)
}
some_txt = sapply(some_txt, try.tolower)
some_txt = some_txt[some_txt != ""]
names(some_txt) = NULL
return(some_txt)
}
|
# mboost family for boosting 'classical' beta regression
# location parameter 'mu' is modeled by additive predictor
# precision parameter 'phi' is estimated as a scalar
# uses the parametrization as 'betareg' package by Zeileis et. al.
# The parametrization in Mikis 'gamlss' package for BE() differs
# slightly, sigma = 1/sqrt(phi + 1)
BetaReg <- function(mu = NULL, phirange = c(.001, 1000)){
  # Boosting family for beta regression with a logit link for the location
  # parameter mu; the precision parameter phi is profiled out as a scalar
  # in every boosting iteration.
  #
  # Args:
  #   mu:       optional value used to fix the offset at qlogis(mu); if NULL
  #             the offset is qlogis(mean(y)).
  #   phirange: search interval handed to optimize() when estimating phi.
  #
  # Returns: a 'boost_family' object built by mboost::Family().
  phi <- 1 # just to initialize, overwritten in first step
  # loss is negative log likelihood; f is additive predictor (eta)
  # logit link -> plogis(f) = mu
  loss_mu <- function(phi, y, f) {
    - 1 * (lgamma(phi) - lgamma(plogis(f) * phi) -
             lgamma((1 - plogis(f)) * phi) + (plogis(f) * phi - 1) * log(y) +
             ((1 - plogis(f)) * phi - 1) * log(1 - y))
  }
  # empirical risk as a function of phi only (used to profile out phi)
  risk_phi <- function(phi, y, fit, w = 1) {
    sum(w * loss_mu(y = y, f = fit, phi = phi))
  }
  # empirical risk reported to mboost (uses the current phi estimate)
  risk <- function( y, f, w = 1) {
    sum(w * loss_mu(y = y, f = f, phi = phi))
  }
  # ngradient is first derivative of log likelihood w.r.t. f
  ngradient <- function(y, f, w = 1) {
    # estimate phi; `<<-` updates phi in the enclosing environment so that
    # risk() and nuisance() see the current estimate
    phi <<- optimize(risk_phi, interval = phirange, y = y,
                     fit = f, w = w)$minimum
    # compute partial derivative
    ngr <- exp(f)/(1 + exp(f))^2 * (phi * (qlogis(y) - (digamma(plogis(f) * phi) -
                                                          digamma((1 - plogis(f)) * phi))))
    return(ngr)
  }
  # starting value: qlogis(mu) if supplied, otherwise qlogis(mean(y))
  offset <- function(y, w) {
    if (!is.null(mu)) {
      RET <- qlogis(mu)
    }
    else {
      RET <- qlogis(mean(y))
    }
    return(RET)
  }
  # use the Family constructor of mboost
  # BUG FIX: check_y must be passed as the *named* argument `check_y = ...`.
  # The original wrote `check_y <- function(y)`, which is a local assignment
  # whose value is passed positionally -- positional matching assigned it to
  # Family()'s first unmatched formal, `loss`, so the response check never
  # ran and `loss` was silently set to the checking function.
  mboost::Family(ngradient = ngradient, risk = risk, offset = offset,
                 response = function(f) plogis(f),
                 check_y = function(y) {
                   if (!is.numeric(y) || !is.null(dim(y)))
                     stop("response must be a numeric vector")
                   if (any(y <= 0) | any(y >= 1))
                     stop("response must be >0 and <1")
                   y
                 },
                 nuisance = function() return(phi[length(phi)]),
                 name = "Beta-Regression (logit link)")
}
| /R/BetaReg_family.R | no_license | JamesLinus/betaboost | R | false | false | 2,220 | r |
# mboost family for boosting 'classical' beta regression
# location parameter 'mu' is modeled by additive predictor
# precision parameter 'phi' is estimated as a scalar
# uses the parametrization as 'betareg' package by Zeileis et. al.
# The parametrization in Mikis 'gamlss' package for BE() differs
# slightly, sigma = 1/sqrt(phi + 1)
BetaReg <- function(mu = NULL, phirange = c(.001, 1000)){
phi <- 1 # just to initialize, overwritten in first step
# loss is negative log likelihood; f is additive predictor (eta)
# logit link -> plogis(f) = mu
loss_mu <- function(phi, y, f) {
- 1 * (lgamma(phi) - lgamma(plogis(f) * phi) -
lgamma((1 - plogis(f)) * phi) + (plogis(f) * phi - 1) * log(y) +
((1 - plogis(f)) * phi - 1) * log(1 - y))
}
# to optimize phi
risk_phi <- function(phi, y, fit, w = 1) {
sum(w * loss_mu(y = y, f = fit, phi = phi))
}
# for output
risk <- function( y, f, w = 1) {
sum(w * loss_mu(y = y, f = f, phi = phi))
}
# ngradient is first derivative of log likelihood w.r.t. f
ngradient <- function(y, f, w = 1) {
# estimate phi
phi <<- optimize(risk_phi, interval = phirange, y = y,
fit = f, w = w)$minimum
# compute partial derivative
ngr <- exp(f)/(1 + exp(f))^2 * (phi * (qlogis(y) - (digamma(plogis(f) * phi) -
digamma((1 - plogis(f)) * phi))))
return(ngr)
}
# starting value: mean(y)
offset <- function(y, w) {
if (!is.null(mu)) {
RET <- qlogis(mu)
}
else {
RET <- qlogis(mean(y))
}
return(RET)
}
# use the Family constructor of mboost
mboost::Family(ngradient = ngradient, risk = risk, offset = offset,
response = function(f) plogis(f),
check_y <- function(y) {
if (!is.numeric(y) || !is.null(dim(y)))
stop("response must be a numeric vector")
if (any(y <= 0) | any(y >= 1))
stop("response must be >0 and <1")
y
},nuisance = function() return(phi[length(phi)]),
name = "Beta-Regression (logit link)")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visTree.R
\name{visTreeEditor}
\alias{visTreeEditor}
\title{Run and edit a visTree, and get back in R}
\usage{
visTreeEditor(data, ...)
}
\arguments{
\item{data}{\code{rpart or data.drame}}
\item{...}{all arguments except \code{object} present in \link{visTree}}
}
\description{
Needed packages: shiny, rpart, colourpicker, shinyWidgets
}
\examples{
\dontrun{
net <- visTreeEditor(data = iris)
net <- visTreeEditor(data = rpart(iris), main = "visTree Editor")
net
}
}
\references{
See online documentation \url{http://datastorm-open.github.io/visNetwork/}
}
\seealso{
\link{visTree}, \link{visTreeModuleServer}, \link{visNetworkEditor}
}
| /man/visTreeEditor.Rd | no_license | NatthakornK/visNetwork | R | false | true | 723 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visTree.R
\name{visTreeEditor}
\alias{visTreeEditor}
\title{Run and edit a visTree, and get back in R}
\usage{
visTreeEditor(data, ...)
}
\arguments{
\item{data}{\code{rpart or data.drame}}
\item{...}{all arguments except \code{object} present in \link{visTree}}
}
\description{
Needed packages: shiny, rpart, colourpicker, shinyWidgets
}
\examples{
\dontrun{
net <- visTreeEditor(data = iris)
net <- visTreeEditor(data = rpart(iris), main = "visTree Editor")
net
}
}
\references{
See online documentation \url{http://datastorm-open.github.io/visNetwork/}
}
\seealso{
\link{visTree}, \link{visTreeModuleServer}, \link{visNetworkEditor}
}
|
\name{ablineclip}
\alias{ablineclip}
\title{Add a straight line to a plot}
\description{
As \samp{abline}, but has arguments \samp{x1,x2,y1,y2} as in \samp{clip}.
}
\usage{
ablineclip(a=NULL,b=NULL,h=NULL,v=NULL,reg=NULL,coef=NULL,untf=FALSE,
x1=NULL,x2=NULL,y1=NULL,y2=NULL,...)
}
\arguments{
\item{a}{Intercept.}
\item{b}{Slope.}
 \item{h}{The y-value(s) for horizontal line(s).}
 \item{v}{The x-value(s) for vertical line(s).}
\item{reg}{Fitted lm object. }
\item{coef}{Coefficients, typically intercept and slope.}
\item{untf}{How to plot on log coordinates, see \samp{abline}.}
\item{x1,x2,y1,y2}{Clipping limits, see \samp{clip}.}
\item{...}{Further arguments passed to \samp{abline}.}
}
\details{
\samp{ablineclip} sets a new clipping region and then calls \samp{abline}.
If any of the four clipping limits is NULL, the values from \samp{par("usr")}
are substituted. After the call to \samp{abline}, the old clipping region
is restored. In order to make \samp{clip} work, there is a call to \samp{abline}
that draws a line off the plot.
Multiple lines of the same type can be drawn in a single call, but the clipping
region must be the same for each group of lines. Thanks to Berry Boessenkool
for pointing this out.
}
\value{
None. Adds to the current plot.
}
\author{ Remko Duursma }
\seealso{\link{abline}, \link{clip}}
\examples{
x <- rnorm(100)
y <- x + rnorm(100)
lmfit <- lm(y~x)
plot(x, y, xlim=c(-3.5, 3.5))
ablineclip(lmfit, x1 = -2, x2 = 2, lty = 2)
ablineclip(h = 0, x1 = -2,x2 = 2,lty = 3, col = "red")
ablineclip(v = 0, y1 = -2.5, y2 = 1.5, lty=4, col = "green")
}
\keyword{ aplot } | /man/ablineclip.Rd | no_license | plotrix/plotrix | R | false | false | 1,632 | rd | \name{ablineclip}
\alias{ablineclip}
\title{Add a straight line to a plot}
\description{
As \samp{abline}, but has arguments \samp{x1,x2,y1,y2} as in \samp{clip}.
}
\usage{
ablineclip(a=NULL,b=NULL,h=NULL,v=NULL,reg=NULL,coef=NULL,untf=FALSE,
x1=NULL,x2=NULL,y1=NULL,y2=NULL,...)
}
\arguments{
\item{a}{Intercept.}
\item{b}{Slope.}
 \item{h}{The y-value(s) for horizontal line(s).}
 \item{v}{The x-value(s) for vertical line(s).}
\item{reg}{Fitted lm object. }
\item{coef}{Coefficients, typically intercept and slope.}
\item{untf}{How to plot on log coordinates, see \samp{abline}.}
\item{x1,x2,y1,y2}{Clipping limits, see \samp{clip}.}
\item{...}{Further arguments passed to \samp{abline}.}
}
\details{
\samp{ablineclip} sets a new clipping region and then calls \samp{abline}.
If any of the four clipping limits is NULL, the values from \samp{par("usr")}
are substituted. After the call to \samp{abline}, the old clipping region
is restored. In order to make \samp{clip} work, there is a call to \samp{abline}
that draws a line off the plot.
Multiple lines of the same type can be drawn in a single call, but the clipping
region must be the same for each group of lines. Thanks to Berry Boessenkool
for pointing this out.
}
\value{
None. Adds to the current plot.
}
\author{ Remko Duursma }
\seealso{\link{abline}, \link{clip}}
\examples{
x <- rnorm(100)
y <- x + rnorm(100)
lmfit <- lm(y~x)
plot(x, y, xlim=c(-3.5, 3.5))
ablineclip(lmfit, x1 = -2, x2 = 2, lty = 2)
ablineclip(h = 0, x1 = -2,x2 = 2,lty = 3, col = "red")
ablineclip(v = 0, y1 = -2.5, y2 = 1.5, lty=4, col = "green")
}
\keyword{ aplot } |
#!/usr/bin/env Rscript
# Print the name and version of every installed package that is not part of
# base/recommended R (those ship with a non-NA "Priority" field).
ip <- as.data.frame(installed.packages()[, c("Package", "Version", "Priority")])
rownames(ip) <- NULL
ip <- ip[is.na(ip$Priority), c("Package", "Version"), drop = FALSE]
print(ip, row.names = FALSE)
| /Singularity/example_1/list_installed.R | permissive | patirot/slurm-examples | R | false | false | 168 | r | #!/usr/bin/env Rscript
ip <- as.data.frame(installed.packages()[,c(1,3:4)])
rownames(ip) <- NULL
ip <- ip[is.na(ip$Priority),1:2,drop=FALSE]
print(ip, row.names=FALSE)
|
# setwd('D:/DO/eDO_datathon')
data_folder <- 'data/input/'
# LIBRARIES --------------------------------------------------------------------
library(data.table)
source('code/tools_R.R')
# tools_R.R supplies the helpers used below: get_bow() and add_cat().
# Their contracts are not visible here; get_bow() appears to return a data
# frame with 'label' and 'freq' columns ordered by frequency -- TODO confirm.
# LOADS ------------------------------------------------------------------------
# read metadata
df_md <- fread(paste0(data_folder,'labels_targets/train_metadata.csv'),
               encoding = "UTF-8")
# read targets
df_tg <- fread(paste0(data_folder,'labels_targets/accomodations_train_split.csv'))
# attach the train/test split flag to every image row, then split
df_md_split <- merge(df_md, df_tg, by.x = 'AccomodationId', by.y = 'ID', all.x = T)
df_md_validation <- df_md_split[is_test == 1,]
df_md <- df_md_split[is_test == 0,]
# Reshape metadata vector into sth easy to use
# (drop spaces, then turn the '|'-separated label string into a comma list)
df_md$metadata <- gsub(' ','',df_md$metadata)
df_md$metadata <- gsub('\\|',',',df_md$metadata)
# GLOBAL LIST OF LABELS --------------------------------------------------------
# List all labels
lab <- unlist(lapply(df_md$metadata, function(x) unlist(strsplit(x,','))))
# World total words:
lab_glob <- get_bow(lab)
# Global frequencies of labels
# We get the very top 5 first words and remove them from the list (they do not
# contribute into the classification)
# NOTE(review): 'lab_to_remove' is never used afterwards and the labels are
# not actually removed from 'lab' before lab_glob is recomputed -- dead code?
lab_to_remove <- lab_glob[1:5,]
lab_glob <- get_bow(lab)
lab_glob$freq <- lab_glob$freq/sum(lab_glob$freq)
# Check which words classify better
# Choose global frequencies (Top XX)
lab_freq_glob <- lab_glob[1:5,]
# Choose global labels (Top XX)
# NOTE(review): this 'labs' is overwritten below (GENERATE LABEL COLUMNS
# section) with the full label list, so the top-5 restriction has no effect.
labs <- as.character(lab_glob$label)[1:5]
# ------------------------------------------------------------------------------
# Generate a list grouped by type, word, appearence, total images in that category
types <- unique(df_md$TYPE)
labels_grouped <- list()
for (t in types) {
  # bag of words restricted to the images of this accommodation TYPE
  lab <- unlist(lapply(df_md$metadata[df_md$TYPE == t],
                       function(x) unlist(strsplit(x,','))))
  lab_type <- get_bow(lab)
  n_images <- sum(df_md$TYPE == t)
  lab_type$type <- t
  lab_type$n_images <- n_images
  labels_grouped <- rbind(labels_grouped, lab_type)
}
# Removed failed labels
labels_grouped <- labels_grouped[! grepl('Failed',labels_grouped$label), ]
# Find common words to all types
list_words <- list()
i <- 1
for (t in types) {
  list_words[[i]] <- as.character(labels_grouped$label[labels_grouped$type == t])
  i <- i+1
}
names(list_words) <- types
common_labels <- as.character(Reduce(intersect, list_words))
# For each TYPE remove all common labels and select the top five
# NOTE(review): the comment says "top five" but head(.SD, 3) keeps three
# rows per type -- align comment and code.
labels_grouped_uncommon <- as.data.table(labels_grouped[!(labels_grouped$label %in% common_labels), ])
uncommon_labels <- labels_grouped_uncommon[, head(.SD, 3), by = "type"]
# GENERATE LABEL COLUMNS ACCORDING TO SELECTED LABELS --------------------------
labs <- lab_glob$label
df_lab <- add_cat(df_md, 'metadata', labs, 'n_lab')
# NOTE(review): 'lab' is recomputed here but never used again in this script.
lab <- unlist(lapply(df_md$metadata, function(x) unlist(strsplit(x,','))))
# SAVE LABELS PER IMAGE INTO A FILE --------------------------------------------
# Note that this file is used to train the unsupervised clustering of images
df_lab[ , pk := paste0(AccomodationId,'_',PictureId)]
df_lab <- df_lab[,mget(c('pk',labs))]
fwrite(df_lab,paste0(data_folder,'generated_by_us/labels_per_image/labels_per_image.csv'))
| /code/02_1_label_feature_per_image.R | no_license | patriciamv/eDO_datathon | R | false | false | 3,197 | r | # setwd('D:/DO/eDO_datathon')
data_folder <- 'data/input/'
# LIBRARIES --------------------------------------------------------------------
library(data.table)
source('code/tools_R.R')
# LOADS ------------------------------------------------------------------------
# read metadata
df_md <- fread(paste0(data_folder,'labels_targets/train_metadata.csv'),
encoding = "UTF-8")
# read targets
df_tg <- fread(paste0(data_folder,'labels_targets/accomodations_train_split.csv'))
df_md_split <- merge(df_md, df_tg, by.x = 'AccomodationId', by.y = 'ID', all.x = T)
df_md_validation <- df_md_split[is_test == 1,]
df_md <- df_md_split[is_test == 0,]
# Reshape metadata vector into sth easy to use
df_md$metadata <- gsub(' ','',df_md$metadata)
df_md$metadata <- gsub('\\|',',',df_md$metadata)
# GLOBAL LIST OF LABELS --------------------------------------------------------
# List all labels
lab <- unlist(lapply(df_md$metadata, function(x) unlist(strsplit(x,','))))
# World total words:
lab_glob <- get_bow(lab)
# Global frequencies of labels
# We get the very top 5 first words and remove them from the list (they do not
# contribute into the classification)
lab_to_remove <- lab_glob[1:5,]
lab_glob <- get_bow(lab)
lab_glob$freq <- lab_glob$freq/sum(lab_glob$freq)
# Check which words classify better
# Choose global frequencies (Top XX)
lab_freq_glob <- lab_glob[1:5,]
# Choose global labels (Top XX)
labs <- as.character(lab_glob$label)[1:5]
# ------------------------------------------------------------------------------
# Generate a list grouped by type, word, appearence, total images in that category
types <- unique(df_md$TYPE)
labels_grouped <- list()
for (t in types) {
lab <- unlist(lapply(df_md$metadata[df_md$TYPE == t],
function(x) unlist(strsplit(x,','))))
lab_type <- get_bow(lab)
n_images <- sum(df_md$TYPE == t)
lab_type$type <- t
lab_type$n_images <- n_images
labels_grouped <- rbind(labels_grouped, lab_type)
}
# Removed failed labels
labels_grouped <- labels_grouped[! grepl('Failed',labels_grouped$label), ]
# Find common words to all types
list_words <- list()
i <- 1
for (t in types) {
list_words[[i]] <- as.character(labels_grouped$label[labels_grouped$type == t])
i <- i+1
}
names(list_words) <- types
common_labels <- as.character(Reduce(intersect, list_words))
# For each TYPE remove all common labels and select the top five
labels_grouped_uncommon <- as.data.table(labels_grouped[!(labels_grouped$label %in% common_labels), ])
uncommon_labels <- labels_grouped_uncommon[, head(.SD, 3), by = "type"]
# GENERATE LABEL COLUMNS ACCORDING TO SELECTED LABELS --------------------------
labs <- lab_glob$label
df_lab <- add_cat(df_md, 'metadata', labs, 'n_lab')
lab <- unlist(lapply(df_md$metadata, function(x) unlist(strsplit(x,','))))
# SAVE LABELS PER IMAGE INTO A FILE --------------------------------------------
# Note that this file is used to train the unsupervised clustering of images
df_lab[ , pk := paste0(AccomodationId,'_',PictureId)]
df_lab <- df_lab[,mget(c('pk',labs))]
fwrite(df_lab,paste0(data_folder,'generated_by_us/labels_per_image/labels_per_image.csv'))
|
#' Run the TeX Live Manager
#'
#' Execute the \command{tlmgr} command to search for LaTeX packages, install
#' packages, update packages, and so on.
#'
#' The \code{tlmgr()} function is a wrapper of \code{system2('tlmgr')}. All
#' other \code{tlmgr_*()} functions are based on \code{tlmgr} for specific
#' tasks. Please consult the \pkg{tlmgr} manual for full details.
#' @param args A character vector of arguments to be passed to the command
#' \command{tlmgr}.
#' @param usermode (For expert users only) Whether to use TeX Live's
#' \href{https://www.tug.org/texlive/doc/tlmgr.html#USER-MODE}{user mode}. If
#' \code{TRUE}, you must have run \code{tlmgr('init-usertree')} once before.
#' This option allows you to manage a user-level texmf tree, e.g., install a
#' LaTeX package to your home directory instead of the system directory, to
#' which you do not have write permission. This option should not be needed on
#' personal computers, and has some limitations, so please read the
#' \pkg{tlmgr} manual very carefully before using it.
#' @param ... Additional arguments passed to \code{\link{system2}()} (e.g.,
#' \code{stdout = TRUE} to capture stdout).
#' @param .quiet Whether to hide the actual command before executing it.
#' @references The \pkg{tlmgr} manual:
#' \url{https://www.tug.org/texlive/doc/tlmgr.html}
#' @export
#' @examples
#' # search for a package that contains titling.sty
#' tlmgr_search('titling.sty')
#'
#' #' to match titling.sty exactly, add a slash before the keyword, e.g.
#' #' tlmgr_search('/titling.sty')
#'
#' #' use a regular expression if you want to be more precise, e.g.
#' #' tlmgr_search('/titling\\.sty$')
#'
#' # list all installed LaTeX packages
#' tlmgr(c('info', '--list', '--only-installed', '--data', 'name'))
tlmgr = function(args = character(), usermode = FALSE, ..., .quiet = FALSE) {
  # put a TinyTeX-installed tlmgr on PATH for the duration of this call
  # (tweak_path registers the PATH restoration in *this* frame via on.exit)
  tweak_path()
  if (!tlmgr_available()) {
    warning('TeX Live does not seem to be installed. See https://yihui.name/tinytex/.')
  }
  # user mode manages a user-level texmf tree (see the tlmgr manual)
  if (usermode) args = c('--usermode', args)
  # echo the full command unless silenced
  if (!.quiet) message(paste(c('tlmgr', args), collapse = ' '))
  system2('tlmgr', args, ...)
}
# add ~/bin to PATH if necessary on Linux, because sometimes PATH may not be
# inherited (https://github.com/rstudio/rstudio/issues/1878), and TinyTeX is
# installed to ~/bin by default; on Windows, prioritize win_app_dir('TinyTeX')
# if it exists (so TinyTeX can be used even when MiKTeX is installed); on macOS,
# check if it is necessary to add ~/Library/TinyTeX/bin/*/ to PATH
#' @importFrom xfun is_linux is_unix is_macos is_windows with_ext
tweak_path = function() {
  # Temporarily prepend the TinyTeX binary directory to PATH so that the
  # bundled tlmgr/pdftex are found even when PATH was not inherited.
  # check if ~/bin/tlmgr exists (created by TinyTeX by default)
  f = if (is_linux()) '~/bin/tlmgr' else if (is_windows()) {
    win_app_dir('TinyTeX', 'bin', 'win32', 'tlmgr.bat', error = FALSE)
  } else if (is_macos()) '~/Library/TinyTeX/bin/x86_64-darwin/tlmgr' else return()
  # fall back to a user-configured location, and bail out if neither exists
  if (!file_test('-x', f)) f = getOption('tinytex.tlmgr.path', '')
  if (!file_test('-x', f)) return()
  bin = normalizePath(dirname(f))
  # if the pdftex from TinyTeX is already on PATH, no need to adjust the PATH
  if ((p <- Sys.which('pdftex')) != '') {
    p2 = xfun::with_ext(file.path(bin, 'pdftex'), xfun::file_ext(p))
    if (xfun::same_path(p, p2)) return()
  }
  # prepend the TinyTeX bin dir, de-duplicating any existing occurrence
  old = Sys.getenv('PATH')
  one = unlist(strsplit(old, s <- .Platform$path.sep, fixed = TRUE))
  Sys.setenv(PATH = paste(c(bin, setdiff(one, bin)), collapse = s))
  # register the PATH restoration as an on.exit() handler in the *caller's*
  # frame, so the tweak lasts exactly as long as the calling function runs
  do.call(
    on.exit, list(substitute(Sys.setenv(PATH = x), list(x = old)), add = TRUE),
    envir = parent.frame()
  )
}
tlmgr_available = function() Sys.which('tlmgr') != ''
#' @param what A search keyword as a (Perl) regular expression.
#' @param file Whether to treat \code{what} as a filename (pattern).
#' @param all For \code{tlmgr_search()}, whether to search in everything,
#' including package names, descriptions, and filenames. For
#' \code{tlmgr_update()}, whether to update all installed packages.
#' @param global Whether to search the online TeX Live Database or locally.
#' @param word Whether to restrict the search of package names and descriptions
#' to match only full words.
#' @rdname tlmgr
#' @export
tlmgr_search = function(what, file = TRUE, all = FALSE, global = TRUE, word = FALSE, ...) {
  # Assemble the option list first; c() silently drops the NULLs produced
  # by the one-armed `if`s, so only the enabled flags survive.
  opts = c(
    if (file) '--file',
    if (all) '--all',
    if (global) '--global',
    if (word) '--word'
  )
  # quote the keyword so regex metacharacters survive the shell
  tlmgr(c('search', opts, shQuote(what)), ...)
}
#' @param pkgs A character vector of LaTeX package names.
#' @param path Whether to run \code{tlmgr_path('add')} after installing packages
#' (\code{path = TRUE} is a conservative default: it is only necessary to do
#' this after a binary package is installed, such as the \pkg{metafont}
#' package, which contains the executable \command{mf}, but it does not hurt
#' even if no binary packages were installed).
#' @rdname tlmgr
#' @export
tlmgr_install = function(pkgs = character(), usermode = FALSE, path = !usermode && os != 'windows') {
  # NOTE(review): 'os' in the default of 'path' is presumably a package-level
  # platform constant defined elsewhere in this package -- confirm.
  # 0L means success or nothing to do; otherwise tlmgr's exit status
  res = 0L
  if (length(pkgs)) {
    res = tlmgr(c('install', pkgs), usermode)
    # if installation failed, or the packages still do not show up as
    # installed, refresh the package database and retry once
    if (res != 0 || tl_list(pkgs, stdout = FALSE, stderr = FALSE, .quiet = TRUE) != 0) {
      tlmgr_update(all = FALSE, usermode = usermode)
      res = tlmgr(c('install', pkgs), usermode)
    }
    # epstopdf needs GhostScript at run time: try Homebrew on macOS,
    # otherwise just warn the user
    if ('epstopdf' %in% pkgs && is_unix() && Sys.which('gs') == '') {
      if (is_macos() && Sys.which('brew') != '') {
        message('Trying to install GhostScript via Homebrew for the epstopdf package.')
        system('brew install ghostscript')
      }
      if (Sys.which('gs') == '') warning('GhostScript is required for the epstopdf package.')
    }
    # re-create symlinks in case a binary package (e.g. metafont) was installed
    if (path) tlmgr_path('add')
  }
  invisible(res)
}
#' @rdname tlmgr
#' @export
tlmgr_remove = function(pkgs = character(), usermode = FALSE) {
  # nothing to do for an empty package list
  if (length(pkgs) > 0) {
    tlmgr(c('remove', pkgs), usermode)
  }
}
#' @param self Whether to update the TeX Live Manager itself.
#' @param more_args A character vector of more arguments to be passed to the
#' command \command{tlmgr update} or \command{tlmgr conf}.
#' @param run_fmtutil Whether to run \command{fmtutil-sys --all} to (re)create
#' format and hyphenation files after updating \pkg{tlmgr}.
#' @rdname tlmgr
#' @export
tlmgr_update = function(all = TRUE, self = TRUE, more_args = character(), usermode = FALSE, run_fmtutil = TRUE) {
  # '--self' updates the TeX Live Manager itself; not supported in user
  # mode, hence the '&& !usermode'
  tlmgr(c('update', if (all) '--all', if (self && !usermode) '--self', more_args), usermode)
  # (re)create format and hyphenation files afterwards; fmtutil() is defined
  # elsewhere in this package and wraps 'fmtutil-sys --all'
  if (run_fmtutil) fmtutil(usermode)
}
#' @param action On Unix, add/remove symlinks of binaries to/from the system's
#' \code{PATH}. On Windows, add/remove the path to the TeXLive binary
#' directory to/from the system environment variable \code{PATH}.
#' @rdname tlmgr
#' @export
tlmgr_path = function(action = c('add', 'remove')) tlmgr(c('path', match.arg(action)))
#' @rdname tlmgr
#' @export
tlmgr_conf = function(more_args = character()) {
  # thin wrapper for 'tlmgr conf', e.g. tlmgr_conf(c('auxtrees', 'add', dir))
  tlmgr(c('conf', more_args))
}
#' Add/remove R's texmf tree to/from TeX Live
#'
#' R ships a custom texmf tree containing a few LaTeX style and class files,
#' which are required when compiling R packages manuals (\file{Rd.sty}) or
#' Sweave documents (\file{Sweave.sty}). This tree can be found under the
#' directory \code{file.path(R.home('share'), 'texmf')}. This function can be
#' used to add/remove R's texmf tree to/from TeX Live via
#' \code{\link{tlmgr_conf}('auxtrees')}.
#' @param action Add/remove R's texmf tree to/from TeX Live.
#' @references See the \pkg{tlmgr} manual for detailed information about
#' \command{tlmgr conf auxtrees}. Check out
#' \url{https://tex.stackexchange.com/q/77720/9128} if you don't know what
#' \code{texmf} means.
#' @export
#' @examples
#' r_texmf('remove')
#' r_texmf('add')
#'
#' # all files under R's texmf tree
#' list.files(file.path(R.home('share'), 'texmf'), recursive = TRUE, full.names = TRUE)
r_texmf = function(action = c('add', 'remove')) {
  # validate the action first (defaults to 'add')
  action = match.arg(action)
  # quote the tree path in case it contains spaces
  tlmgr_conf(c('auxtrees', action, shQuote(r_texmf_path())))
}
r_texmf_path = function() {
  # Locate R's texmf tree (the one containing Rd.sty, Sweave.sty, ...).
  d = file.path(R.home('share'), 'texmf')
  if (dir_exists(d)) return(d)
  # retry another directory: https://github.com/yihui/tinytex/issues/60
  # (some Linux distributions ship R's texmf files under /usr/share/texmf)
  if ('Rd.sty' %in% basename(list.files(d2 <- '/usr/share/texmf', recursive = TRUE))) {
    return(d2)
  }
  # fall back to the default location, but tell the user it was not found
  warning("Cannot find R's texmf tree; returning '", d, "'")
  d
}
#' Sizes of LaTeX packages in TeX Live
#'
#' Use the command \command{tlmgr info --list --only-installed} to obtain the
#' sizes of installed LaTeX packages.
#' @param show_total Whether to show the total size.
#' @export
#' @return A data frame of three columns: \code{package} is the package names,
#' \code{size} is the sizes in bytes, and \code{size_h} is the human-readable
#' version of sizes.
tl_sizes = function(show_total = TRUE) {
  # 'name,size' makes tlmgr emit CSV lines of "<package>,<bytes>"
  csv = tl_list(NULL, 'name,size', stdout = TRUE)
  sizes = read.table(
    text = csv, sep = ',', stringsAsFactors = FALSE,
    col.names = c('package', 'size')
  )
  # largest packages first
  sizes = sizes[order(sizes$size, decreasing = TRUE), , drop = FALSE]
  sizes$size_h = vapply(sizes$size, auto_size, character(1))
  rownames(sizes) = NULL
  if (show_total) message('The total size is ', auto_size(sum(sizes$size)))
  sizes
}
# Human-readable size from a byte count, e.g. 1024 -> "1 Kb".
auto_size = function(bytes) {
  sz = structure(bytes, class = 'object_size')
  format(sz, units = 'auto')
}
#' List the names of installed TeX Live packages
#'
#' Calls \command{tlmgr info --list --only-installed --data name} to obtain the
#' names of all installed TeX Live packages. Platform-specific strings in
#' package names are removed, e.g., \code{"tex"} is returned for the package
#' \pkg{tex.x86_64-darwin}.
#' @export
#' @return A character vector of package names.
tl_pkgs = function() gsub('[.].*', '', tl_list(stdout = TRUE))
# Shared helper: run `tlmgr info --list --only-installed --data <field>`,
# optionally restricted to the given packages (all installed if pkgs = NULL).
tl_list = function(pkgs = NULL, field = 'name', ...) {
  tlmgr(c('info', '--list', '--only-installed', '--data', field, pkgs), ...)
}
| /R/tlmgr.R | no_license | texervn/tinytex | R | false | false | 9,761 | r | #' Run the TeX Live Manager
#'
#' Execute the \command{tlmgr} command to search for LaTeX packages, install
#' packages, update packages, and so on.
#'
#' The \code{tlmgr()} function is a wrapper of \code{system2('tlmgr')}. All
#' other \code{tlmgr_*()} functions are based on \code{tlmgr} for specific
#' tasks. Please consult the \pkg{tlmgr} manual for full details.
#' @param args A character vector of arguments to be passed to the command
#' \command{tlmgr}.
#' @param usermode (For expert users only) Whether to use TeX Live's
#' \href{https://www.tug.org/texlive/doc/tlmgr.html#USER-MODE}{user mode}. If
#' \code{TRUE}, you must have run \code{tlmgr('init-usertree')} once before.
#' This option allows you to manage a user-level texmf tree, e.g., install a
#' LaTeX package to your home directory instead of the system directory, to
#' which you do not have write permission. This option should not be needed on
#' personal computers, and has some limitations, so please read the
#' \pkg{tlmgr} manual very carefully before using it.
#' @param ... Additional arguments passed to \code{\link{system2}()} (e.g.,
#' \code{stdout = TRUE} to capture stdout).
#' @param .quiet Whether to hide the actual command before executing it.
#' @references The \pkg{tlmgr} manual:
#' \url{https://www.tug.org/texlive/doc/tlmgr.html}
#' @export
#' @examples
#' # search for a package that contains titling.sty
#' tlmgr_search('titling.sty')
#'
#' #' to match titling.sty exactly, add a slash before the keyword, e.g.
#' #' tlmgr_search('/titling.sty')
#'
#' #' use a regular expression if you want to be more precise, e.g.
#' #' tlmgr_search('/titling\\.sty$')
#'
#' # list all installed LaTeX packages
#' tlmgr(c('info', '--list', '--only-installed', '--data', 'name'))
tlmgr = function(args = character(), usermode = FALSE, ..., .quiet = FALSE) {
tweak_path()
if (!tlmgr_available()) {
warning('TeX Live does not seem to be installed. See https://yihui.name/tinytex/.')
}
if (usermode) args = c('--usermode', args)
if (!.quiet) message(paste(c('tlmgr', args), collapse = ' '))
system2('tlmgr', args, ...)
}
# add ~/bin to PATH if necessary on Linux, because sometimes PATH may not be
# inherited (https://github.com/rstudio/rstudio/issues/1878), and TinyTeX is
# installed to ~/bin by default; on Windows, prioritize win_app_dir('TinyTeX')
# if it exists (so TinyTeX can be used even when MiKTeX is installed); on macOS,
# check if it is necessary to add ~/Library/TinyTeX/bin/*/ to PATH
#' @importFrom xfun is_linux is_unix is_macos is_windows with_ext
tweak_path = function() {
# check if ~/bin/tlmgr exists (created by TinyTeX by default)
f = if (is_linux()) '~/bin/tlmgr' else if (is_windows()) {
win_app_dir('TinyTeX', 'bin', 'win32', 'tlmgr.bat', error = FALSE)
} else if (is_macos()) '~/Library/TinyTeX/bin/x86_64-darwin/tlmgr' else return()
if (!file_test('-x', f)) f = getOption('tinytex.tlmgr.path', '')
if (!file_test('-x', f)) return()
bin = normalizePath(dirname(f))
# if the pdftex from TinyTeX is already on PATH, no need to adjust the PATH
if ((p <- Sys.which('pdftex')) != '') {
p2 = xfun::with_ext(file.path(bin, 'pdftex'), xfun::file_ext(p))
if (xfun::same_path(p, p2)) return()
}
old = Sys.getenv('PATH')
one = unlist(strsplit(old, s <- .Platform$path.sep, fixed = TRUE))
Sys.setenv(PATH = paste(c(bin, setdiff(one, bin)), collapse = s))
do.call(
on.exit, list(substitute(Sys.setenv(PATH = x), list(x = old)), add = TRUE),
envir = parent.frame()
)
}
tlmgr_available = function() Sys.which('tlmgr') != ''
#' @param what A search keyword as a (Perl) regular expression.
#' @param file Whether to treat \code{what} as a filename (pattern).
#' @param all For \code{tlmgr_search()}, whether to search in everything,
#' including package names, descriptions, and filenames. For
#' \code{tlmgr_update()}, whether to update all installed packages.
#' @param global Whether to search the online TeX Live Database or locally.
#' @param word Whether to restrict the search of package names and descriptions
#' to match only full words.
#' @rdname tlmgr
#' @export
tlmgr_search = function(what, file = TRUE, all = FALSE, global = TRUE, word = FALSE, ...) {
tlmgr(c(
'search', if (file) '--file', if (all) '--all', if (global) '--global',
if (word) '--word', shQuote(what)
), ...)
}
#' @param pkgs A character vector of LaTeX package names.
#' @param path Whether to run \code{tlmgr_path('add')} after installing packages
#' (\code{path = TRUE} is a conservative default: it is only necessary to do
#' this after a binary package is installed, such as the \pkg{metafont}
#' package, which contains the executable \command{mf}, but it does not hurt
#' even if no binary packages were installed).
#' @rdname tlmgr
#' @export
tlmgr_install = function(pkgs = character(), usermode = FALSE, path = !usermode && os != 'windows') {
res = 0L
if (length(pkgs)) {
res = tlmgr(c('install', pkgs), usermode)
if (res != 0 || tl_list(pkgs, stdout = FALSE, stderr = FALSE, .quiet = TRUE) != 0) {
tlmgr_update(all = FALSE, usermode = usermode)
res = tlmgr(c('install', pkgs), usermode)
}
if ('epstopdf' %in% pkgs && is_unix() && Sys.which('gs') == '') {
if (is_macos() && Sys.which('brew') != '') {
message('Trying to install GhostScript via Homebrew for the epstopdf package.')
system('brew install ghostscript')
}
if (Sys.which('gs') == '') warning('GhostScript is required for the epstopdf package.')
}
if (path) tlmgr_path('add')
}
invisible(res)
}
#' @rdname tlmgr
#' @export
tlmgr_remove = function(pkgs = character(), usermode = FALSE) {
if (length(pkgs)) tlmgr(c('remove', pkgs), usermode)
}
#' @param self Whether to update the TeX Live Manager itself.
#' @param more_args A character vector of more arguments to be passed to the
#' command \command{tlmgr update} or \command{tlmgr conf}.
#' @param run_fmtutil Whether to run \command{fmtutil-sys --all} to (re)create
#' format and hyphenation files after updating \pkg{tlmgr}.
#' @rdname tlmgr
#' @export
tlmgr_update = function(all = TRUE, self = TRUE, more_args = character(), usermode = FALSE, run_fmtutil = TRUE) {
tlmgr(c('update', if (all) '--all', if (self && !usermode) '--self', more_args), usermode)
if (run_fmtutil) fmtutil(usermode)
}
#' @param action On Unix, add/remove symlinks of binaries to/from the system's
#' \code{PATH}. On Windows, add/remove the path to the TeXLive binary
#' directory to/from the system environment variable \code{PATH}.
#' @rdname tlmgr
#' @export
tlmgr_path = function(action = c('add', 'remove')) tlmgr(c('path', match.arg(action)))
#' @rdname tlmgr
#' @export
tlmgr_conf = function(more_args = character()) {
tlmgr(c('conf', more_args))
}
#' Add/remove R's texmf tree to/from TeX Live
#'
#' R ships a custom texmf tree containing a few LaTeX style and class files,
#' which are required when compiling R packages manuals (\file{Rd.sty}) or
#' Sweave documents (\file{Sweave.sty}). This tree can be found under the
#' directory \code{file.path(R.home('share'), 'texmf')}. This function can be
#' used to add/remove R's texmf tree to/from TeX Live via
#' \code{\link{tlmgr_conf}('auxtrees')}.
#' @param action Add/remove R's texmf tree to/from TeX Live.
#' @references See the \pkg{tlmgr} manual for detailed information about
#' \command{tlmgr conf auxtrees}. Check out
#' \url{https://tex.stackexchange.com/q/77720/9128} if you don't know what
#' \code{texmf} means.
#' @export
#' @examples
#' r_texmf('remove')
#' r_texmf('add')
#'
#' # all files under R's texmf tree
#' list.files(file.path(R.home('share'), 'texmf'), recursive = TRUE, full.names = TRUE)
r_texmf = function(action = c('add', 'remove')) {
  # Register (or unregister) R's texmf tree as an auxiliary tree in TeX Live,
  # so files like Rd.sty and Sweave.sty are found by LaTeX.
  tlmgr_conf(c('auxtrees', match.arg(action), shQuote(r_texmf_path())))
}
# Locate R's texmf tree. Falls back to /usr/share/texmf (used by some Linux
# distributions) and finally returns the default path with a warning.
r_texmf_path = function() {
  # the texmf tree normally ships with R itself
  texmf = file.path(R.home('share'), 'texmf')
  if (dir_exists(texmf)) return(texmf)
  # retry another directory: https://github.com/yihui/tinytex/issues/60
  alt = '/usr/share/texmf'
  if ('Rd.sty' %in% basename(list.files(alt, recursive = TRUE))) return(alt)
  warning("Cannot find R's texmf tree; returning '", texmf, "'")
  texmf
}
#' Sizes of LaTeX packages in TeX Live
#'
#' Use the command \command{tlmgr info --list --only-installed} to obtain the
#' sizes of installed LaTeX packages.
#' @param show_total Whether to show the total size.
#' @export
#' @return A data frame of three columns: \code{package} is the package names,
#' \code{size} is the sizes in bytes, and \code{size_h} is the human-readable
#' version of sizes.
tl_sizes = function(show_total = TRUE) {
  # Query installed packages as CSV lines of "name,size" and parse them
  raw = tl_list(NULL, 'name,size', stdout = TRUE)
  sizes = read.table(
    text = raw, sep = ',', stringsAsFactors = FALSE,
    col.names = c('package', 'size')
  )
  # largest packages first
  sizes = sizes[order(sizes$size, decreasing = TRUE), , drop = FALSE]
  # human-readable sizes alongside the raw byte counts
  sizes$size_h = vapply(sizes$size, auto_size, character(1))
  rownames(sizes) = NULL
  if (show_total) message('The total size is ', auto_size(sum(sizes$size)))
  sizes
}
# human-readable size from bytes
auto_size = function(bytes) format(structure(bytes, class = 'object_size'), 'auto')
#' List the names of installed TeX Live packages
#'
#' Calls \command{tlmgr info --list --only-installed --data name} to obtain the
#' names of all installed TeX Live packages. Platform-specific strings in
#' package names are removed, e.g., \code{"tex"} is returned for the package
#' \pkg{tex.x86_64-darwin}.
#' @export
#' @return A character vector of package names.
tl_pkgs = function() gsub('[.].*', '', tl_list(stdout = TRUE))
# Query a data field of installed TeX Live packages via 'tlmgr info';
# extra arguments in ... are passed on to tlmgr().
tl_list = function(pkgs = NULL, field = 'name', ...) {
  args = c('info', '--list', '--only-installed', '--data', field, pkgs)
  tlmgr(args, ...)
}
|
#' Perform a simple margins analysis.
#'
#' \code{sim_margins} conducts a simple margins analysis for the purposes of
#' understanding two- and three-way interaction effects in linear regression.
#'
#' @param ... ignored.
#'
#' @details This allows the user to perform a simple margins analysis for the
#' purpose of probing interaction effects in a linear regression. Two- and
#' three-way interactions are supported, though one should be warned that
#' three-way interactions are not easy to interpret in this way.
#'
#' The function is tested with `lm`, `glm`, `svyglm`, and `merMod` inputs.
#' Others may work as well, but are not tested. In all but the linear model
#' case, be aware that not all the assumptions applied to simple slopes
#' analysis apply.
#'
#' @return
#'
#' A list object with the following components:
#'
#' \item{slopes}{A table of coefficients for the focal predictor at each
#' value of the moderator}
#' \item{ints}{A table of coefficients for the intercept at each value of the
#' moderator}
#' \item{modx.values}{The values of the moderator used in the analysis}
#'
#' @author Jacob Long <\email{long.1377@@osu.edu}>
#'
#' @inheritParams sim_slopes
#' @inheritParams margins::margins
#'
#' @seealso [margins::margins()]
#'
#' @family interaction tools
#'
#' @references
#'
#' Bauer, D. J., & Curran, P. J. (2005). Probing interactions in fixed and
#' multilevel regression: Inferential and graphical techniques.
#' \emph{Multivariate Behavioral Research}, \emph{40}(3), 373-400.
#' \url{https://doi.org/10.1207/s15327906mbr4003_5}
#'
#' Cohen, J., Cohen, P., West, S. G., & Aiken, L. S. (2003). \emph{Applied
#' multiple regression/correlation analyses for the behavioral sciences} (3rd
#' ed.). Mahwah, NJ: Lawrence Erlbaum Associates, Inc.
#'
#' Hanmer, M. J., & Kalkan, K. O. (2013). Behind the curve: Clarifying the best
#' approach to calculating predicted probabilities and marginal effects from
#' limited dependent variable models. *American Journal of Political Science*,
#' *57*, 263–277. \url{https://doi.org/10.1111/j.1540-5907.2012.00602.x}
#'
#'
#' @importFrom stats coef coefficients lm predict sd update getCall vcov relevel
#' @importFrom stats family aggregate formula
#' @import jtools
#' @export
#'
sim_margins <- function(model, pred, modx, mod2 = NULL, modx.values = NULL,
                        mod2.values = NULL, data = NULL, cond.int = FALSE,
                        vce = c("delta", "simulation", "bootstrap", "none"),
                        iterations = 1000,
                        digits = getOption("jtools-digits", default = 2),
                        pvals = TRUE, confint = FALSE, ci.width = .95,
                        cluster = NULL, modx.labels = NULL, mod2.labels = NULL,
                        ...) {
  if (!requireNamespace("margins")) {
    stop_wrap("You must have the margins package installed to use this
              function.")
  }
  # Evaluate the modx, mod2, pred args (quoted names -> strings)
  pred <- quo_name(enexpr(pred))
  modx <- quo_name(enexpr(modx))
  if (modx == "NULL") {modx <- NULL}
  mod2 <- quo_name(enexpr(mod2))
  if (mod2 == "NULL") {mod2 <- NULL}
  # Warn user if interaction term is absent
  if (!check_interactions(as.formula(formula(model)), c(pred, modx, mod2))) {
    warn_wrap(c(pred, modx, mod2), " are not included in an interaction with
              one another in the model.")
  }
  # Create object to return
  ss <- list()
  ss <- structure(ss, digits = digits)
  d <- get_data(model)
  # Survey models carry their design, needed for weighting and margins()
  if (is_survey <- inherits(model, "svyglm")) {
    design <- model$survey.design
  } else {design <- NULL}
  # Which variables are factors?
  facvars <- names(d)[!unlist(lapply(d, is.numeric))]
  fvars <- names(d)
  # Check for factor predictor
  if (is.factor(d[[pred]])) {
    # I could assume the factor is properly ordered, but that's too risky
    stop(wrap_str("Focal predictor (\"pred\") cannot be a factor. Either
                  use it as modx or convert it to a numeric dummy variable."))
  }
  # Retrieve the weights once (previously get_weights() was called twice)
  w <- get_weights(model, d)
  wname <- w$weights_name
  wts <- w$weights
  offname <- get_offset_name(model)
  # Need the raw variable name from the LHS
  resp <- all.vars(as.formula(paste("~", (get_response_name(model)))))
  # Saving key arguments as attributes of return object
  ss <- structure(ss, resp = resp, modx = modx, mod2 = mod2, pred = pred,
                  cond.int = cond.int)
  ### Getting moderator values ################################################
  modxvals2 <- mod_vals(d, modx, modx.values, is_survey, wts, design,
                        modx.labels = modx.labels,
                        any.mod2 = !is.null(mod2), sims = TRUE)
  # Now specify def or not (for labeling w/ print method); scalar condition,
  # so use || rather than elementwise |
  if (is.character(modx.values) || is.null(modx.values)) {
    ss <- structure(ss, def = TRUE)
  } else {
    ss <- structure(ss, def = FALSE)
  }
  # Don't want def = TRUE for factors even though they are character
  if (!is.numeric(d[[modx]])) {ss <- structure(ss, def = FALSE)}
  if (!is.null(mod2)) {
    if (is.numeric(d[[mod2]])) {
      mod2vals2 <- mod_vals(d, mod2, mod2.values, is_survey, wts, design,
                            modx.labels = mod2.labels,
                            any.mod2 = !is.null(mod2),
                            sims = TRUE)
    } else {
      # Factor second moderator: use its levels (or the user's subset of them)
      if (is.null(mod2.values)) {
        mod2vals2 <- levels(d[[mod2]])
      } else {
        if (all(mod2.values %in% unique(d[[mod2]]))) {
          mod2vals2 <- mod2.values
        } else {
          warn_wrap("mod2.values argument must include only levels of the
                    factor. Using all factor levels instead.", call. = FALSE)
          mod2vals2 <- unique(d[[mod2]])
        }
      }
    }
    # Now specify def or not
    if (is.character(mod2.values) || is.null(mod2.values)) {
      ss <- structure(ss, def2 = TRUE)
    } else {
      ss <- structure(ss, def2 = FALSE)
    }
    # Don't want def = TRUE for factors even though they are character
    if (!is.numeric(d[[mod2]])) {ss <- structure(ss, def2 = FALSE)}
  } else {
    mod2vals2 <- NULL
    modxvals2 <- rev(modxvals2)
  }
  #### Call margins ###########################################################
  # Create list to provide to `at` argument
  at_list <- list()
  at_list[[modx]] <- modxvals2
  if (!is.null(mod2)) {
    at_list[[mod2]] <- mod2vals2
  }
  # (design was already set above; the previous redundant recomputation of it
  # here has been removed)
  # Get the margins
  suppressWarnings({ # can't have confusing warnings from margins
    margs <- margins::margins(model, data = d, at = at_list, vce = vce,
                              # don't need modx, but it works around margins issue #112
                              variables = c(pred, modx),
                              iterations = iterations, design = design
    )
  })
  # Get the summary data frame, drop the modx rows, drop the "factor" column
  slopes <- subset(summary(margs, level = ci.width), factor == pred) %not%
    "factor"
  # determine if we're using t or z values (scalar condition -> &&)
  t_or_z <-
    if (family(model)$family == "gaussian" && family(model)$link == "identity") {
      "t val."
    } else {
      "z val."
    }
  names(slopes) %just% c("AME", "SE", "z", "lower", "upper") <-
    c("Est.", "S.E.", t_or_z, unlist(make_ci_labs(ci.width)))
  # Get the conditional intercepts
  # Using aggregate to take the means at each combination of modx and mod2
  ## Have to jump through some hoops to weight the means
  num_per <- nrow(margs) /
    nrow(expand.grid(modxvals2, if (!is.null(mod2vals2)) mod2vals2 else NA))
  ## num_per is how many rows there are per set of modx/mod2 in the margs obj
  ints <- aggregate(margs[c("fitted")], margs[c(modx, mod2)],
                    mean_or_base, weights = margs[["_weights"]][seq_len(num_per)])
  names(ints) %just% "fitted" <- "intercept"
  # Add the intercepts directly to the slopes data.frame since I don't have
  # variance estimates for them anyway
  slopes <- merge(slopes, ints)
  if (!is.null(mod2)) {
    # While we're here, let's split the slopes data.frame into pieces by mod2
    slopes <- split(slopes, slopes[mod2])
    names(slopes) <- paste(mod2, "=", names(mod2vals2))
  }
  ss <- structure(ss, modx.values = modxvals2, vce = vce,
                  cond.int = cond.int, confint = confint,
                  ci.width = ci.width, t_or_z = t_or_z,
                  nobs = nobs(model))
  ss$slopes <- slopes
  ss$ints <- ints
  if (!is.null(mod2)) {ss <- structure(ss, mod2.values = mod2vals2)}
  class(ss) <- "sim_margins"
  return(ss)
}
#### PRINT METHOD ############################################################
#' @export
#' @importFrom cli cat_rule rule
#' @importFrom crayon red bold italic underline
print.sim_margins <- function(x, ...) {
  # This is to make refactoring easier after switch to attributes
  ss <- x
  x <- attributes(x)
  # Number of outer iterations: one per second-moderator value, or a single
  # pass when there is no second moderator. (The previous local variable was
  # named `length`, which shadowed base::length().)
  n_mod2 <- if (!is.null(x$mod2)) length(x$mod2.values) else 1
  # Loop through each value of second moderator...if none, just one loop
  for (j in seq_len(n_mod2)) {
    m <- NULL
    # If we're using second moderator, need to make things make sense
    # to inner loop
    if (!is.null(x$mod2)) {
      m <- ss$slopes[[j]]
      if (!is.character(x$mod2.values)) {
        m[x$mod2] <- num_print(m[x$mod2], x$digits)
      }
      # Printing output to make it clear where each batch of second moderator
      # slopes begins
      if (x$def2 == FALSE) {
        cat(rule(center = paste0("While ", x$mod2, " (2nd moderator) ",
                                 "= ", m[j, x$mod2]), line = "bar8"),
            "\n\n")
      } else {
        # If the user went with default +/- SD or used a factor variable,
        # we use the labels
        label <- names(x$mod2.values)[
          which(num_print(x$mod2.values, x$digits) == as.vector(m[j, x$mod2]))
        ]
        cat(rule(center = paste0("While ", x$mod2, " (2nd moderator)", " = ",
                                 m[j, x$mod2], " (", label, ")"), line = "bar8"), "\n\n")
      }
      m <- m %not% x$mod2
    } else {
      m <- ss$slopes
    }
    # Drop columns the user did not ask for
    if (x$confint == FALSE) {
      m <- m %not% unlist(make_ci_labs(x$ci.width))
    }
    if (x$cond.int == FALSE) {
      m <- m %not% "intercept"
    }
    # Clearly label simple slopes
    cat(bold(underline("SIMPLE MARGINS")), "\n\n")
    for (i in seq_along(x$modx.values)) {
      if (!is.character(x$modx.values)) {
        m[x$modx] <- num_print(m[x$modx], digits = x$digits)
      }
      slopes <- as.data.frame(lapply(m[i, 2:ncol(m)], as.numeric),
                              check.names = FALSE)
      # Handle automatic labels
      if (x$def == TRUE) {
        label <- names(x$modx.values)[
          which(num_print(x$modx.values, x$digits) == as.vector(m[i, x$modx]))
        ]
        modx_label <- paste0(m[i, x$modx], " (", label, ")")
      } else {
        modx_label <- paste0(m[i, x$modx])
      }
      cat(italic(paste0("Average marginal effect of ", x$pred, " when ",
                        x$modx, " = ", modx_label, ": \n\n")))
      print(md_table(slopes, digits = x$digits, format = "pandoc",
                     row.names = FALSE, sig.digits = FALSE))
      cat("\n")
    }
  } # end mod2 loop
}
#### alternate output formats ################################################
#' @title Tidiers for [sim_margins()] objects.
#' @description You can use [broom::tidy()] and [broom::glance()] for "tidy"
#' methods of storing `sim_margins` output.
#' @param x The `sim_margins` object
#' @param conf.level The width of confidence intervals. Default is .95 (95\%).
#' @param ... Ignored.
#' @rdname sim_margins_tidiers
#' @export
tidy.sim_margins <- function(x, conf.level = .95, ...) {
  # Columns of the tidy output ("term" and the CI columns are added later)
  cols <- c("estimate", "std.error", "statistic", "p.value", "modx",
            "modx.value", "mod2", "mod2.value", "intercept")
  # Figure out how many rows the data frame will be
  num_coefs <- ifelse(is.data.frame(x$slopes),
                      no = length(x$slopes) * nrow(x$slopes[[1]]),
                      yes = nrow(x$slopes))
  # Create NA-filled data frame
  base <- as.data.frame(matrix(rep(NA, times = num_coefs * length(cols)),
                               ncol = length(cols)))
  # Name the columns
  names(base) <- cols
  # Get the attributes from the sim_slopes object
  atts <- attributes(x)
  # Is there a second moderator?
  any_mod2 <- !is.null(atts$mod2)
  if (any_mod2 == FALSE) {
    all_slopes <- x$slopes
  } else {
    # with a second moderator, x$slopes is a list of data frames; stack them
    all_slopes <- do.call("rbind", x$slopes)
  }
  # Include the moderator name (probably not best to include this redundant
  # info)
  base$modx <- atts$modx
  # Move the table of values to the data frame
  base$modx.value <- all_slopes[,1]
  base$estimate <- all_slopes[,"Est."]
  base$std.error <- all_slopes[,"S.E."]
  base$p.value <- all_slopes[,"p"]
  base$statistic <- all_slopes[, grep("val.", colnames(all_slopes), value = T)]
  base$intercept <- all_slopes[,"intercept"]
  # Handle CIs
  ## These are the requested CI labels
  want_labs <- unlist(make_ci_labs(conf.level))
  ## Check if those are already calculated
  if (all(want_labs %in% colnames(all_slopes))) {
    base$conf.low <- all_slopes[,make_ci_labs(conf.level)[[1]]]
    base$conf.high <- all_slopes[,make_ci_labs(conf.level)[[2]]]
  } else { # If not, calculate them
    alpha <- (1 - conf.level) / 2
    # NOTE(review): sim_margins objects do not appear to set a "mods" element
    # (that belongs to sim_slopes), so x$mods[[1]] would be NULL[[1]] and
    # error if this fallback branch ever runs -- confirm against sim_margins()
    crit_t <- if (class(x$mods[[1]]) == "lm") {
      abs(qt(alpha, df = df.residual(x$mods[[1]])))
    } else {
      abs(qnorm(alpha))
    }
    base$conf.low <- base$estimate - (crit_t * base$std.error)
    base$conf.high <- base$estimate + (crit_t * base$std.error)
  }
  # Create unique term labels for each value of the moderator
  base$term <- paste(base$modx, "=",
                     if (is.character(base$modx.value)) {
                       base$modx.value
                     } else {
                       num_print(base$modx.value, attr(x, "digits"))
                     }
  )
  # Do the same for moderator 2 if any
  if (any_mod2 == TRUE) {
    base$mod2 <- atts$mod2
    # each mod2 value repeats once per row of the per-mod2 slopes tables
    base$mod2.value <- unlist(lapply(atts$mod2.values, function(y) {
      rep(y, nrow(x$slopes[[1]]))
    }))
    base$mod2.term <- paste(base$mod2, "=",
                            if (is.character(base$mod2.value)) {
                              base$mod2.value
                            } else {
                              num_print(base$mod2.value, attr(x, "digits"))
                            }
    )
  }
  base <- tibble::as_tibble(base)
  attr(base, "pred") <- atts$pred
  return(base)
}
#' @rdname sim_margins_tidiers
#' @export
glance.sim_margins <- function(x, ...) {
  # A one-row summary: just the number of observations used in the model
  out <- data.frame(N = nobs(x))
  tibble::as_tibble(out)
}
#' @importFrom stats nobs
#' @export
# nobs() method: the observation count is stored as an attribute by
# sim_margins() at creation time.
nobs.sim_margins <- function(object, ...) attr(object, "nobs")
#' @title Create tabular output for simple margins analysis
#'
#' @description This function converts a `sim_margins` object into a
#' `huxtable` object, making it suitable for use in external documents.
#' @param x A `sim_margins` object.
#' @inheritParams as_huxtable.sim_slopes
#'
#' @details
#'
#' For more on what you can do with a `huxtable`, see \pkg{huxtable}.
#'
#' @rdname as_huxtable.sim_margins
#' @rawNamespace
#' if (getRversion() >= "3.6.0") {
#' S3method(huxtable::as_huxtable, sim_margins)
#' } else {
#' export(as_huxtable.sim_margins)
#' }
as_huxtable.sim_margins <- function(x, format = "{estimate} ({std.error})",
    sig.levels = c(`***` = .001, `**` = .01, `*` = .05, `#` = .1),
    digits = getOption("jtools-digits", 2), conf.level = .95,
    intercept = attr(x, "cond.int"), int.format = format, ...) {
  # Convert to a tidy data frame first, then delegate table construction
  df <- tidy.sim_margins(x, conf.level = conf.level)
  # Conditional intercepts are only included when the object was built with
  # cond.int = TRUE
  make_table(df = df, format = format, sig.levels = sig.levels, digits = digits,
             label = "Average marginal effect of",
             intercept = if (attr(x, "cond.int")) df$intercept else NULL)
}
#' @export
#' @title Plot coefficients from simple slopes analysis
#' @description This creates a coefficient plot to visually summarize the
#' results of simple slopes analysis.
#' @param x A [sim_margins()] object.
#' @param ... arguments passed to [jtools::plot_coefs()]
plot.sim_margins <- function(x, ...) {
  # Get the plot and add better x-axis label
  p <- plot_coefs(x, ...) + ggplot2::xlab(paste("Average marginal effect of",
                                                attr(x, "pred")))
  # If there's a second moderator, format as appropriate:
  # one facet per second-moderator value, stacked in a single column
  if (!is.null(attr(x, "mod2"))) {
    p <- p + ggplot2::facet_wrap(mod2.term ~ ., ncol = 1, scales = "free_y",
                                strip.position = "top")
  }
  p
}
| /R/simple_margins.R | permissive | mychan24/interactions | R | false | false | 16,710 | r | #' Perform a simple margins analysis.
#'
#' \code{sim_margins} conducts a simple margins analysis for the purposes of
#' understanding two- and three-way interaction effects in linear regression.
#'
#' @param ... ignored.
#'
#' @details This allows the user to perform a simple margins analysis for the
#' purpose of probing interaction effects in a linear regression. Two- and
#' three-way interactions are supported, though one should be warned that
#' three-way interactions are not easy to interpret in this way.
#'
#' The function is tested with `lm`, `glm`, `svyglm`, and `merMod` inputs.
#' Others may work as well, but are not tested. In all but the linear model
#' case, be aware that not all the assumptions applied to simple slopes
#' analysis apply.
#'
#' @return
#'
#' A list object with the following components:
#'
#' \item{slopes}{A table of coefficients for the focal predictor at each
#' value of the moderator}
#' \item{ints}{A table of coefficients for the intercept at each value of the
#' moderator}
#' \item{modx.values}{The values of the moderator used in the analysis}
#'
#' @author Jacob Long <\email{long.1377@@osu.edu}>
#'
#' @inheritParams sim_slopes
#' @inheritParams margins::margins
#'
#' @seealso [margins::margins()]
#'
#' @family interaction tools
#'
#' @references
#'
#' Bauer, D. J., & Curran, P. J. (2005). Probing interactions in fixed and
#' multilevel regression: Inferential and graphical techniques.
#' \emph{Multivariate Behavioral Research}, \emph{40}(3), 373-400.
#' \url{https://doi.org/10.1207/s15327906mbr4003_5}
#'
#' Cohen, J., Cohen, P., West, S. G., & Aiken, L. S. (2003). \emph{Applied
#' multiple regression/correlation analyses for the behavioral sciences} (3rd
#' ed.). Mahwah, NJ: Lawrence Erlbaum Associates, Inc.
#'
#' Hanmer, M. J., & Kalkan, K. O. (2013). Behind the curve: Clarifying the best
#' approach to calculating predicted probabilities and marginal effects from
#' limited dependent variable models. *American Journal of Political Science*,
#' *57*, 263–277. \url{https://doi.org/10.1111/j.1540-5907.2012.00602.x}
#'
#'
#' @importFrom stats coef coefficients lm predict sd update getCall vcov relevel
#' @importFrom stats family aggregate formula
#' @import jtools
#' @export
#'
sim_margins <- function(model, pred, modx, mod2 = NULL, modx.values = NULL,
mod2.values = NULL, data = NULL, cond.int = FALSE,
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 1000,
digits = getOption("jtools-digits", default = 2),
pvals = TRUE, confint = FALSE, ci.width = .95,
cluster = NULL, modx.labels = NULL, mod2.labels = NULL,
...) {
if (!requireNamespace("margins")) {
stop_wrap("You must have the margins package installed to use this
function.")
}
# Evaluate the modx, mod2, pred args
pred <- quo_name(enexpr(pred))
modx <- quo_name(enexpr(modx))
if (modx == "NULL") {modx <- NULL}
mod2 <- quo_name(enexpr(mod2))
if (mod2 == "NULL") {mod2 <- NULL}
# Warn user if interaction term is absent
if (!check_interactions(as.formula(formula(model)), c(pred, modx, mod2))) {
warn_wrap(c(pred, modx, mod2), " are not included in an interaction with
one another in the model.")
}
# Create object to return
ss <- list()
ss <- structure(ss, digits = digits)
d <- get_data(model)
if (is_survey <- "svyglm" %in% class(model)) {
design <- model$survey.design
} else {design <- NULL}
# Which variables are factors?
facvars <- names(d)[!unlist(lapply(d, is.numeric))]
fvars <- names(d)
# Check for factor predictor
if (is.factor(d[[pred]])) {
# I could assume the factor is properly ordered, but that's too risky
stop(wrap_str("Focal predictor (\"pred\") cannot be a factor. Either
use it as modx or convert it to a numeric dummy variable."))
}
wname <- get_weights(model, d)$weights_name
wts <- get_weights(model, d)$weights
offname <- get_offset_name(model)
# Need the raw variable name from the LHS
resp <- all.vars(as.formula(paste("~", (get_response_name(model)))))
# Saving key arguments as attributes of return object
ss <- structure(ss, resp = resp, modx = modx, mod2 = mod2, pred = pred,
cond.int = cond.int)
### Getting moderator values ##################################################
modxvals2 <- mod_vals(d, modx, modx.values, is_survey, wts, design,
modx.labels = modx.labels,
any.mod2 = !is.null(mod2), sims = TRUE)
# Now specify def or not (for labeling w/ print method)
if (is.character(modx.values) | is.null(modx.values)) {
ss <- structure(ss, def = TRUE)
} else {
ss <- structure(ss, def = FALSE)
}
# Don't want def = TRUE for factors even though they are character
if (!is.numeric(d[[modx]])) {ss <- structure(ss, def = FALSE)}
if (!is.null(mod2)) {
if (is.numeric(d[[mod2]])) {
mod2vals2 <- mod_vals(d, mod2, mod2.values, is_survey, wts, design,
modx.labels = mod2.labels,
any.mod2 = !is.null(mod2),
sims = TRUE)
} else {
if (is.null(mod2.values)) {
mod2vals2 <- levels(d[[mod2]])
} else {
if (all(mod2.values %in% unique(d[[mod2]]))) {
mod2vals2 <- mod2.values
} else {
warn_wrap("mod2.values argument must include only levels of the
factor. Using all factor levels instead.", call. = FALSE)
mod2vals2 <- unique(d[[mod2]])
}
}
}
# Now specify def or not
if (is.character(mod2.values) | is.null(mod2.values)) {
ss <- structure(ss, def2 = TRUE)
} else {
ss <- structure(ss, def2 = FALSE)
}
# Don't want def = TRUE for factors even though they are character
if (!is.numeric(d[[mod2]])) {ss <- structure(ss, def2 = FALSE)}
} else {
mod2vals2 <- NULL
modxvals2 <- rev(modxvals2)
}
#### Call margins #############################################################
# Create list to provide to `at` argument
at_list <- list()
at_list[[modx]] <- modxvals2
if (!is.null(mod2)) {
at_list[[mod2]] <- mod2vals2
}
design <- if ("svyglm" %in% class(model)) model$survey.design else NULL
# Get the margins
suppressWarnings({ # can't have confusing warnings from margins
margs <- margins::margins(model, data = d, at = at_list, vce = vce,
# don't need modx, but it works around margins issue #112
variables = c(pred, modx),
iterations = iterations, design = design
)
})
# Get the summary data frame, drop the modx rows, drop the "factor" column
slopes <- subset(summary(margs, level = ci.width), factor == pred) %not%
"factor"
# determine if we're using t or z values
t_or_z <-
if (family(model)$family == "gaussian" & family(model)$link == "identity") {
"t val."
} else {
"z val."
}
names(slopes) %just% c("AME", "SE", "z", "lower", "upper") <-
c("Est.", "S.E.", t_or_z, unlist(make_ci_labs(ci.width)))
# Get the conditional intercepts
# Using aggregate to take the means at each combination of modx and mod2
## Have to jump through some hoops to weight the means
num_per <- nrow(margs) /
nrow(expand.grid(modxvals2, if (!is.null(mod2vals2)) mod2vals2 else NA))
## num_per is how many rows there are per set of modx/mod2 in the margs obj
ints <- aggregate(margs[c("fitted")], margs[c(modx, mod2)],
mean_or_base, weights = margs[["_weights"]][1:num_per])
names(ints) %just% "fitted" <- "intercept"
# Add the intercepts directly to the slopes data.frame since I don't have
# variance estimates for them anyway
slopes <- merge(slopes, ints)
if (!is.null(mod2)) {
# While we're here, let's split the slopes data.frame into pieces by mod2
slopes <- split(slopes, slopes[mod2])
names(slopes) <- paste(mod2, "=", names(mod2vals2))
}
ss <- structure(ss, modx.values = modxvals2, vce = vce,
cond.int = cond.int, confint = confint,
ci.width = ci.width, t_or_z = t_or_z,
nobs = nobs(model))
ss$slopes <- slopes
ss$ints <- ints
if (!is.null(mod2)) {ss <- structure(ss, mod2.values = mod2vals2)}
class(ss) <- "sim_margins"
return(ss)
}
#### PRINT METHOD ############################################################
#' @export
#' @importFrom cli cat_rule rule
#' @importFrom crayon red bold italic
print.sim_margins <- function(x, ...) {
# This is to make refactoring easier after switch to attributes
ss <- x
x <- attributes(x)
# This helps deal with the fact sometimes mod2vals has no length, so we want
# to loop just once
if (!is.null(x$mod2)) {
length <- length(x$mod2.values)
} else {
length <- 1
}
# Loop through each value of second moderator...if none, just one loop
for (j in 1:length) {
m <- NULL
# If we're using second moderator, need to make things make sense
# to inner loop
if (!is.null(x$mod2)) {
m <- ss$slopes[[j]]
if (class(x$mod2.values) != "character") {
m[x$mod2] <- num_print(m[x$mod2], x$digits)
}
# Printing output to make it clear where each batch of second moderator
# slopes begins
if (x$def2 == FALSE) {
cat(rule(center = paste0("While ", x$mod2, " (2nd moderator) ",
"= ", m[j, x$mod2]), line = "bar8"),
"\n\n")
} else {
# If the user went with default +/- SD or used a factor variable,
# we use the labels
label <- names(x$mod2.values)[
which(num_print(x$mod2.values, x$digits) == as.vector(m[j, x$mod2]))
]
cat(rule(center = paste0("While ", x$mod2, " (2nd moderator)", " = ",
m[j, x$mod2], " (", label, ")"), line = "bar8"), "\n\n")
}
m <- m %not% x$mod2
} else {
m <- ss$slopes
}
if (x$confint == FALSE) {
m <- m %not% unlist(make_ci_labs(x$ci.width))
}
if (x$cond.int == FALSE) {
m <- m %not% "intercept"
}
# Clearly label simple slopes
cat(bold(underline("SIMPLE MARGINS")), "\n\n")
for (i in seq_along(x$modx.values)) {
if (class(x$modx.values) != "character") {
m[x$modx] <- num_print(m[x$modx], digits = x$digits)
}
slopes <- as.data.frame(lapply(m[i,2:ncol(m)], as.numeric),
check.names = FALSE)
# Handle automatic labels
if (x$def == TRUE) {
label <- names(x$modx.values)[
which(num_print(x$modx.values, x$digits) == as.vector(m[i, x$modx]))
]
modx_label <- paste0(m[i, x$modx], " (", label, ")")
} else {
modx_label <- paste0(m[i, x$modx])
}
cat(italic(paste0("Average marginal effect of ", x$pred, " when ",
x$modx, " = ", modx_label, ": \n\n")))
print(md_table(slopes, digits = x$digits, format = "pandoc",
row.names = FALSE, sig.digits = FALSE))
cat("\n")
}
} # end mod2 loop
}
#### alternate output formats ################################################
#' @title Tidiers for [sim_margins()] objects.
#' @description You can use [broom::tidy()] and [broom::glance()] for "tidy"
#' methods of storing `sim_margins` output.
#' @param x The `sim_margins` object
#' @param conf.level The width of confidence intervals. Default is .95 (95\%).
#' @param ... Ignored.
#' @rdname sim_margins_tidiers
#' @export
tidy.sim_margins <- function(x, conf.level = .95, ...) {
cols <- c("estimate", "std.error", "statistic", "p.value", "modx",
"modx.value", "mod2", "mod2.value", "intercept")
# Figure out how many rows the data frame will be
num_coefs <- ifelse(is.data.frame(x$slopes),
no = length(x$slopes) * nrow(x$slopes[[1]]),
yes = nrow(x$slopes))
# Create NA-filled data frame
base <- as.data.frame(matrix(rep(NA, times = num_coefs * length(cols)),
ncol = length(cols)))
# Name the columns
names(base) <- cols
# Get the attributes from the sim_slopes object
atts <- attributes(x)
# Is there a second moderator?
any_mod2 <- !is.null(atts$mod2)
if (any_mod2 == FALSE) {
all_slopes <- x$slopes
} else {
all_slopes <- do.call("rbind", x$slopes)
}
# Include the moderator name (probably not best to include this redundant
# info)
base$modx <- atts$modx
# Move the table of values to the data frame
base$modx.value <- all_slopes[,1]
base$estimate <- all_slopes[,"Est."]
base$std.error <- all_slopes[,"S.E."]
base$p.value <- all_slopes[,"p"]
base$statistic <- all_slopes[, grep("val.", colnames(all_slopes), value = T)]
base$intercept <- all_slopes[,"intercept"]
# Handle CIs
## These are the requested CI labels
want_labs <- unlist(make_ci_labs(conf.level))
## Check if those are already calculated
if (all(want_labs %in% colnames(all_slopes))) {
base$conf.low <- all_slopes[,make_ci_labs(conf.level)[[1]]]
base$conf.high <- all_slopes[,make_ci_labs(conf.level)[[2]]]
} else { # If not, calculate them
alpha <- (1 - conf.level) / 2
crit_t <- if (class(x$mods[[1]]) == "lm") {
abs(qt(alpha, df = df.residual(x$mods[[1]])))
} else {
abs(qnorm(alpha))
}
base$conf.low <- base$estimate - (crit_t * base$std.error)
base$conf.high <- base$estimate + (crit_t * base$std.error)
}
# Create unique term labels for each value of the moderator
base$term <- paste(base$modx, "=",
if (is.character(base$modx.value)) {
base$modx.value
} else {
num_print(base$modx.value, attr(x, "digits"))
}
)
# Do the same for moderator 2 if any
if (any_mod2 == TRUE) {
base$mod2 <- atts$mod2
base$mod2.value <- unlist(lapply(atts$mod2.values, function(y) {
rep(y, nrow(x$slopes[[1]]))
}))
base$mod2.term <- paste(base$mod2, "=",
if (is.character(base$mod2.value)) {
base$mod2.value
} else {
num_print(base$mod2.value, attr(x, "digits"))
}
)
}
base <- tibble::as_tibble(base)
attr(base, "pred") <- atts$pred
return(base)
}
#' @rdname sim_margins_tidiers
#' @export
glance.sim_margins <- function(x, ...) {
  # A one-row summary: just the number of observations used in the model
  out <- data.frame(N = nobs(x))
  tibble::as_tibble(out)
}
#' @importFrom stats nobs
#' @export
# nobs() method: the observation count is stored as an attribute by
# sim_margins() at creation time.
nobs.sim_margins <- function(object, ...) attr(object, "nobs")
#' @title Create tabular output for simple margins analysis
#'
#' @description This function converts a `sim_margins` object into a
#' `huxtable` object, making it suitable for use in external documents.
#' @param x A `sim_margins` object.
#' @inheritParams as_huxtable.sim_slopes
#'
#' @details
#'
#' For more on what you can do with a `huxtable`, see \pkg{huxtable}.
#'
#' @rdname as_huxtable.sim_margins
#' @rawNamespace
#' if (getRversion() >= "3.6.0") {
#' S3method(huxtable::as_huxtable, sim_margins)
#' } else {
#' export(as_huxtable.sim_margins)
#' }
as_huxtable.sim_margins <- function(x, format = "{estimate} ({std.error})",
    sig.levels = c(`***` = .001, `**` = .01, `*` = .05, `#` = .1),
    digits = getOption("jtools-digits", 2), conf.level = .95,
    intercept = attr(x, "cond.int"), int.format = format, ...) {
  # Convert to a tidy data frame first, then delegate table construction
  df <- tidy.sim_margins(x, conf.level = conf.level)
  # Conditional intercepts are only included when the object was built with
  # cond.int = TRUE
  make_table(df = df, format = format, sig.levels = sig.levels, digits = digits,
             label = "Average marginal effect of",
             intercept = if (attr(x, "cond.int")) df$intercept else NULL)
}
#' @export
#' @title Plot coefficients from simple slopes analysis
#' @description This creates a coefficient plot to visually summarize the
#' results of simple slopes analysis.
#' @param x A [sim_margins()] object.
#' @param ... arguments passed to [jtools::plot_coefs()]
plot.sim_margins <- function(x, ...) {
  # Get the plot and add better x-axis label
  p <- plot_coefs(x, ...) + ggplot2::xlab(paste("Average marginal effect of",
                                                attr(x, "pred")))
  # If there's a second moderator, format as appropriate:
  # one facet per second-moderator value, stacked in a single column
  if (!is.null(attr(x, "mod2"))) {
    p <- p + ggplot2::facet_wrap(mod2.term ~ ., ncol = 1, scales = "free_y",
                                strip.position = "top")
  }
  p
}
|
#' @title Generate model matrix of all possible models with maximum specified size
#' @param mT1 maximum model size (largest number of SNPs allowed in one model)
#' @param msnps vector of SNP names to include in the model matrix
#' @return binary model matrix for the SNPs in msnps: columns are SNPs, each
#'   row is one model (1 = SNP included); the first row is the null model.
T1modsmat.fn <- function(mT1, msnps) {
  s <- length(msnps)
  # One list entry per model size; each holds the 0/1 indicator matrix for
  # all models of exactly that size.
  modT1 <- vector("list", mT1)
  for (i in seq_len(mT1)) {
    # All size-i subsets of SNP indices. Working with indices directly
    # removes the original 26-SNP limit that came from mapping SNPs onto
    # letters[1:26].
    mci <- combn(seq_len(s), i, simplify = TRUE)
    nci <- ncol(mci)
    modT1[[i]] <- matrix(0, nrow = nci, ncol = s)
    for (j in seq_len(nci)) modT1[[i]][j, mci[, j]] <- 1
  }
  # Stack the null model (all zeros) on top of the size-1..mT1 blocks in a
  # single bind, instead of growing the matrix with rbind in a loop.
  T1mods <- do.call(rbind, c(list(rep(0, s)), modT1))
  return(T1mods)
}
#' @title Format simulated data into per-trait case-control data sets
#' @param sim output from phen.gen.fn
#' @export
#' @return list of data.frames, one per trait; column 1 gives case-control
#'   status (1/0) and the remaining columns are the genotype scores
abfcalc.format <- function(sim) {
  # SnpMatrix coding is 1/2/3, so shift the 0/1/2 genotype scores by one.
  # The object is used only to recover row (individual) names; constructing
  # it also validates the genotype matrix. (Unused subsets G1/G2 removed.)
  Gm <- new("SnpMatrix",(sim$G+1)) # snp cols, indivs rows
  # NOTE(review): "." in these patterns is a regex wildcard; fine as long as
  # row names literally look like "control.1", "case1.1", "case2.1".
  c0 <- grep("control.",rownames(Gm))
  c1 <- grep("case1.",rownames(Gm))
  c2 <- grep("case2.",rownames(Gm))
  # Trait-specific data sets: the shared controls plus that trait's cases.
  data1.1 <- data.frame(Y=sim$y[c(c0,c1)],sim$G[c(c0,c1),])
  data1.2 <- data.frame(Y=sim$y[c(c0,c2)],sim$G[c(c0,c2),])
  # Trait-2 cases are coded 2 in sim$y; recode to 1 for a 0/1 outcome.
  data1.2$Y[data1.2$Y==2] <- 1
  return(list(data1.1,data1.2))
}
#' @title Approximate Bayes' factors (ABFs) for one case-control study
#' @param data1 data.frame that has case-control status (1-0) in column 1 and remaining columns are the genotype scores (rows are individuals)
#' @param mT1 maximum number of causal variants
#' @param msnps vector of SNPs to consider in models
#' @export
#' @author Jenn Asimit
#' @return data.frame of model ABFs (each row is a model): column 1 has ABFs, remaining columns specify inclusion/exclusion of SNPs in each model
abfT1.fn <- function(data1, mT1 = 3, msnps) {
  # Enumerate all models up to size mT1 over the candidate SNPs.
  mods <- T1modsmat.fn(mT1, msnps)
  colnames(mods) <- msnps
  # Fit all models at once with BMA::glib (logistic regression).
  fit <- BMA::glib(x = data1[, -1], y = data1[, 1], error = "binomial",
                   link = "logit", models = mods)
  # glib reports 2*log(BF10); halve to obtain log ABFs.
  res <- data.frame(logABF = 0.5 * fit$bf$twologB10[, 1], M = fit$models,
                    row.names = NULL)
  names(res) <- c("logABF", names(data1)[-1])
  res
}
#' @title Joint Approximate Bayes' factors (ABFs) for two case-control studies with shared controls
#' @param sim output from phen.gen.fn
#' @param msnps vector of SNPs to consider in models
#' @param mT1 maximum number of causal variants for trait 1
#' @param mT2 maximum number of causal variants for trait 2
#' @export
#' @author Jenn Asimit
#' @return data.frame of joint model ABFs (each row is a model): column 1 has joint ABFs, remaining columns specify inclusion/exclusion of SNPs in each joint model
abf.fn <- function(sim,msnps,mT1,mT2) {
 data1 <- data.frame(Y=sim$y,sim$G)
 s <- length(msnps) # number of snps considered in models for the 2 traits
 data.m1 <- data1[,c("Y",msnps)]
 # Re-express the 3-category (control/case1/case2) multinomial outcome as an
 # augmented logistic regression so glib can fit it.
 # NOTE(review): mlogit2logit is called unqualified -- presumably
 # BMA::mlogit2logit; confirm the package is attached when this runs.
 m1 <- mlogit2logit(Y ~ 1|. -Y,data.m1,choices=0:2,base.choice=1)
 T1mods <- T1modsmat.fn(mT1,msnps)
 # Reuse the trait-1 model matrix when both traits have the same max size.
 if(mT2==mT1) {T2mods <- T1mods} else {T2mods <- T1modsmat.fn(mT2,msnps)}
 nT1 <- dim(T1mods)[1]
 nT2 <- dim(T2mods)[1]
 # Cross-product of the two model spaces: every trait-1 model row paired
 # with every trait-2 model row (nT1*nT2 joint models in total).
 T1modsrep <- matrix(rep(t(T1mods),nT2),ncol=ncol(T1mods),byrow=TRUE)
 T2modsrep <- matrix(rep(T2mods,each=nT1),ncol=ncol(T2mods),byrow=FALSE)
 T1T2mods <- cbind(T1modsrep,T2modsrep)
 # add column of 1's to the models for the trait2*effect variable
 T1T2mods1 <- cbind(T1T2mods,1)
 T1T2mods <- T1T2mods1
 # Columns (4+s):(4+3*s) of the augmented design hold the trait-specific SNP
 # effect variables produced by mlogit2logit -- TODO confirm these offsets
 # against the mlogit2logit output layout.
 mod1 <- BMA::glib(x=m1$data[,(4+s):(4+3*s)],y=m1$data$Y.star,error="binomial", link="logit",models=T1T2mods)
 # glib reports 2*log(BF10); halve to obtain log ABFs.
 logABF <- mod1$bf$twologB10[,1]*0.5
 out <- data.frame(logABF=logABF,M=mod1$models)
 cnames <- c("logABF",names(m1$data[,(4+s):(4+3*s)]))
 names(out) <- cnames
 out <- out[,-dim(out)[2]] # rm last column, which is 1 for z_2
 return(out) # logABF,mod
}
#' @title Pair marginal and joint ABFs for two case-control studies with shared controls
#' @param sim output from phen.gen.fn
#' @param msnps vector of SNPs to consider in models
#' @param mT1 maximum number of causal variants for trait 1
#' @param mT2 maximum number of causal variants for trait 2
#' @export
#' @author Jenn Asimit
#' @return data.frame with one row per (trait-1 model, trait-2 model) pair:
#'   BF12 = joint (multinomial) logABF; BF1, BF2 = marginal (logistic) logABFs
bf.compare.fn <- function(sim, msnps, mT1, mT2) {
  # Marginal ABFs for each trait separately.
  data12 <- abfcalc.format(sim)
  bft1 <- abfT1.fn(data12[[1]], mT1 = mT1, msnps)
  bft2 <- abfT1.fn(data12[[2]], mT1 = mT2, msnps)
  # Joint ABFs for traits 1 and 2.
  bft1t2 <- abf.fn(sim, msnps = msnps, mT1 = mT1, mT2 = mT2)
  t1mods <- as.matrix(bft1[, -1])
  t2mods <- as.matrix(bft2[, -1])
  s <- dim(t1mods)[2]
  t1t2mods <- as.matrix(bft1t2[, -1])
  # Strip column names so the row-wise identical() comparisons below see
  # values only.
  colnames(t1mods) <- NULL
  colnames(t2mods) <- NULL
  colnames(t1t2mods) <- NULL
  # Collect one piece per (k, j) model pair in a preallocated list and bind
  # once at the end; the original grew bfall with rbind inside the double
  # loop, which copies the whole accumulator on every iteration (O(n^2)).
  pieces <- vector("list", nrow(t1mods) * nrow(t2mods))
  idx <- 0L
  for (k in seq_len(nrow(t1mods))) {
    for (j in seq_len(nrow(t2mods))) {
      # Joint-model row whose indicator vector matches the concatenation of
      # the two marginal model indicator vectors.
      ind <- which(apply(t1t2mods, 1, identical, c(t1mods[k, ], t2mods[j, ])))
      idx <- idx + 1L
      pieces[[idx]] <- cbind(bft1t2[ind, ], logBF1 = bft1[k, 1],
                             logBF2 = bft2[j, 1])
    }
  }
  bfall <- do.call(rbind, pieces)
  return(data.frame(BF12 = bfall[, 1], BF1 = bfall[, "logBF1"],
                    BF2 = bfall[, "logBF2"]))
}
#' @title Relationship between multinomial ABFs and logistic ABFs
#' @param BFs data.frame of multinomial logABFs BF12 and logistic logABFs
#' @param Bplot logical, if TRUE plot ABF12 against (log(ABF1)+log(ABF2)
#' @export
#' @author Jenn Asimit
#' @return fitted regression model R2 and coefficient estimate summaries from log(jointABF) ~ (log(ABF1)+log(ABF2))
bf.relations.fn <- function(BFs, Bplot = FALSE) {
  # Regress the joint log-ABF on the sum of the two marginal log-ABFs.
  marginal.sum <- BFs$BF1 + BFs$BF2
  fit <- lm(BFs$BF12 ~ marginal.sum)
  fit.summary <- summary(fit)
  # Estimate and std. error for intercept (beta0) and slope (beta1).
  coefs <- fit.summary$coefficients[, 1:2]
  if (Bplot) {
    plot(BFs$BF12 ~ marginal.sum, pch = 20,
         xlab = "log(BF1)+log(BF2)", ylab = "log(BF12)", main = "")
  }
  c(R2 = fit.summary$r.squared, beta0 = coefs[1, ], beta1 = coefs[2, ])
}
| /R/bf-compare-sim.R | no_license | jennasimit/MFMextra | R | false | false | 5,732 | r |
#' @title Generate model matrix of all possible models with maximum specified size
#' @param mT1 maximum model size (largest number of SNPs allowed in one model)
#' @param msnps vector of SNP names to include in the model matrix
#' @return binary model matrix for the SNPs in msnps: columns are SNPs, each
#'   row is one model (1 = SNP included); the first row is the null model.
T1modsmat.fn <- function(mT1, msnps) {
  s <- length(msnps)
  # One list entry per model size; each holds the 0/1 indicator matrix for
  # all models of exactly that size.
  modT1 <- vector("list", mT1)
  for (i in seq_len(mT1)) {
    # All size-i subsets of SNP indices. Working with indices directly
    # removes the original 26-SNP limit that came from mapping SNPs onto
    # letters[1:26].
    mci <- combn(seq_len(s), i, simplify = TRUE)
    nci <- ncol(mci)
    modT1[[i]] <- matrix(0, nrow = nci, ncol = s)
    for (j in seq_len(nci)) modT1[[i]][j, mci[, j]] <- 1
  }
  # Stack the null model (all zeros) on top of the size-1..mT1 blocks in a
  # single bind, instead of growing the matrix with rbind in a loop.
  T1mods <- do.call(rbind, c(list(rep(0, s)), modT1))
  return(T1mods)
}
#' @title Format simulated data into per-trait case-control data sets
#' @param sim output from phen.gen.fn
#' @export
#' @return list of data.frames, one per trait; column 1 gives case-control
#'   status (1/0) and the remaining columns are the genotype scores
abfcalc.format <- function(sim) {
  # SnpMatrix coding is 1/2/3, so shift the 0/1/2 genotype scores by one.
  # The object is used only to recover row (individual) names; constructing
  # it also validates the genotype matrix. (Unused subsets G1/G2 removed.)
  Gm <- new("SnpMatrix",(sim$G+1)) # snp cols, indivs rows
  # NOTE(review): "." in these patterns is a regex wildcard; fine as long as
  # row names literally look like "control.1", "case1.1", "case2.1".
  c0 <- grep("control.",rownames(Gm))
  c1 <- grep("case1.",rownames(Gm))
  c2 <- grep("case2.",rownames(Gm))
  # Trait-specific data sets: the shared controls plus that trait's cases.
  data1.1 <- data.frame(Y=sim$y[c(c0,c1)],sim$G[c(c0,c1),])
  data1.2 <- data.frame(Y=sim$y[c(c0,c2)],sim$G[c(c0,c2),])
  # Trait-2 cases are coded 2 in sim$y; recode to 1 for a 0/1 outcome.
  data1.2$Y[data1.2$Y==2] <- 1
  return(list(data1.1,data1.2))
}
#' @title Approximate Bayes' factors (ABFs) for one case-control study
#' @param data1 data.frame that has case-control status (1-0) in column 1 and remaining columns are the genotype scores (rows are individuals)
#' @param mT1 maximum number of causal variants
#' @param msnps vector of SNPs to consider in models
#' @export
#' @author Jenn Asimit
#' @return data.frame of model ABFs (each row is a model): column 1 has ABFs, remaining columns specify inclusion/exclusion of SNPs in each model
abfT1.fn <- function(data1, mT1 = 3, msnps) {
  # Enumerate all models up to size mT1 over the candidate SNPs.
  mods <- T1modsmat.fn(mT1, msnps)
  colnames(mods) <- msnps
  # Fit all models at once with BMA::glib (logistic regression).
  fit <- BMA::glib(x = data1[, -1], y = data1[, 1], error = "binomial",
                   link = "logit", models = mods)
  # glib reports 2*log(BF10); halve to obtain log ABFs.
  res <- data.frame(logABF = 0.5 * fit$bf$twologB10[, 1], M = fit$models,
                    row.names = NULL)
  names(res) <- c("logABF", names(data1)[-1])
  res
}
#' @title Joint Approximate Bayes' factors (ABFs) for two case-control studies with shared controls
#' @param sim output from phen.gen.fn
#' @param msnps vector of SNPs to consider in models
#' @param mT1 maximum number of causal variants for trait 1
#' @param mT2 maximum number of causal variants for trait 2
#' @export
#' @author Jenn Asimit
#' @return data.frame of joint model ABFs (each row is a model): column 1 has joint ABFs, remaining columns specify inclusion/exclusion of SNPs in each joint model
abf.fn <- function(sim,msnps,mT1,mT2) {
 data1 <- data.frame(Y=sim$y,sim$G)
 s <- length(msnps) # number of snps considered in models for the 2 traits
 data.m1 <- data1[,c("Y",msnps)]
 # Re-express the 3-category (control/case1/case2) multinomial outcome as an
 # augmented logistic regression so glib can fit it.
 # NOTE(review): mlogit2logit is called unqualified -- presumably
 # BMA::mlogit2logit; confirm the package is attached when this runs.
 m1 <- mlogit2logit(Y ~ 1|. -Y,data.m1,choices=0:2,base.choice=1)
 T1mods <- T1modsmat.fn(mT1,msnps)
 # Reuse the trait-1 model matrix when both traits have the same max size.
 if(mT2==mT1) {T2mods <- T1mods} else {T2mods <- T1modsmat.fn(mT2,msnps)}
 nT1 <- dim(T1mods)[1]
 nT2 <- dim(T2mods)[1]
 # Cross-product of the two model spaces: every trait-1 model row paired
 # with every trait-2 model row (nT1*nT2 joint models in total).
 T1modsrep <- matrix(rep(t(T1mods),nT2),ncol=ncol(T1mods),byrow=TRUE)
 T2modsrep <- matrix(rep(T2mods,each=nT1),ncol=ncol(T2mods),byrow=FALSE)
 T1T2mods <- cbind(T1modsrep,T2modsrep)
 # add column of 1's to the models for the trait2*effect variable
 T1T2mods1 <- cbind(T1T2mods,1)
 T1T2mods <- T1T2mods1
 # Columns (4+s):(4+3*s) of the augmented design hold the trait-specific SNP
 # effect variables produced by mlogit2logit -- TODO confirm these offsets
 # against the mlogit2logit output layout.
 mod1 <- BMA::glib(x=m1$data[,(4+s):(4+3*s)],y=m1$data$Y.star,error="binomial", link="logit",models=T1T2mods)
 # glib reports 2*log(BF10); halve to obtain log ABFs.
 logABF <- mod1$bf$twologB10[,1]*0.5
 out <- data.frame(logABF=logABF,M=mod1$models)
 cnames <- c("logABF",names(m1$data[,(4+s):(4+3*s)]))
 names(out) <- cnames
 out <- out[,-dim(out)[2]] # rm last column, which is 1 for z_2
 return(out) # logABF,mod
}
#' @title Pair marginal and joint ABFs for two case-control studies with shared controls
#' @param sim output from phen.gen.fn
#' @param msnps vector of SNPs to consider in models
#' @param mT1 maximum number of causal variants for trait 1
#' @param mT2 maximum number of causal variants for trait 2
#' @export
#' @author Jenn Asimit
#' @return data.frame with one row per (trait-1 model, trait-2 model) pair:
#'   BF12 = joint (multinomial) logABF; BF1, BF2 = marginal (logistic) logABFs
bf.compare.fn <- function(sim, msnps, mT1, mT2) {
  # Marginal ABFs for each trait separately.
  data12 <- abfcalc.format(sim)
  bft1 <- abfT1.fn(data12[[1]], mT1 = mT1, msnps)
  bft2 <- abfT1.fn(data12[[2]], mT1 = mT2, msnps)
  # Joint ABFs for traits 1 and 2.
  bft1t2 <- abf.fn(sim, msnps = msnps, mT1 = mT1, mT2 = mT2)
  t1mods <- as.matrix(bft1[, -1])
  t2mods <- as.matrix(bft2[, -1])
  s <- dim(t1mods)[2]
  t1t2mods <- as.matrix(bft1t2[, -1])
  # Strip column names so the row-wise identical() comparisons below see
  # values only.
  colnames(t1mods) <- NULL
  colnames(t2mods) <- NULL
  colnames(t1t2mods) <- NULL
  # Collect one piece per (k, j) model pair in a preallocated list and bind
  # once at the end; the original grew bfall with rbind inside the double
  # loop, which copies the whole accumulator on every iteration (O(n^2)).
  pieces <- vector("list", nrow(t1mods) * nrow(t2mods))
  idx <- 0L
  for (k in seq_len(nrow(t1mods))) {
    for (j in seq_len(nrow(t2mods))) {
      # Joint-model row whose indicator vector matches the concatenation of
      # the two marginal model indicator vectors.
      ind <- which(apply(t1t2mods, 1, identical, c(t1mods[k, ], t2mods[j, ])))
      idx <- idx + 1L
      pieces[[idx]] <- cbind(bft1t2[ind, ], logBF1 = bft1[k, 1],
                             logBF2 = bft2[j, 1])
    }
  }
  bfall <- do.call(rbind, pieces)
  return(data.frame(BF12 = bfall[, 1], BF1 = bfall[, "logBF1"],
                    BF2 = bfall[, "logBF2"]))
}
#' @title Relationship between multinomial ABFs and logistic ABFs
#' @param BFs data.frame of multinomial logABFs BF12 and logistic logABFs
#' @param Bplot logical, if TRUE plot ABF12 against (log(ABF1)+log(ABF2)
#' @export
#' @author Jenn Asimit
#' @return fitted regression model R2 and coefficient estimate summaries from log(jointABF) ~ (log(ABF1)+log(ABF2))
bf.relations.fn <- function(BFs, Bplot = FALSE) {
  # Regress the joint log-ABF on the sum of the two marginal log-ABFs.
  marginal.sum <- BFs$BF1 + BFs$BF2
  fit <- lm(BFs$BF12 ~ marginal.sum)
  fit.summary <- summary(fit)
  # Estimate and std. error for intercept (beta0) and slope (beta1).
  coefs <- fit.summary$coefficients[, 1:2]
  if (Bplot) {
    plot(BFs$BF12 ~ marginal.sum, pch = 20,
         xlab = "log(BF1)+log(BF2)", ylab = "log(BF12)", main = "")
  }
  c(R2 = fit.summary$r.squared, beta0 = coefs[1, ], beta1 = coefs[2, ])
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stack_events.R
\name{stack_events}
\alias{stack_events}
\title{Stack event files}
\usage{
stack_events(..., keep.vars = NULL, keep.all = F)
}
\arguments{
\item{...}{MEPS event datasets to stack.}
\item{keep.vars}{Vector of variables in addition to shared variables to keep in the stacked dataset.}
\item{keep.all}{If TRUE, keep all variables from all datasets. Can slow down computation.}
}
\value{
Data frame of stacked event files with standardized variable names. The name of each source dataset is included as the 'data' variable in the stacked data set.
}
\description{
Stacks MEPS event files. If needed, removes event key prefixes to standardize variable names and adds total source-of-payment variables to hospital event files. By default, only variables shared by all of the datasets are kept (plus LINKIDX / EVNTIDX as ID variables).
}
\examples{
# Get event datasets
RX <- read_MEPS(year=2013,type='RX',web=T)
OB <- read_MEPS(year=2013,type='OB',web=T)
OP <- read_MEPS(year=2013,type='OP',web=T)
ER <- read_MEPS(year=2013,type='ER',web=T)
aa <- stack_events(RX,OB,OP,ER); head(aa);
# Force 'SEEDOC' and 'SEETLKPV' into stacked dataset
bb <- stack_events(RX,OB,OP,ER,keep.vars=c('SEEDOC','SEETLKPV')); head(bb);
}
| /MEPS/man/stack_events.Rd | no_license | TaraFararooy/meps_r_pkg | R | false | true | 1,290 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stack_events.R
\name{stack_events}
\alias{stack_events}
\title{Stack event files}
\usage{
stack_events(..., keep.vars = NULL, keep.all = F)
}
\arguments{
\item{...}{MEPS event datasets to stack.}
\item{keep.vars}{Vector of variables in addition to shared variables to keep in the stacked dataset.}
\item{keep.all}{If TRUE, keep all variables from all datasets. Can slow down computation.}
}
\value{
Data frame of stacked event files with standardized variable names. The name of each source dataset is included as the 'data' variable in the stacked data set.
}
\description{
Stacks MEPS event files. If needed, removes event key prefixes to standardize variable names and adds total source-of-payment variables to hospital event files. By default, only variables shared by all of the datasets are kept (plus LINKIDX / EVNTIDX as ID variables).
}
\examples{
# Get event datasets
RX <- read_MEPS(year=2013,type='RX',web=T)
OB <- read_MEPS(year=2013,type='OB',web=T)
OP <- read_MEPS(year=2013,type='OP',web=T)
ER <- read_MEPS(year=2013,type='ER',web=T)
aa <- stack_events(RX,OB,OP,ER); head(aa);
# Force 'SEEDOC' and 'SEETLKPV' into stacked dataset
bb <- stack_events(RX,OB,OP,ER,keep.vars=c('SEEDOC','SEETLKPV')); head(bb);
}
|
## #This file is used for calculating transient boundary conditions
## #using universal kriging
###cov_model_sets = c('gaussian','wave','exponential','spherical')
###drift_sets = c(0,1)
## ---------------------------------------------------------------------------
## Script setup: working directory, libraries, input/output paths, and grid.
## NOTE(review): the absolute setwd() path and data paths are machine-specific;
## consider project-relative paths before sharing this script.
## ---------------------------------------------------------------------------
setwd("/Users/shua784/Dropbox/PNNL/Projects/Reach_scale_model/")
# rm(list=ls())
library(geoR)
library(rhdf5)
library(ggplot2)
# library(gstat)
library(sp)
library(maptools)
library(phylin)
##------------INPUT----------------##
# source("codes/300A_parameters.R")
## Close any HDF5 handles left open by a previous run; silence geoR messages.
H5close()
options(geoR.messages=FALSE)
# input_folder = 'data/headdata4krige_Plume_2008-2017/'
## Geologic framework (grid definition) and well water-level data sources.
fname_geoFramework.r = "results/geoframework_200m.r"
fname_river.geo = "data/river_geometry_manual.csv"
fname_mvAwln = "/Users/shua784/Dropbox/PNNL/People/From_Patrick/SQL/mvAwln.csv"
fname_mvAwln_id = "/Users/shua784/Dropbox/PNNL/People/From_Patrick/SQL/mvAwln_wellID_updated.csv"
fname_manual_wells_ids = "/Users/shua784/Dropbox/PNNL/People/From_Patrick/SQL/HYDRAULIC_HEAD_MV_WellID.csv"
fname_manual_wells = "/Users/shua784/Dropbox/PNNL/People/From_Patrick/SQL/HYDRAULIC_HEAD_MV.csv"
fname_USGS_wells = "/Users/shua784/Dropbox/PNNL/People/from_Erick/Burns_well_data.csv"
fname_USGS_wells_ids = "/Users/shua784/Dropbox/PNNL/People/from_Erick/Burns_well_attributes.csv"
fname_SFA_wells = "/Users/shua784/Dropbox/PNNL/People/Velo/300A_Well_Data/"
fname_SFA_wells_ids = "/Users/shua784/Dropbox/PNNL/People/Velo/300A_well_coord.csv"
fname_SFA_wells_all = "/Users/shua784/Dropbox/PNNL/People/Velo/SFA_all_wells.csv"
## Master switch for the diagnostic plotting section below.
is.plot = F
##--------------OUTPUT---------------------##
fname_initial.h5 = "Inputs/HFR_model_200m/HFR_H_Initial_2007_04_01.h5"
# BC.h5 = "Inputs/HFR_H_BC.h5"
# fname_head.bc.r= "results/HFR_head_BC.r"
fname_wells.r = "results/well_compiled_wl_data.r"
# fname_fig.initalH_contour = "figures/initial_head_150m.jpg"
# fname_fig.initialH_krige = "figures/initial_head_krige.jpg"
fname_fig.initialH_idw = "figures/initial_head_200m_2017-04-01.jpg"
fname.selected.wells.df = "results/selected.wells.df_2007-3-28.r"
## Load the model grid definition. Presumably provides idx, idy, nx, ny,
## range_x, range_y, model_origin, xlen, ylen -- TODO confirm contents.
load(fname_geoFramework.r)
## for grids
grid.x = idx
grid.y = idy
grid.nx = nx
grid.ny = ny
# pred.grid.south = expand.grid(seq(range_x[1]+grid.x/2,range_x[2],grid.x),range_y[1]+grid.y/2) # for South boundary
# pred.grid.north = expand.grid(seq(range_x[1]+grid.x/2,range_x[2],grid.x),range_y[2]-grid.y/2) # for North boundary
# pred.grid.east = expand.grid(range_x[1]+grid.x/2,seq(range_y[1]+grid.y/2,range_y[2],grid.y)) # for East boundary
# pred.grid.west = expand.grid(range_x[2]-grid.x/2,seq(range_y[1]+grid.y/2,range_y[2],grid.y)) # for West boundary
## Prediction grid at cell centers over the whole model domain.
pred.grid.domain = expand.grid(seq(range_x[1]+grid.x/2,range_x[2],grid.x),
seq(range_y[1]+grid.y/2,range_y[2],grid.y)) # for domain
# colnames(pred.grid.south)=c('x','y')
# colnames(pred.grid.north)=c('x','y')
# colnames(pred.grid.east)=c('x','y')
# colnames(pred.grid.west)=c('x','y')
colnames(pred.grid.domain)=c('x','y')
## time information (historical hourly-BC settings kept for reference)
# start.time = as.POSIXct("2010-02-27 12:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
# end.time = as.POSIXct("2010-02-28 23:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
# dt = 3600 ##secs
# times = seq(start.time,end.time,dt)
# ntime = length(times)
# time.id = seq(0,ntime-1,dt/3600) ##hourly boundary, why start from 0h?
# origin.time = as.POSIXct("2007-12-31 23:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S") # starting time should be 1 h early than "2008-1-1 0:0:0" to set the right index in folder/headdata4krige_Plume_2008_2017
## BC.south = array(NA,c(ntime,grid.nx))
## BC.north = array(NA,c(ntime,grid.nx))
## BC.east = array(NA,c(ntime,grid.ny))
## BC.west = array(NA,c(ntime,grid.ny))
## Boundary-condition accumulators (filled later in the script).
BC.south = c()
BC.north = c()
BC.east = c()
BC.west = c()
avail.time.id = c()
# ##==================== read into well data ====================
## Model-domain extent in projected coordinates. NOTE(review): in the original
## these two lines appeared *after* this block, but range.xcoods/range.ycoods
## are used below when filtering USGS wells -- a fresh run (no cached
## well_compiled_wl_data.r) failed with "object not found". They are defined
## up front here; model_origin/xlen/ylen come from the geoframework load above.
range.xcoods = c(model_origin[1], model_origin[1] + xlen)
range.ycoods = c(model_origin[2], model_origin[2] + ylen)
## Compile water-level observations from HEIS (auto + manual), USGS, and SFA
## wells, then cache everything in fname_wells.r; later runs load the cache.
if (!file.exists(fname_wells.r)) {
mvAwln.id = read.csv(fname_mvAwln_id, stringsAsFactors = F)
mvAwln = read.csv(fname_mvAwln, stringsAsFactors = F)
mvAwln.id = transform(mvAwln.id,Easting = as.numeric(Easting),
Northing = as.numeric(Northing))
HEIS_auto_wells = subset(mvAwln, select = c("WellName", "WellNumber", "procWaterElevation", "procDate"))
HEIS_auto_wells = transform(HEIS_auto_wells, WellName = as.character(WellName),
WellNumber = as.character(WellNumber),
procWaterElevation = as.numeric(procWaterElevation),
procDate = as.POSIXct(procDate))
manual_wells_ids = read.csv(fname_manual_wells_ids, stringsAsFactors = F)
manual_wells = read.csv(fname_manual_wells, stringsAsFactors = F)
manual_wells = transform(manual_wells, HYD_DATE_TIME_PST = as.POSIXct(HYD_DATE_TIME_PST))
# HEIS_auto_wells = mvAwln
HEIS_auto_wells_ids = mvAwln.id
HEIS_manual_wells = manual_wells
colnames(HEIS_manual_wells)[1:4] = c("WellNumber", "WellName", "procDate", "procWaterElevation")
HEIS_manual_wells_ids = manual_wells_ids
USGS_wells = read.csv(fname_USGS_wells, stringsAsFactors = F)
USGS_wells_ids = read.csv(fname_USGS_wells_ids, stringsAsFactors = F)
USGS_wells_ids = transform(USGS_wells_ids, CP_ID_NUM = as.character(CP_ID_NUM))
USGS_wells = transform(USGS_wells, CP_NUM = as.character(CP_NUM), DATE = as.POSIXct(DATE))
USGS_wells$WLELEVft88 = USGS_wells$WLELEVft88*0.3048 # convert ft to meter
USGS_wells_ids$X_SP_83FT = USGS_wells_ids$X_SP_83FT*0.3048
USGS_wells_ids$Y_SP_83FT = USGS_wells_ids$Y_SP_83FT*0.3048
colnames(USGS_wells)[1:4] = c("WellNumber", "procDate", "Year_fract", "procWaterElevation")
colnames(USGS_wells_ids)[2:4] = c("WellNumber", "Easting", "Northing")
## select USGS wells located inside the model domain
USGS_wells_selected.names = USGS_wells_ids$WellNumber[which(USGS_wells_ids$Easting < range.xcoods[2] & USGS_wells_ids$Easting > range.xcoods[1] &
USGS_wells_ids$Northing < range.ycoods[2] & USGS_wells_ids$Northing > range.ycoods[1])]
USGS_wells_selected = data.frame(WellName = character(), Easting = numeric(),
Northing = numeric(), DateTime = as.POSIXct(character()), WL = numeric(), stringsAsFactors = F)
for (iwell in USGS_wells_selected.names) {
manual_well = USGS_wells[which(USGS_wells$WellNumber == iwell), ]
USGS_wells_selected = rbind(USGS_wells_selected, data.frame(WellNumber = manual_well$WellNumber, WL = manual_well$procWaterElevation,
DateTime = manual_well$procDate,
Easting = rep(USGS_wells_ids$Easting[which(USGS_wells_ids$WellNumber == iwell)], length(manual_well$WellNumber)),
Northing = rep(USGS_wells_ids$Northing[which(USGS_wells_ids$WellNumber == iwell)], length(manual_well$WellNumber)),
stringsAsFactors = F
))
}
## SFA wells: one csv per well, concatenated into a single data.frame cache.
SFA_wells_ids = read.csv(fname_SFA_wells_ids, stringsAsFactors = F)
colnames(SFA_wells_ids)[2] = c("WellName")
# SFA_wells_list=c("399-1-1_3var.csv")
# iwell = SFA_wells_list
if (!file.exists(fname_SFA_wells_all)) {
SFA_wells = data.frame(WellName = as.character(), DateTime = as.POSIXct(character()), Temp = numeric(),
Spc = numeric(), WL = numeric(), stringsAsFactors = F)
SFA_wells_list = list.files(fname_SFA_wells)
for (iwell in SFA_wells_list) {
# iwell = "399-1-1_3var.csv"
iSFA_well = read.csv(paste(fname_SFA_wells, iwell, sep = ""), stringsAsFactors = F)
# iSFA_well = read.csv(paste(fname_SFA_wells, "399-1-1_3var.csv", sep = ""), stringsAsFactors = F)
colnames(iSFA_well) = c("DateTime", "Temp", "Spc", "WL")
## Two wells use a different timestamp format than the rest.
if (iwell %in% c("399-5-1_3var.csv", "399-3-19_3var.csv" ) ) {
iSFA_well$DateTime = as.POSIXct(iSFA_well$DateTime, format = "%m/%d/%y %H:%M", tz = "GMT") ## time format must agree with data-column
} else {
iSFA_well$DateTime = as.POSIXct(iSFA_well$DateTime, format = "%d-%b-%Y %H:%M:%S", tz = "GMT") ## time format must agree with data-column
}
id_col = data.frame(WellName = rep(gsub("_3var.csv", "", iwell), dim(iSFA_well)[1]), stringsAsFactors = F)
iSFA_well = cbind(id_col, iSFA_well)
SFA_wells = rbind(SFA_wells, iSFA_well, stringsAsFactors =F)
}
# as.POSIXct(strptime(SFA_wells$DateTime[2], "%d-%b-%Y %H:%M:%S"), format = "%d-%m-%Y %H:%M:%S", tz = "GMT")
# SFA_wells$DateTime = as.POSIXct(SFA_wells$DateTime, format = "%d-%b-%Y %H:%M:%S", tz = "GMT") ## time format must agree with data-column
write.csv(SFA_wells, file = "/Users/shua784/Dropbox/PNNL/People/Velo/SFA_all_wells.csv", row.names = F)
} else {
SFA_wells = read.csv(fname_SFA_wells_all, stringsAsFactors = F)
}
save(list = c("HEIS_auto_wells", "HEIS_auto_wells_ids", "HEIS_manual_wells", "HEIS_manual_wells_ids",
"USGS_wells", "USGS_wells_ids", "USGS_wells_selected", "USGS_wells_selected.names","SFA_wells", "SFA_wells_ids"), file = fname_wells.r)
} else {
load(fname_wells.r)
}
##-------------------- plot all USGS wells------------------------
## Diagnostic plots (only run when is.plot = TRUE): hydrographs for USGS
## wells, eastern-boundary wells, and all HEIS manual wells, written as jpegs.
if (is.plot) {
## Re-select USGS wells inside the model domain (duplicates the selection
## done when the well cache is first built).
USGS_wells_selected.names = USGS_wells_ids$WellNumber[which(USGS_wells_ids$Easting < range.xcoods[2] & USGS_wells_ids$Easting > range.xcoods[1] &
USGS_wells_ids$Northing < range.ycoods[2] & USGS_wells_ids$Northing > range.ycoods[1])]
USGS_wells_selected = data.frame(WellName = character(), Easting = numeric(),
Northing = numeric(), DateTime = as.POSIXct(character()), WL = numeric(), stringsAsFactors = F)
start.time = as.POSIXct("1990-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
end.time = as.POSIXct("2011-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
jpeg(file="figures/USGS.wells.jpg", width=12, height=16, units="in", res=300)
par(mar =c(4,4,1,1))
## Empty canvas; each well's hydrograph is overlaid in the loop below.
plot(0,0,xlim=c(start.time, end.time), ylim = c(100, 305),type = "n", xlab = "Date", ylab = "Water Level (m)",
axes = F, cex=1.5)
box()
colors = rainbow(100)
for (iwell in USGS_wells_selected.names) {
manual_well = USGS_wells[which(USGS_wells$WellNumber == iwell), ]
USGS_wells_selected = rbind(USGS_wells_selected, data.frame(WellNumber = manual_well$WellNumber, WL = manual_well$procWaterElevation,
DateTime = manual_well$procDate,
Easting = rep(USGS_wells_ids$Easting[which(USGS_wells_ids$WellNumber == iwell)], length(manual_well$WellNumber)),
Northing = rep(USGS_wells_ids$Northing[which(USGS_wells_ids$WellNumber == iwell)], length(manual_well$WellNumber)),
stringsAsFactors = F
))
lines(manual_well$procDate, manual_well$procWaterElevation, col= sample(colors), lwd = 1 )
points(manual_well$procDate, manual_well$procWaterElevation, pch=1, cex=1)
axis.POSIXct(1,at=seq(as.Date("1990-01-01 00:00:00",tz="GMT"),
to=as.Date("2011-01-01 00:00:00",tz="GMT"),by="quarter"),
format="%m/%Y",mgp=c(5,1.7,0),cex.axis=1)
axis(2,at=seq(100, 305, 5),mgp=c(5,0.7,0),cex.axis=1)
}
dev.off()
## Quick look at the temporal coverage of the selected USGS observations.
hist(USGS_wells_selected$DateTime, breaks = 1000, freq = T)
##---------------------- plot all east wells----------------------------
## Eastern-boundary wells are identified by township-style name prefixes.
east.wells=c()
pattern = c(glob2rx("15N*"),glob2rx("14N*"), glob2rx("13N*"), glob2rx("12N*"),glob2rx("11N*"), glob2rx("10N*"), glob2rx("09N*"))
east.wells = grep(paste(pattern,collapse = "|"), HEIS_manual_wells_ids$WELL_NAME, value = T)
east.wells.data = data.frame(WellName = character(), Easting = numeric(),
Northing = numeric(), DateTime = as.POSIXct(character()), WL = numeric(), stringsAsFactors = F)
start.time = as.POSIXct("1990-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
end.time = as.POSIXct("2008-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
jpeg(file="figures/east.wells.jpg", width=12, height=16, units="in", res=300)
par(mar =c(4,4,1,1))
plot(0,0,xlim=c(start.time, end.time), ylim = c(150, 305),type = "n", xlab = "Date", ylab = "Water Level (m)",
axes = F, cex=1.5)
box()
colors = rainbow(100)
for (iwell in east.wells) {
manual_well = HEIS_manual_wells[which(HEIS_manual_wells$WellName == iwell), ]
east.wells.data = rbind(east.wells.data, data.frame(WellName = manual_well$WellName, WL = manual_well$procWaterElevation,
DateTime = manual_well$procDate,
Easting = rep(HEIS_manual_wells_ids$EASTING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)], length(manual_well$WellName)),
Northing = rep(HEIS_manual_wells_ids$NORTHING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)], length(manual_well$WellName)),
stringsAsFactors = F
))
lines(manual_well$procDate, manual_well$procWaterElevation, col= sample(colors), lwd = 1 )
points(manual_well$procDate, manual_well$procWaterElevation, pch=1, cex=1)
axis.POSIXct(1,at=seq(as.Date("1990-01-01 00:00:00",tz="GMT"),
to=as.Date("2008-01-01 00:00:00",tz="GMT"),by="quarter"),
format="%m/%Y",mgp=c(5,1.7,0),cex.axis=1)
axis(2,at=seq(150, 305, 5),mgp=c(5,0.7,0),cex.axis=1)
# date.range = range(manual_well$procDate)
#
# print(paste(iwell, "has", length(manual_well$procWaterElevation), "obs. points"))
}
dev.off()
hist(east.wells.data$DateTime, breaks = 1000, freq = T)
##---------------------- plot all HEIS manual wells--------------------
start.time = as.POSIXct("1990-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
end.time = as.POSIXct("2017-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
jpeg(file="figures/all.manual.wells.jpg", width=12, height=16, units="in", res=300)
par(mar =c(4,4,1,1))
plot(0,0,xlim=c(start.time, end.time), ylim = c(100, 305),type = "n", xlab = "Date", ylab = "Water Level (m)",
axes = F, cex=1.5)
box()
colors = rainbow(100)
## NOTE(review): well_names is only defined in the later well-selection
## section, so this loop errors if the plotting block runs first -- confirm
## the intended execution order.
for (iwell in well_names) {
manual_well = HEIS_manual_wells[which(HEIS_manual_wells$WellName == iwell), ]
lines(manual_well$procDate, manual_well$procWaterElevation, col= sample(colors), lwd = 1 )
points(manual_well$procDate, manual_well$procWaterElevation, pch=1, cex=1)
axis.POSIXct(1,at=seq(as.Date("1990-01-01 00:00:00",tz="GMT"),
to=as.Date("2017-01-01 00:00:00",tz="GMT"),by="quarter"),
format="%m/%Y",mgp=c(5,1.7,0),cex.axis=1)
axis(2,at=seq(150, 305, 5),mgp=c(5,0.7,0),cex.axis=1)
}
dev.off()
hist.HEIS = hist(HEIS_manual_wells$procDate, breaks = 1000, freq = T)
}
##---------------------------select wells with data at each time stamp-----------------------
if (!file.exists(fname.selected.wells.df)) {
## create empty matrix with Colclasses defined
# initial.time = as.POSIXct("2005-03-29 12:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
# initial.time = as.POSIXct("2007-03-28 12:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
initial.time = as.POSIXct("2007-04-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
# times = initial.time
# min.time = as.POSIXct("2005-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
# max.time = as.POSIXct("2010-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
# times = seq(min.time, max.time, by = 30*86400)
# times = seq(min.time, max.time, by = "month")
# well_names = unique(HEIS_manual_wells_ids$WELL_NAME)
# USGS_wells_selected.names = USGS_wells_ids$WellNumber[which(USGS_wells_ids$Easting < range.xcoods[2] & USGS_wells_ids$Easting > range.xcoods[1] &
# USGS_wells_ids$Northing < range.ycoods[2] & USGS_wells_ids$Northing > range.ycoods[1])]
well_names = c(unique(HEIS_manual_wells_ids$WELL_NAME), unique(USGS_wells_selected.names), unique(SFA_wells_ids$WellName))
# well_names = c(unique(SFA_wells_ids$WellName)[27:48])
# time_mar = 1*24*3600 #1 day range
time_mar = 15*86400 #15 day range
# times=times[1]
times = initial.time
# itime = times
for (i in 1:length(times)) {
itime = times[i]
print(itime)
selected.wells = data.frame(WellName = character(), WellNumber = character(),
Easting = numeric(),
Northing = numeric(), DateTime = as.POSIXct(character()), WL = numeric(), stringsAsFactors = F)
# well_names = c("699-39-79", "199-D3-2", "199-B2-12", "399-5-1")
# well_names = c("399-1-1")
for (iwell in well_names) {
# iwell = c("199-B2-12")
# iwell = c("199-D3-2")
if (iwell %in% SFA_wells$WellName) {
print(paste(iwell, "(SFA)"))
manual_well = SFA_wells[which(SFA_wells$WellName == iwell), ]
index = which.min(abs(as.numeric(manual_well$DateTime - itime)))
DateTime = manual_well$DateTime[index]
if (DateTime == itime) {
WL = manual_well$WL[index]
selected.wells = rbind(selected.wells, data.frame(WellName = manual_well$WellName[index],
WellNumber = manual_well$WellName[index],
WL = WL,
DateTime = DateTime,
Easting = SFA_wells_ids$Easting[which(SFA_wells_ids$WellName == iwell)],
Northing = SFA_wells_ids$Northing[which(SFA_wells_ids$WellName == iwell)],
stringsAsFactors = F
))
} else if (DateTime < itime + time_mar & DateTime > itime - time_mar) {
print(paste(iwell,"has wl within 1day of itime (SFA well)"))
WLs = manual_well$WL[which(manual_well$DateTime <itime + time_mar & manual_well$DateTime > itime - time_mar)]
WL = median(WLs)
selected.wells = rbind(selected.wells, data.frame(WellName = manual_well$WellName[index],
WellNumber = manual_well$WellName[index],
WL = WL,
DateTime = DateTime,
Easting = SFA_wells_ids$Easting[which(SFA_wells_ids$WellName == iwell)],
Northing = SFA_wells_ids$Northing[which(SFA_wells_ids$WellName == iwell)],
stringsAsFactors = F
))
}
}
## sample wells from mvAwln
if (iwell %in% HEIS_auto_wells$WellName) {
# print(paste(iwell, "(mvAwln)"))
auto_well = HEIS_auto_wells[which(HEIS_auto_wells$WellName == iwell), ]
index = which.min(abs(as.numeric(auto_well$procDate - itime)))
DateTime = auto_well$procDate[index]
## find wells having data within given time range
if (DateTime == itime) {
WL = auto_well$procWaterElevation[index]
selected.wells = rbind(selected.wells, data.frame(WellName = auto_well$WellName[index],
WellNumber = auto_well$WellNumber[index],
WL = WL,
DateTime = DateTime,
Easting = HEIS_manual_wells_ids$EASTING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
Northing = HEIS_manual_wells_ids$NORTHING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
stringsAsFactors = F
))
} else if (DateTime < itime + time_mar & DateTime > itime - time_mar) {
print(paste(iwell,"has wl within 1day of itime (mvAwln well)"))
WLs = auto_well$procWaterElevation[which(auto_well$procDate <itime + time_mar & auto_well$procDate > itime - time_mar)]
WL = median(WLs)
selected.wells = rbind(selected.wells, data.frame(WellName = auto_well$WellName[index],
WellNumber = auto_well$WellNumber[index],
WL = WL,
DateTime = DateTime,
Easting = HEIS_manual_wells_ids$EASTING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
Northing = HEIS_manual_wells_ids$NORTHING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
stringsAsFactors = F
))
}
}
## sample wells from manual HEIS data
if (iwell %in% HEIS_manual_wells$WellName) {
# print(paste(iwell, "(HEIS manual)"))
manual_well = HEIS_manual_wells[which(HEIS_manual_wells$WellName == iwell), ]
index = which.min(abs(as.numeric(manual_well$procDate - itime)))
DateTime = manual_well$procDate[index]
if (DateTime == itime) {
WL = manual_well$procWaterElevation[index]
selected.wells = rbind(selected.wells, data.frame(WellName = manual_well$WellName[index],
WellNumber = manual_well$WellNumber[index],
WL = WL,
DateTime = DateTime,
Easting = HEIS_manual_wells_ids$EASTING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
Northing = HEIS_manual_wells_ids$NORTHING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
stringsAsFactors = F
))
} else if (DateTime < itime + time_mar & DateTime > itime - time_mar) {
print(paste(iwell,"has wl within 1day of itime (manual well)"))
WLs = manual_well$procWaterElevation[which(manual_well$procDate <itime + time_mar & manual_well$procDate > itime - time_mar)]
WL = median(WLs)
selected.wells = rbind(selected.wells, data.frame(WellName = manual_well$WellName[index],
WellNumber = manual_well$WellNumber[index],
WL = WL,
DateTime = DateTime,
Easting = HEIS_manual_wells_ids$EASTING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
Northing = HEIS_manual_wells_ids$NORTHING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
stringsAsFactors = F
))
}
}
## sample wells from USGS
if (iwell %in% USGS_wells_selected$WellNumber) {
# print(paste(iwell, "(USGS)"))
manual_well = USGS_wells_selected[which(USGS_wells_selected$WellNumber == iwell), ]
index = which.min(abs(as.numeric(manual_well$DateTime - itime)))
DateTime = manual_well$DateTime[index]
if (DateTime == itime) {
WL = manual_well$WL[index]
selected.wells = rbind(selected.wells, data.frame(WellName = manual_well$WellNumber[index],
WellNumber = manual_well$WellNumber[index],
WL = WL,
DateTime = DateTime,
Easting = USGS_wells_selected$Easting[index],
Northing = USGS_wells_selected$Northing[index],
stringsAsFactors = F
))
} else if (DateTime < itime + time_mar & DateTime > itime - time_mar) {
print(paste(iwell,"has wl within 1day of itime (USGS well)"))
WLs = manual_well$WL[which(manual_well$DateTime <itime + time_mar & manual_well$DateTime > itime - time_mar)]
WL = median(WLs)
selected.wells = rbind(selected.wells, data.frame(WellName = manual_well$WellNumber[index],
WellNumber = manual_well$WellNumber[index],
WL = WL,
DateTime = DateTime,
Easting = USGS_wells_selected$Easting[index],
Northing = USGS_wells_selected$Northing[index],
stringsAsFactors = F
))
}
}
## sample wells from SFA data
}
selected.wells.unique = selected.wells[!duplicated(selected.wells$WellName), ] # remove duplicated wellNames
selected.wells.unique = selected.wells[!duplicated(selected.wells$Easting), ] # remove duplicated well coords
selected.wells.unique = selected.wells.unique[complete.cases(selected.wells.unique), ] # remove rows contain NAs
selected.wells.df = data.frame(x=selected.wells.unique$Easting, y=selected.wells.unique$Northing, z = selected.wells.unique$WL)
# colnames(data) = c('x','y','z')
selected.wells.df = selected.wells.df[order(selected.wells.df$x),]
# save(selected.wells.df, file = "results/selected.wells.df_2007-4-1.r")
# ##----------------------- plot well head----------------------
# # save(selected.wells.df, file = "results/inital_data_coords.r")
#
#
# plot(selected.wells.df$x, selected.wells.df$y, asp = 1)
# # par(mfrow = c(2, 1))
# s= interp(selected.wells.df$x, selected.wells.df$y, selected.wells.df$z, duplicate = "strip", nx=100, ny=100)
# jpeg(paste("figures/", initial.time, ".jpg", sep = ""), width=8,height=8,units='in',res=300,quality=100)
# image2D(s, shade=0.2, rasterImage = F, NAcol = "gray",
# main = paste(initial.time, "inital head (contour)"), asp = 1, contour = T, add = F
)
#
#
# points(selected.wells.df$x, selected.wells.df$y, col = "white", pch = 1)
# # dev.off()
# load("results/inital_data_coords.r")
# selected.wells.df = data
}
save(selected.wells.df, file = fname.selected.wells.df)
} else {
load(fname.selected.wells.df)
if (dim(selected.wells.df)[1]>2) {
## ---------------use inverse distance interpolation------------------
# geo.data = selected.wells.df
# coordinates(geo.data)= ~x+y
# plot(geo.data)
# x.range <- range.xcoods # min/max longitude of the interpolation area
# y.range <- range.ycoods # min/max latitude of the interpolation area
# grd <- expand.grid(x = seq(from = x.range[1], to = x.range[2], by = idx), y = seq(from = y.range[1],
# to = y.range[2], by = idy)) # expand points to grid
grd = expand.grid(unit_x, unit_y)
# save(grd, file = "results/model_grids.r")
idw.interp = idw(values=selected.wells.df[,"z"],
coords = selected.wells.df[,c("x","y")],
grid=grd,
method="shepard",
p=2)
idw.interp = as.numeric(unlist(idw.interp))
h.initial = array(idw.interp, c(nx, ny))
river.geometry = read.csv(fname_river.geo)
river.geometry = river.geometry[, 2:3]
itime = as.POSIXct("2007-04-01 12:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
jpeg(fname_fig.initialH_idw, width=8,height=8,units='in',res=300,quality=100)
# plot(selected.wells.df$x, selected.wells.df$y, col = "black", pch = 1, asp=1, xlim = c(x.range[1], x.range[2]))
head4plot = h.initial
head4plot[head4plot>200]=200
image2D(z= head4plot, x= unit_x, y= unit_y, shade=0.2, rasterImage = F, NAcol = "white",
main = paste("Initial Head", itime), asp = 1, contour = T, zlim = c(100, 200), xlab = "Easting", ylab = "Northing")
points(selected.wells.df$x, selected.wells.df$y, col = "white", pch = 1, asp=1)
polygon(river.geometry$x, river.geometry$y, border = "gray", asp=1)
dev.off()
}
}
## ------------------- Krige------------------------------------------
# data = as.geodata(data)
##This bins and esimator.type is defined by Xingyuan
# if (nrow(data$coords)>27) {
# # bin1 = variog(data,uvec=c(0,50,100,seq(150,210,30),250,300),trend='cte',bin.cloud=T,estimator.type='modulus')
# bin1 = variog(data, uvec=c(0, 500, 1000, 2000, 2500, 3500, 4500, 5500, seq(6000,60000,100)),trend='cte',bin.cloud=T,estimator.type='modulus', option = "cloud")
# } else {
# bin1 = variog(data,uvec=c(0,100,seq(150,210,30),250,300),trend='cte',bin.cloud=T,estimator.type='modulus')
# }
# initial.values <- expand.grid(max(bin1$v),seq(300))
# wls = variofit(bin1,ini = initial.values,fix.nugget=T,nugget = 0.00001,fix.kappa=F,cov.model='exponential')
#check the varigram
# if (itime %% 1000 == 1) {
# jpeg(filename=paste('figures/Semivariance Time = ',start.time,".jpg", sep=''),
# width=5,height=5,units="in",quality=100,res=300)
# plot(bin1,main = paste('Time = ',start.time, sep=''),col='red', pch = 19, cex = 1, lty = "solid", lwd = 2)
# text(bin1$u,bin1$v,labels=bin1$n, cex= 0.7,pos = 2)
# lines(wls)
# dev.off()
# print(times[itime])
# }
# ## Generate boundary and initial condition
# kc.south = krige.conv(data, loc = pred.grid.south, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
# kc.north = krige.conv(data, loc = pred.grid.north, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
# kc.east = krige.conv(data, loc = pred.grid.east, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
# kc.west = krige.conv(data, loc = pred.grid.west, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
#
# BC.south = rbind(BC.south,kc.south$predict)
# BC.north = rbind(BC.north,kc.north$predict)
# BC.east = rbind(BC.east,kc.east$predict)
# BC.west = rbind(BC.west,kc.west$predict)
## krige initial head
# if (itime==start.time)
# {
# kc.domain = krige.conv(data, loc = pred.grid.domain, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
# h.initial = as.vector(kc.domain$predict)
# dim(h.initial) = c(grid.nx,grid.ny)
# }
# jpeg(fname_fig.initialH_krige, width=8,height=8,units='in',res=300,quality=100)
# image2D(z= h.initial, x= unit_x, y= unit_y, shade=0.2, rasterImage = F, NAcol = "white",
# main = paste("Initial Head", start.time), asp = 1)
# dev.off()
# }
# }
##----------------import initial head from Hanford_Reach_2007_Initial.h5------------------
# old_ini = h5read("Inputs/test_2007_age/Hanford_Reach_2007_Initial.h5", name = "Initial_Head/Data")
#
# old_ini = t(old_ini)
#
# old_ini.list = list(x = seq(from = 538000, to = 614000, by = 250), y = seq(from = 97000, to = 164000, by = 250), z = old_ini)
#
# new_ini = interp.surface(old_ini.list, cells_proj)
#
# new_ini[which(is.na(new_ini))] = 110
#
# new_ini = array(new_ini, c(nx,ny))
#
# image2D(z= new_ini, x= unit_x, y= unit_y, shade=0.2, rasterImage = F, NAcol = "white", border = NA, resfac = 3,
# main = c("new_ini"), asp = 1)
# image2D(z= old_ini, x = seq(from = 538000, to = 614000, by = 250), y = seq(from = 97000, to = 164000, by = 250), shade=0.2, rasterImage = F, NAcol = "white",
# main = c("old_ini"), asp = 1)
#
# fname_initial.h5 = "Inputs/HFR_model_200m/HFR_H_Initial_new.h5"
#
# if (file.exists(fname_initial.h5)) {
# file.remove(fname_initial.h5)
# }
# h5createFile(fname_initial.h5)
# h5createGroup(fname_initial.h5,'Initial_Head')
#
# h5write(t(new_ini),fname_initial.h5, ## why tranpose? to match HDF5 format
# 'Initial_Head/Data',level=0)
# fid = H5Fopen(fname_initial.h5)
# h5g = H5Gopen(fid,'/Initial_Head')
# h5writeAttribute(attr = 1.0, h5obj = h5g, name = 'Cell Centered')
# h5writeAttribute.character(attr = "XY", h5obj = h5g, name = 'Dimension')
# h5writeAttribute(attr = c(200, 200), h5obj = h5g, name = 'Discretization')
# h5writeAttribute(attr = 500.0, h5obj = h5g, name = 'Max Buffer Size')
# h5writeAttribute(attr = c(0, 0), h5obj = h5g, name = 'Origin')
# H5Gclose(h5g)
# H5Fclose(fid)
##-----------------------------------------------------------------------------------------------
time.id = avail.time.id
##Generate the initial condition hdf5 file for the domain.
if (file.exists(fname_initial.h5)) {
file.remove(fname_initial.h5)
}
h5createFile(fname_initial.h5)
h5createGroup(fname_initial.h5,'Initial_Head')
h5write(t(h.initial),fname_initial.h5, ## why tranpose? to match HDF5 format
'Initial_Head/Data',level=0)
fid = H5Fopen(fname_initial.h5)
h5g = H5Gopen(fid,'/Initial_Head')
h5writeAttribute(attr = 1.0, h5obj = h5g, name = 'Cell Centered')
h5writeAttribute.character(attr = "XY", h5obj = h5g, name = 'Dimension')
h5writeAttribute(attr = c(200, 200), h5obj = h5g, name = 'Discretization')
h5writeAttribute(attr = 500.0, h5obj = h5g, name = 'Max Buffer Size')
h5writeAttribute(attr = c(0, 0), h5obj = h5g, name = 'Origin')
H5Gclose(h5g)
H5Fclose(fid)
#
# ##Generate the BC hdf5 file.
# if (file.exists(paste(output_folder,BC.h5,sep=''))) {
# file.remove(paste(output_folder,BC.h5,sep=''))
# }
#
# h5createFile(paste(output_folder,BC.h5,sep=''))
#
# ### write data
# h5createGroup(paste(output_folder,BC.h5,sep=''),'BC_South')
# h5write(time.id,paste(output_folder,BC.h5,sep=''),'BC_South/Times',level=0)
# h5write(BC.south,paste(output_folder,BC.h5,sep=''),'BC_South/Data',level=0)
#
# h5createGroup(paste(output_folder,BC.h5,sep=''),'BC_North')
# h5write(time.id,paste(output_folder,BC.h5,sep=''),'BC_North/Times',level=0)
# h5write(BC.north,paste(output_folder,BC.h5,sep=''),'BC_North/Data',level=0)
#
# h5createGroup(paste(output_folder,BC.h5,sep=''),'BC_East')
# h5write(time.id,paste(output_folder,BC.h5,sep=''),'BC_East/Times',level=0)
# h5write(BC.east,paste(output_folder,BC.h5,sep=''),'BC_East/Data',level=0)
#
# h5createGroup(paste(output_folder,BC.h5,sep=''),'BC_West')
# h5write(time.id,paste(output_folder,BC.h5,sep=''),'BC_West/Times',level=0)
# h5write(BC.west,paste(output_folder,BC.h5,sep=''),'BC_West/Data',level=0)
#
# ### write attribute
# fid = H5Fopen(paste(output_folder,BC.h5,sep=''))
# h5g.south = H5Gopen(fid,'/BC_South')
# h5g.north = H5Gopen(fid,'/BC_North')
# h5g.east = H5Gopen(fid,'/BC_East')
# h5g.west = H5Gopen(fid,'/BC_West')
#
#
# h5writeAttribute(attr = 1.0, h5obj = h5g.south, name = 'Cell Centered')
# h5writeAttribute(attr = 'X', h5obj = h5g.south, name = 'Dimension')
# h5writeAttribute(attr = grid.x, h5obj = h5g.south, name = 'Discretization')
# h5writeAttribute(attr = 200.0, h5obj = h5g.south, name = 'Max Buffer Size')
# h5writeAttribute(attr = range_x[1], h5obj = h5g.south, name = 'Origin')
# h5writeAttribute(attr = 'h', h5obj = h5g.south, name = 'Time Units')
# h5writeAttribute(attr = 1.0, h5obj = h5g.south, name = 'Transient')
#
#
# h5writeAttribute(attr = 1.0, h5obj = h5g.north, name = 'Cell Centered')
# h5writeAttribute(attr = 'X', h5obj = h5g.north, name = 'Dimension')
# h5writeAttribute(attr = grid.x, h5obj = h5g.north, name = 'Discretization')
# h5writeAttribute(attr = 200.0, h5obj = h5g.north, name = 'Max Buffer Size')
# h5writeAttribute(attr = range_x[1], h5obj = h5g.north, name = 'Origin')
# h5writeAttribute(attr = 'h', h5obj = h5g.north, name = 'Time Units')
# h5writeAttribute(attr = 1.0, h5obj = h5g.north, name = 'Transient')
#
#
# h5writeAttribute(attr = 1.0, h5obj = h5g.east, name = 'Cell Centered')
# h5writeAttribute(attr = 'Y', h5obj = h5g.east, name = 'Dimension')
# h5writeAttribute(attr = grid.y, h5obj = h5g.east, name = 'Discretization')
# h5writeAttribute(attr = 200.0, h5obj = h5g.east, name = 'Max Buffer Size')
# h5writeAttribute(attr = range_y[1], h5obj = h5g.east, name = 'Origin')
# h5writeAttribute(attr = 'h', h5obj = h5g.east, name = 'Time Units')
# h5writeAttribute(attr = 1.0, h5obj = h5g.east, name = 'Transient')
#
#
# h5writeAttribute(attr = 1.0, h5obj = h5g.west, name = 'Cell Centered')
# h5writeAttribute(attr = 'Y', h5obj = h5g.west, name = 'Dimension')
# h5writeAttribute(attr = grid.y, h5obj = h5g.west, name = 'Discretization')
# h5writeAttribute(attr = 200.0, h5obj = h5g.west, name = 'Max Buffer Size')
# h5writeAttribute(attr = range_y[1], h5obj = h5g.west, name = 'Origin')
# h5writeAttribute(attr = 'h', h5obj = h5g.west, name = 'Time Units')
# h5writeAttribute(attr = 1.0, h5obj = h5g.west, name = 'Transient')
#
#
# H5Gclose(h5g.south)
# H5Gclose(h5g.north)
# H5Gclose(h5g.east)
# H5Gclose(h5g.west)
# H5Fclose(fid)
# save(list=ls(),file=fname_300A.bc.r)
| /70km_reach_model/pre_process/grid_250m_60km/HFR_initial_head.R | no_license | mrubayet/archived_codes_for_sfa_modeling | R | false | false | 38,491 | r | ## #This file is used for calculating transient boundary conditions
## #using universal kriging
###cov_model_sets = c('gaussian','wave','exponential','spherical')
###drift_sets = c(0,1)
setwd("/Users/shua784/Dropbox/PNNL/Projects/Reach_scale_model/")
# rm(list=ls())
library(geoR)
library(rhdf5)
library(ggplot2)
# library(gstat)
library(sp)
library(maptools)
library(phylin)
##------------INPUT----------------##
# source("codes/300A_parameters.R")
H5close()
options(geoR.messages=FALSE)
# input_folder = 'data/headdata4krige_Plume_2008-2017/'
fname_geoFramework.r = "results/geoframework_200m.r"
fname_river.geo = "data/river_geometry_manual.csv"
fname_mvAwln = "/Users/shua784/Dropbox/PNNL/People/From_Patrick/SQL/mvAwln.csv"
fname_mvAwln_id = "/Users/shua784/Dropbox/PNNL/People/From_Patrick/SQL/mvAwln_wellID_updated.csv"
fname_manual_wells_ids = "/Users/shua784/Dropbox/PNNL/People/From_Patrick/SQL/HYDRAULIC_HEAD_MV_WellID.csv"
fname_manual_wells = "/Users/shua784/Dropbox/PNNL/People/From_Patrick/SQL/HYDRAULIC_HEAD_MV.csv"
fname_USGS_wells = "/Users/shua784/Dropbox/PNNL/People/from_Erick/Burns_well_data.csv"
fname_USGS_wells_ids = "/Users/shua784/Dropbox/PNNL/People/from_Erick/Burns_well_attributes.csv"
fname_SFA_wells = "/Users/shua784/Dropbox/PNNL/People/Velo/300A_Well_Data/"
fname_SFA_wells_ids = "/Users/shua784/Dropbox/PNNL/People/Velo/300A_well_coord.csv"
fname_SFA_wells_all = "/Users/shua784/Dropbox/PNNL/People/Velo/SFA_all_wells.csv"
is.plot = F
##--------------OUTPUT---------------------##
fname_initial.h5 = "Inputs/HFR_model_200m/HFR_H_Initial_2007_04_01.h5"
# BC.h5 = "Inputs/HFR_H_BC.h5"
# fname_head.bc.r= "results/HFR_head_BC.r"
fname_wells.r = "results/well_compiled_wl_data.r"
# fname_fig.initalH_contour = "figures/initial_head_150m.jpg"
# fname_fig.initialH_krige = "figures/initial_head_krige.jpg"
fname_fig.initialH_idw = "figures/initial_head_200m_2017-04-01.jpg"
fname.selected.wells.df = "results/selected.wells.df_2007-3-28.r"
load(fname_geoFramework.r)
## for grids
grid.x = idx
grid.y = idy
grid.nx = nx
grid.ny = ny
# pred.grid.south = expand.grid(seq(range_x[1]+grid.x/2,range_x[2],grid.x),range_y[1]+grid.y/2) # for South boundary
# pred.grid.north = expand.grid(seq(range_x[1]+grid.x/2,range_x[2],grid.x),range_y[2]-grid.y/2) # for North boundary
# pred.grid.east = expand.grid(range_x[1]+grid.x/2,seq(range_y[1]+grid.y/2,range_y[2],grid.y)) # for East boundary
# pred.grid.west = expand.grid(range_x[2]-grid.x/2,seq(range_y[1]+grid.y/2,range_y[2],grid.y)) # for West boundary
pred.grid.domain = expand.grid(seq(range_x[1]+grid.x/2,range_x[2],grid.x),
seq(range_y[1]+grid.y/2,range_y[2],grid.y)) # for domain
# colnames(pred.grid.south)=c('x','y')
# colnames(pred.grid.north)=c('x','y')
# colnames(pred.grid.east)=c('x','y')
# colnames(pred.grid.west)=c('x','y')
colnames(pred.grid.domain)=c('x','y')
## time information
# start.time = as.POSIXct("2010-02-27 12:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
# end.time = as.POSIXct("2010-02-28 23:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
# dt = 3600 ##secs
# times = seq(start.time,end.time,dt)
# ntime = length(times)
# time.id = seq(0,ntime-1,dt/3600) ##hourly boundary, why start from 0h?
# origin.time = as.POSIXct("2007-12-31 23:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S") # starting time should be 1 h early than "2008-1-1 0:0:0" to set the right index in folder/headdata4krige_Plume_2008_2017
## BC.south = array(NA,c(ntime,grid.nx))
## BC.north = array(NA,c(ntime,grid.nx))
## BC.east = array(NA,c(ntime,grid.ny))
## BC.west = array(NA,c(ntime,grid.ny))
BC.south = c()
BC.north = c()
BC.east = c()
BC.west = c()
avail.time.id = c()
# ##==================== read into well data ====================
if (!file.exists(fname_wells.r)) {
mvAwln.id = read.csv(fname_mvAwln_id, stringsAsFactors = F)
mvAwln = read.csv(fname_mvAwln, stringsAsFactors = F)
mvAwln.id = transform(mvAwln.id,Easting = as.numeric(Easting),
Northing = as.numeric(Northing))
HEIS_auto_wells = subset(mvAwln, select = c("WellName", "WellNumber", "procWaterElevation", "procDate"))
HEIS_auto_wells = transform(HEIS_auto_wells, WellName = as.character(WellName),
WellNumber = as.character(WellNumber),
procWaterElevation = as.numeric(procWaterElevation),
procDate = as.POSIXct(procDate))
manual_wells_ids = read.csv(fname_manual_wells_ids, stringsAsFactors = F)
manual_wells = read.csv(fname_manual_wells, stringsAsFactors = F)
manual_wells = transform(manual_wells, HYD_DATE_TIME_PST = as.POSIXct(HYD_DATE_TIME_PST))
# HEIS_auto_wells = mvAwln
HEIS_auto_wells_ids = mvAwln.id
HEIS_manual_wells = manual_wells
colnames(HEIS_manual_wells)[1:4] = c("WellNumber", "WellName", "procDate", "procWaterElevation")
HEIS_manual_wells_ids = manual_wells_ids
USGS_wells = read.csv(fname_USGS_wells, stringsAsFactors = F)
USGS_wells_ids = read.csv(fname_USGS_wells_ids, stringsAsFactors = F)
USGS_wells_ids = transform(USGS_wells_ids, CP_ID_NUM = as.character(CP_ID_NUM))
USGS_wells = transform(USGS_wells, CP_NUM = as.character(CP_NUM), DATE = as.POSIXct(DATE))
USGS_wells$WLELEVft88 = USGS_wells$WLELEVft88*0.3048 # convert ft to meter
USGS_wells_ids$X_SP_83FT = USGS_wells_ids$X_SP_83FT*0.3048
USGS_wells_ids$Y_SP_83FT = USGS_wells_ids$Y_SP_83FT*0.3048
colnames(USGS_wells)[1:4] = c("WellNumber", "procDate", "Year_fract", "procWaterElevation")
colnames(USGS_wells_ids)[2:4] = c("WellNumber", "Easting", "Northing")
## select USGS wells
USGS_wells_selected.names = USGS_wells_ids$WellNumber[which(USGS_wells_ids$Easting < range.xcoods[2] & USGS_wells_ids$Easting > range.xcoods[1] &
USGS_wells_ids$Northing < range.ycoods[2] & USGS_wells_ids$Northing > range.ycoods[1])]
USGS_wells_selected = data.frame(WellName = character(), Easting = numeric(),
Northing = numeric(), DateTime = as.POSIXct(character()), WL = numeric(), stringsAsFactors = F)
for (iwell in USGS_wells_selected.names) {
manual_well = USGS_wells[which(USGS_wells$WellNumber == iwell), ]
USGS_wells_selected = rbind(USGS_wells_selected, data.frame(WellNumber = manual_well$WellNumber, WL = manual_well$procWaterElevation,
DateTime = manual_well$procDate,
Easting = rep(USGS_wells_ids$Easting[which(USGS_wells_ids$WellNumber == iwell)], length(manual_well$WellNumber)),
Northing = rep(USGS_wells_ids$Northing[which(USGS_wells_ids$WellNumber == iwell)], length(manual_well$WellNumber)),
stringsAsFactors = F
))
}
## SFA wells
SFA_wells_ids = read.csv(fname_SFA_wells_ids, stringsAsFactors = F)
colnames(SFA_wells_ids)[2] = c("WellName")
# SFA_wells_list=c("399-1-1_3var.csv")
# iwell = SFA_wells_list
if (!file.exists(fname_SFA_wells_all)) {
SFA_wells = data.frame(WellName = as.character(), DateTime = as.POSIXct(character()), Temp = numeric(),
Spc = numeric(), WL = numeric(), stringsAsFactors = F)
SFA_wells_list = list.files(fname_SFA_wells)
for (iwell in SFA_wells_list) {
# iwell = "399-1-1_3var.csv"
iSFA_well = read.csv(paste(fname_SFA_wells, iwell, sep = ""), stringsAsFactors = F)
# iSFA_well = read.csv(paste(fname_SFA_wells, "399-1-1_3var.csv", sep = ""), stringsAsFactors = F)
colnames(iSFA_well) = c("DateTime", "Temp", "Spc", "WL")
if (iwell %in% c("399-5-1_3var.csv", "399-3-19_3var.csv" ) ) {
iSFA_well$DateTime = as.POSIXct(iSFA_well$DateTime, format = "%m/%d/%y %H:%M", tz = "GMT") ## time formate must agree with data-column
} else {
iSFA_well$DateTime = as.POSIXct(iSFA_well$DateTime, format = "%d-%b-%Y %H:%M:%S", tz = "GMT") ## time formate must agree with data-column
}
id_col = data.frame(WellName = rep(gsub("_3var.csv", "", iwell), dim(iSFA_well)[1]), stringsAsFactors = F)
iSFA_well = cbind(id_col, iSFA_well)
SFA_wells = rbind(SFA_wells, iSFA_well, stringsAsFactors =F)
}
# as.POSIXct(strptime(SFA_wells$DateTime[2], "%d-%b-%Y %H:%M:%S"), format = "%d-%m-%Y %H:%M:%S", tz = "GMT")
# SFA_wells$DateTime = as.POSIXct(SFA_wells$DateTime, format = "%d-%b-%Y %H:%M:%S", tz = "GMT") ## time formate must agree with data-column
write.csv(SFA_wells, file = "/Users/shua784/Dropbox/PNNL/People/Velo/SFA_all_wells.csv", row.names = F)
} else {
SFA_wells = read.csv(fname_SFA_wells_all, stringsAsFactors = F)
}
save(list = c("HEIS_auto_wells", "HEIS_auto_wells_ids", "HEIS_manual_wells", "HEIS_manual_wells_ids",
"USGS_wells", "USGS_wells_ids", "USGS_wells_selected", "USGS_wells_selected.names","SFA_wells", "SFA_wells_ids"), file = fname_wells.r)
} else {
load(fname_wells.r)
}
range.xcoods = c(model_origin[1], model_origin[1] + xlen)
range.ycoods = c(model_origin[2], model_origin[2] + ylen)
##-------------------- plot all USGS wells------------------------
if (is.plot) {
USGS_wells_selected.names = USGS_wells_ids$WellNumber[which(USGS_wells_ids$Easting < range.xcoods[2] & USGS_wells_ids$Easting > range.xcoods[1] &
USGS_wells_ids$Northing < range.ycoods[2] & USGS_wells_ids$Northing > range.ycoods[1])]
USGS_wells_selected = data.frame(WellName = character(), Easting = numeric(),
Northing = numeric(), DateTime = as.POSIXct(character()), WL = numeric(), stringsAsFactors = F)
start.time = as.POSIXct("1990-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
end.time = as.POSIXct("2011-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
jpeg(file="figures/USGS.wells.jpg", width=12, height=16, units="in", res=300)
par(mar =c(4,4,1,1))
plot(0,0,xlim=c(start.time, end.time), ylim = c(100, 305),type = "n", xlab = "Date", ylab = "Water Level (m)",
axes = F, cex=1.5)
box()
colors = rainbow(100)
for (iwell in USGS_wells_selected.names) {
manual_well = USGS_wells[which(USGS_wells$WellNumber == iwell), ]
USGS_wells_selected = rbind(USGS_wells_selected, data.frame(WellNumber = manual_well$WellNumber, WL = manual_well$procWaterElevation,
DateTime = manual_well$procDate,
Easting = rep(USGS_wells_ids$Easting[which(USGS_wells_ids$WellNumber == iwell)], length(manual_well$WellNumber)),
Northing = rep(USGS_wells_ids$Northing[which(USGS_wells_ids$WellNumber == iwell)], length(manual_well$WellNumber)),
stringsAsFactors = F
))
lines(manual_well$procDate, manual_well$procWaterElevation, col= sample(colors), lwd = 1 )
points(manual_well$procDate, manual_well$procWaterElevation, pch=1, cex=1)
axis.POSIXct(1,at=seq(as.Date("1990-01-01 00:00:00",tz="GMT"),
to=as.Date("2011-01-01 00:00:00",tz="GMT"),by="quarter"),
format="%m/%Y",mgp=c(5,1.7,0),cex.axis=1)
axis(2,at=seq(100, 305, 5),mgp=c(5,0.7,0),cex.axis=1)
}
dev.off()
hist(USGS_wells_selected$DateTime, breaks = 1000, freq = T)
##---------------------- plot all east wells----------------------------
east.wells=c()
pattern = c(glob2rx("15N*"),glob2rx("14N*"), glob2rx("13N*"), glob2rx("12N*"),glob2rx("11N*"), glob2rx("10N*"), glob2rx("09N*"))
east.wells = grep(paste(pattern,collapse = "|"), HEIS_manual_wells_ids$WELL_NAME, value = T)
east.wells.data = data.frame(WellName = character(), Easting = numeric(),
Northing = numeric(), DateTime = as.POSIXct(character()), WL = numeric(), stringsAsFactors = F)
start.time = as.POSIXct("1990-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
end.time = as.POSIXct("2008-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
jpeg(file="figures/east.wells.jpg", width=12, height=16, units="in", res=300)
par(mar =c(4,4,1,1))
plot(0,0,xlim=c(start.time, end.time), ylim = c(150, 305),type = "n", xlab = "Date", ylab = "Water Level (m)",
axes = F, cex=1.5)
box()
colors = rainbow(100)
for (iwell in east.wells) {
manual_well = HEIS_manual_wells[which(HEIS_manual_wells$WellName == iwell), ]
east.wells.data = rbind(east.wells.data, data.frame(WellName = manual_well$WellName, WL = manual_well$procWaterElevation,
DateTime = manual_well$procDate,
Easting = rep(HEIS_manual_wells_ids$EASTING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)], length(manual_well$WellName)),
Northing = rep(HEIS_manual_wells_ids$NORTHING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)], length(manual_well$WellName)),
stringsAsFactors = F
))
lines(manual_well$procDate, manual_well$procWaterElevation, col= sample(colors), lwd = 1 )
points(manual_well$procDate, manual_well$procWaterElevation, pch=1, cex=1)
axis.POSIXct(1,at=seq(as.Date("1990-01-01 00:00:00",tz="GMT"),
to=as.Date("2008-01-01 00:00:00",tz="GMT"),by="quarter"),
format="%m/%Y",mgp=c(5,1.7,0),cex.axis=1)
axis(2,at=seq(150, 305, 5),mgp=c(5,0.7,0),cex.axis=1)
# date.range = range(manual_well$procDate)
#
# print(paste(iwell, "has", length(manual_well$procWaterElevation), "obs. points"))
}
dev.off()
hist(east.wells.data$DateTime, breaks = 1000, freq = T)
##---------------------- plot all HEIS manual wells--------------------
start.time = as.POSIXct("1990-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
end.time = as.POSIXct("2017-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
jpeg(file="figures/all.manual.wells.jpg", width=12, height=16, units="in", res=300)
par(mar =c(4,4,1,1))
plot(0,0,xlim=c(start.time, end.time), ylim = c(100, 305),type = "n", xlab = "Date", ylab = "Water Level (m)",
axes = F, cex=1.5)
box()
colors = rainbow(100)
for (iwell in well_names) {
manual_well = HEIS_manual_wells[which(HEIS_manual_wells$WellName == iwell), ]
lines(manual_well$procDate, manual_well$procWaterElevation, col= sample(colors), lwd = 1 )
points(manual_well$procDate, manual_well$procWaterElevation, pch=1, cex=1)
axis.POSIXct(1,at=seq(as.Date("1990-01-01 00:00:00",tz="GMT"),
to=as.Date("2017-01-01 00:00:00",tz="GMT"),by="quarter"),
format="%m/%Y",mgp=c(5,1.7,0),cex.axis=1)
axis(2,at=seq(150, 305, 5),mgp=c(5,0.7,0),cex.axis=1)
}
dev.off()
hist.HEIS = hist(HEIS_manual_wells$procDate, breaks = 1000, freq = T)
}
##---------------------------select wells with data at each time stamp-----------------------
if (!file.exists(fname.selected.wells.df)) {
## create empty matrix with Colclasses defined
# initial.time = as.POSIXct("2005-03-29 12:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
# initial.time = as.POSIXct("2007-03-28 12:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
initial.time = as.POSIXct("2007-04-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
# times = initial.time
# min.time = as.POSIXct("2005-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
# max.time = as.POSIXct("2010-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
# times = seq(min.time, max.time, by = 30*86400)
# times = seq(min.time, max.time, by = "month")
# well_names = unique(HEIS_manual_wells_ids$WELL_NAME)
# USGS_wells_selected.names = USGS_wells_ids$WellNumber[which(USGS_wells_ids$Easting < range.xcoods[2] & USGS_wells_ids$Easting > range.xcoods[1] &
# USGS_wells_ids$Northing < range.ycoods[2] & USGS_wells_ids$Northing > range.ycoods[1])]
well_names = c(unique(HEIS_manual_wells_ids$WELL_NAME), unique(USGS_wells_selected.names), unique(SFA_wells_ids$WellName))
# well_names = c(unique(SFA_wells_ids$WellName)[27:48])
# time_mar = 1*24*3600 #1 day range
time_mar = 15*86400 #15 day range
# times=times[1]
times = initial.time
# itime = times
for (i in 1:length(times)) {
itime = times[i]
print(itime)
selected.wells = data.frame(WellName = character(), WellNumber = character(),
Easting = numeric(),
Northing = numeric(), DateTime = as.POSIXct(character()), WL = numeric(), stringsAsFactors = F)
# well_names = c("699-39-79", "199-D3-2", "199-B2-12", "399-5-1")
# well_names = c("399-1-1")
for (iwell in well_names) {
# iwell = c("199-B2-12")
# iwell = c("199-D3-2")
if (iwell %in% SFA_wells$WellName) {
print(paste(iwell, "(SFA)"))
manual_well = SFA_wells[which(SFA_wells$WellName == iwell), ]
index = which.min(abs(as.numeric(manual_well$DateTime - itime)))
DateTime = manual_well$DateTime[index]
if (DateTime == itime) {
WL = manual_well$WL[index]
selected.wells = rbind(selected.wells, data.frame(WellName = manual_well$WellName[index],
WellNumber = manual_well$WellName[index],
WL = WL,
DateTime = DateTime,
Easting = SFA_wells_ids$Easting[which(SFA_wells_ids$WellName == iwell)],
Northing = SFA_wells_ids$Northing[which(SFA_wells_ids$WellName == iwell)],
stringsAsFactors = F
))
} else if (DateTime < itime + time_mar & DateTime > itime - time_mar) {
print(paste(iwell,"has wl within 1day of itime (SFA well)"))
WLs = manual_well$WL[which(manual_well$DateTime <itime + time_mar & manual_well$DateTime > itime - time_mar)]
WL = median(WLs)
selected.wells = rbind(selected.wells, data.frame(WellName = manual_well$WellName[index],
WellNumber = manual_well$WellName[index],
WL = WL,
DateTime = DateTime,
Easting = SFA_wells_ids$Easting[which(SFA_wells_ids$WellName == iwell)],
Northing = SFA_wells_ids$Northing[which(SFA_wells_ids$WellName == iwell)],
stringsAsFactors = F
))
}
}
## sample wells from mvAwln
if (iwell %in% HEIS_auto_wells$WellName) {
# print(paste(iwell, "(mvAwln)"))
auto_well = HEIS_auto_wells[which(HEIS_auto_wells$WellName == iwell), ]
index = which.min(abs(as.numeric(auto_well$procDate - itime)))
DateTime = auto_well$procDate[index]
## find wells having data within given time range
if (DateTime == itime) {
WL = auto_well$procWaterElevation[index]
selected.wells = rbind(selected.wells, data.frame(WellName = auto_well$WellName[index],
WellNumber = auto_well$WellNumber[index],
WL = WL,
DateTime = DateTime,
Easting = HEIS_manual_wells_ids$EASTING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
Northing = HEIS_manual_wells_ids$NORTHING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
stringsAsFactors = F
))
} else if (DateTime < itime + time_mar & DateTime > itime - time_mar) {
print(paste(iwell,"has wl within 1day of itime (mvAwln well)"))
WLs = auto_well$procWaterElevation[which(auto_well$procDate <itime + time_mar & auto_well$procDate > itime - time_mar)]
WL = median(WLs)
selected.wells = rbind(selected.wells, data.frame(WellName = auto_well$WellName[index],
WellNumber = auto_well$WellNumber[index],
WL = WL,
DateTime = DateTime,
Easting = HEIS_manual_wells_ids$EASTING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
Northing = HEIS_manual_wells_ids$NORTHING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
stringsAsFactors = F
))
}
}
## sample wells from manual HEIS data
if (iwell %in% HEIS_manual_wells$WellName) {
# print(paste(iwell, "(HEIS manual)"))
manual_well = HEIS_manual_wells[which(HEIS_manual_wells$WellName == iwell), ]
index = which.min(abs(as.numeric(manual_well$procDate - itime)))
DateTime = manual_well$procDate[index]
if (DateTime == itime) {
WL = manual_well$procWaterElevation[index]
selected.wells = rbind(selected.wells, data.frame(WellName = manual_well$WellName[index],
WellNumber = manual_well$WellNumber[index],
WL = WL,
DateTime = DateTime,
Easting = HEIS_manual_wells_ids$EASTING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
Northing = HEIS_manual_wells_ids$NORTHING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
stringsAsFactors = F
))
} else if (DateTime < itime + time_mar & DateTime > itime - time_mar) {
print(paste(iwell,"has wl within 1day of itime (manual well)"))
WLs = manual_well$procWaterElevation[which(manual_well$procDate <itime + time_mar & manual_well$procDate > itime - time_mar)]
WL = median(WLs)
selected.wells = rbind(selected.wells, data.frame(WellName = manual_well$WellName[index],
WellNumber = manual_well$WellNumber[index],
WL = WL,
DateTime = DateTime,
Easting = HEIS_manual_wells_ids$EASTING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
Northing = HEIS_manual_wells_ids$NORTHING[which(HEIS_manual_wells_ids$WELL_NAME == iwell)],
stringsAsFactors = F
))
}
}
## sample wells from USGS
if (iwell %in% USGS_wells_selected$WellNumber) {
# print(paste(iwell, "(USGS)"))
manual_well = USGS_wells_selected[which(USGS_wells_selected$WellNumber == iwell), ]
index = which.min(abs(as.numeric(manual_well$DateTime - itime)))
DateTime = manual_well$DateTime[index]
if (DateTime == itime) {
WL = manual_well$WL[index]
selected.wells = rbind(selected.wells, data.frame(WellName = manual_well$WellNumber[index],
WellNumber = manual_well$WellNumber[index],
WL = WL,
DateTime = DateTime,
Easting = USGS_wells_selected$Easting[index],
Northing = USGS_wells_selected$Northing[index],
stringsAsFactors = F
))
} else if (DateTime < itime + time_mar & DateTime > itime - time_mar) {
print(paste(iwell,"has wl within 1day of itime (USGS well)"))
WLs = manual_well$WL[which(manual_well$DateTime <itime + time_mar & manual_well$DateTime > itime - time_mar)]
WL = median(WLs)
selected.wells = rbind(selected.wells, data.frame(WellName = manual_well$WellNumber[index],
WellNumber = manual_well$WellNumber[index],
WL = WL,
DateTime = DateTime,
Easting = USGS_wells_selected$Easting[index],
Northing = USGS_wells_selected$Northing[index],
stringsAsFactors = F
))
}
}
## sample wells from SFA data
}
selected.wells.unique = selected.wells[!duplicated(selected.wells$WellName), ] # remove duplicated wellNames
selected.wells.unique = selected.wells[!duplicated(selected.wells$Easting), ] # remove duplicated well coords
selected.wells.unique = selected.wells.unique[complete.cases(selected.wells.unique), ] # remove rows contain NAs
selected.wells.df = data.frame(x=selected.wells.unique$Easting, y=selected.wells.unique$Northing, z = selected.wells.unique$WL)
# colnames(data) = c('x','y','z')
selected.wells.df = selected.wells.df[order(selected.wells.df$x),]
# save(selected.wells.df, file = "results/selected.wells.df_2007-4-1.r")
# ##----------------------- plot well head----------------------
# # save(selected.wells.df, file = "results/inital_data_coords.r")
#
#
# plot(selected.wells.df$x, selected.wells.df$y, asp = 1)
# # par(mfrow = c(2, 1))
# s= interp(selected.wells.df$x, selected.wells.df$y, selected.wells.df$z, duplicate = "strip", nx=100, ny=100)
# jpeg(paste("figures/", initial.time, ".jpg", sep = ""), width=8,height=8,units='in',res=300,quality=100)
# image2D(s, shade=0.2, rasterImage = F, NAcol = "gray",
# main = paste(initial.time, "inital head (contour)"), asp = 1, contour = T, add = F
)
#
#
# points(selected.wells.df$x, selected.wells.df$y, col = "white", pch = 1)
# # dev.off()
# load("results/inital_data_coords.r")
# selected.wells.df = data
}
save(selected.wells.df, file = fname.selected.wells.df)
} else {
load(fname.selected.wells.df)
if (dim(selected.wells.df)[1]>2) {
## ---------------use inverse distance interpolation------------------
# geo.data = selected.wells.df
# coordinates(geo.data)= ~x+y
# plot(geo.data)
# x.range <- range.xcoods # min/max longitude of the interpolation area
# y.range <- range.ycoods # min/max latitude of the interpolation area
# grd <- expand.grid(x = seq(from = x.range[1], to = x.range[2], by = idx), y = seq(from = y.range[1],
# to = y.range[2], by = idy)) # expand points to grid
grd = expand.grid(unit_x, unit_y)
# save(grd, file = "results/model_grids.r")
idw.interp = idw(values=selected.wells.df[,"z"],
coords = selected.wells.df[,c("x","y")],
grid=grd,
method="shepard",
p=2)
idw.interp = as.numeric(unlist(idw.interp))
h.initial = array(idw.interp, c(nx, ny))
river.geometry = read.csv(fname_river.geo)
river.geometry = river.geometry[, 2:3]
itime = as.POSIXct("2007-04-01 12:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
jpeg(fname_fig.initialH_idw, width=8,height=8,units='in',res=300,quality=100)
# plot(selected.wells.df$x, selected.wells.df$y, col = "black", pch = 1, asp=1, xlim = c(x.range[1], x.range[2]))
head4plot = h.initial
head4plot[head4plot>200]=200
image2D(z= head4plot, x= unit_x, y= unit_y, shade=0.2, rasterImage = F, NAcol = "white",
main = paste("Initial Head", itime), asp = 1, contour = T, zlim = c(100, 200), xlab = "Easting", ylab = "Northing")
points(selected.wells.df$x, selected.wells.df$y, col = "white", pch = 1, asp=1)
polygon(river.geometry$x, river.geometry$y, border = "gray", asp=1)
dev.off()
}
}
## ------------------- Krige------------------------------------------
# data = as.geodata(data)
##This bins and esimator.type is defined by Xingyuan
# if (nrow(data$coords)>27) {
# # bin1 = variog(data,uvec=c(0,50,100,seq(150,210,30),250,300),trend='cte',bin.cloud=T,estimator.type='modulus')
# bin1 = variog(data, uvec=c(0, 500, 1000, 2000, 2500, 3500, 4500, 5500, seq(6000,60000,100)),trend='cte',bin.cloud=T,estimator.type='modulus', option = "cloud")
# } else {
# bin1 = variog(data,uvec=c(0,100,seq(150,210,30),250,300),trend='cte',bin.cloud=T,estimator.type='modulus')
# }
# initial.values <- expand.grid(max(bin1$v),seq(300))
# wls = variofit(bin1,ini = initial.values,fix.nugget=T,nugget = 0.00001,fix.kappa=F,cov.model='exponential')
#check the varigram
# if (itime %% 1000 == 1) {
# jpeg(filename=paste('figures/Semivariance Time = ',start.time,".jpg", sep=''),
# width=5,height=5,units="in",quality=100,res=300)
# plot(bin1,main = paste('Time = ',start.time, sep=''),col='red', pch = 19, cex = 1, lty = "solid", lwd = 2)
# text(bin1$u,bin1$v,labels=bin1$n, cex= 0.7,pos = 2)
# lines(wls)
# dev.off()
# print(times[itime])
# }
# ## Generate boundary and initial condition
# kc.south = krige.conv(data, loc = pred.grid.south, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
# kc.north = krige.conv(data, loc = pred.grid.north, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
# kc.east = krige.conv(data, loc = pred.grid.east, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
# kc.west = krige.conv(data, loc = pred.grid.west, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
#
# BC.south = rbind(BC.south,kc.south$predict)
# BC.north = rbind(BC.north,kc.north$predict)
# BC.east = rbind(BC.east,kc.east$predict)
# BC.west = rbind(BC.west,kc.west$predict)
## krige initial head
# if (itime==start.time)
# {
# kc.domain = krige.conv(data, loc = pred.grid.domain, krige = krige.control(obj.m=wls,type.krige='OK',trend.d='cte',trend.l='cte'))
# h.initial = as.vector(kc.domain$predict)
# dim(h.initial) = c(grid.nx,grid.ny)
# }
# jpeg(fname_fig.initialH_krige, width=8,height=8,units='in',res=300,quality=100)
# image2D(z= h.initial, x= unit_x, y= unit_y, shade=0.2, rasterImage = F, NAcol = "white",
# main = paste("Initial Head", start.time), asp = 1)
# dev.off()
# }
# }
##----------------import initial head from Hanford_Reach_2007_Initial.h5------------------
# old_ini = h5read("Inputs/test_2007_age/Hanford_Reach_2007_Initial.h5", name = "Initial_Head/Data")
#
# old_ini = t(old_ini)
#
# old_ini.list = list(x = seq(from = 538000, to = 614000, by = 250), y = seq(from = 97000, to = 164000, by = 250), z = old_ini)
#
# new_ini = interp.surface(old_ini.list, cells_proj)
#
# new_ini[which(is.na(new_ini))] = 110
#
# new_ini = array(new_ini, c(nx,ny))
#
# image2D(z= new_ini, x= unit_x, y= unit_y, shade=0.2, rasterImage = F, NAcol = "white", border = NA, resfac = 3,
# main = c("new_ini"), asp = 1)
# image2D(z= old_ini, x = seq(from = 538000, to = 614000, by = 250), y = seq(from = 97000, to = 164000, by = 250), shade=0.2, rasterImage = F, NAcol = "white",
# main = c("old_ini"), asp = 1)
#
# fname_initial.h5 = "Inputs/HFR_model_200m/HFR_H_Initial_new.h5"
#
# if (file.exists(fname_initial.h5)) {
# file.remove(fname_initial.h5)
# }
# h5createFile(fname_initial.h5)
# h5createGroup(fname_initial.h5,'Initial_Head')
#
# h5write(t(new_ini),fname_initial.h5, ## why tranpose? to match HDF5 format
# 'Initial_Head/Data',level=0)
# fid = H5Fopen(fname_initial.h5)
# h5g = H5Gopen(fid,'/Initial_Head')
# h5writeAttribute(attr = 1.0, h5obj = h5g, name = 'Cell Centered')
# h5writeAttribute.character(attr = "XY", h5obj = h5g, name = 'Dimension')
# h5writeAttribute(attr = c(200, 200), h5obj = h5g, name = 'Discretization')
# h5writeAttribute(attr = 500.0, h5obj = h5g, name = 'Max Buffer Size')
# h5writeAttribute(attr = c(0, 0), h5obj = h5g, name = 'Origin')
# H5Gclose(h5g)
# H5Fclose(fid)
##-----------------------------------------------------------------------------------------------
time.id = avail.time.id
##Generate the initial condition hdf5 file for the domain.
if (file.exists(fname_initial.h5)) {
file.remove(fname_initial.h5)
}
h5createFile(fname_initial.h5)
h5createGroup(fname_initial.h5,'Initial_Head')
h5write(t(h.initial),fname_initial.h5, ## why tranpose? to match HDF5 format
'Initial_Head/Data',level=0)
fid = H5Fopen(fname_initial.h5)
h5g = H5Gopen(fid,'/Initial_Head')
h5writeAttribute(attr = 1.0, h5obj = h5g, name = 'Cell Centered')
h5writeAttribute.character(attr = "XY", h5obj = h5g, name = 'Dimension')
h5writeAttribute(attr = c(200, 200), h5obj = h5g, name = 'Discretization')
h5writeAttribute(attr = 500.0, h5obj = h5g, name = 'Max Buffer Size')
h5writeAttribute(attr = c(0, 0), h5obj = h5g, name = 'Origin')
H5Gclose(h5g)
H5Fclose(fid)
#
# ##Generate the BC hdf5 file.
# if (file.exists(paste(output_folder,BC.h5,sep=''))) {
# file.remove(paste(output_folder,BC.h5,sep=''))
# }
#
# h5createFile(paste(output_folder,BC.h5,sep=''))
#
# ### write data
# h5createGroup(paste(output_folder,BC.h5,sep=''),'BC_South')
# h5write(time.id,paste(output_folder,BC.h5,sep=''),'BC_South/Times',level=0)
# h5write(BC.south,paste(output_folder,BC.h5,sep=''),'BC_South/Data',level=0)
#
# h5createGroup(paste(output_folder,BC.h5,sep=''),'BC_North')
# h5write(time.id,paste(output_folder,BC.h5,sep=''),'BC_North/Times',level=0)
# h5write(BC.north,paste(output_folder,BC.h5,sep=''),'BC_North/Data',level=0)
#
# h5createGroup(paste(output_folder,BC.h5,sep=''),'BC_East')
# h5write(time.id,paste(output_folder,BC.h5,sep=''),'BC_East/Times',level=0)
# h5write(BC.east,paste(output_folder,BC.h5,sep=''),'BC_East/Data',level=0)
#
# h5createGroup(paste(output_folder,BC.h5,sep=''),'BC_West')
# h5write(time.id,paste(output_folder,BC.h5,sep=''),'BC_West/Times',level=0)
# h5write(BC.west,paste(output_folder,BC.h5,sep=''),'BC_West/Data',level=0)
#
# ### write attribute
# fid = H5Fopen(paste(output_folder,BC.h5,sep=''))
# h5g.south = H5Gopen(fid,'/BC_South')
# h5g.north = H5Gopen(fid,'/BC_North')
# h5g.east = H5Gopen(fid,'/BC_East')
# h5g.west = H5Gopen(fid,'/BC_West')
#
#
# h5writeAttribute(attr = 1.0, h5obj = h5g.south, name = 'Cell Centered')
# h5writeAttribute(attr = 'X', h5obj = h5g.south, name = 'Dimension')
# h5writeAttribute(attr = grid.x, h5obj = h5g.south, name = 'Discretization')
# h5writeAttribute(attr = 200.0, h5obj = h5g.south, name = 'Max Buffer Size')
# h5writeAttribute(attr = range_x[1], h5obj = h5g.south, name = 'Origin')
# h5writeAttribute(attr = 'h', h5obj = h5g.south, name = 'Time Units')
# h5writeAttribute(attr = 1.0, h5obj = h5g.south, name = 'Transient')
#
#
# h5writeAttribute(attr = 1.0, h5obj = h5g.north, name = 'Cell Centered')
# h5writeAttribute(attr = 'X', h5obj = h5g.north, name = 'Dimension')
# h5writeAttribute(attr = grid.x, h5obj = h5g.north, name = 'Discretization')
# h5writeAttribute(attr = 200.0, h5obj = h5g.north, name = 'Max Buffer Size')
# h5writeAttribute(attr = range_x[1], h5obj = h5g.north, name = 'Origin')
# h5writeAttribute(attr = 'h', h5obj = h5g.north, name = 'Time Units')
# h5writeAttribute(attr = 1.0, h5obj = h5g.north, name = 'Transient')
#
#
# h5writeAttribute(attr = 1.0, h5obj = h5g.east, name = 'Cell Centered')
# h5writeAttribute(attr = 'Y', h5obj = h5g.east, name = 'Dimension')
# h5writeAttribute(attr = grid.y, h5obj = h5g.east, name = 'Discretization')
# h5writeAttribute(attr = 200.0, h5obj = h5g.east, name = 'Max Buffer Size')
# h5writeAttribute(attr = range_y[1], h5obj = h5g.east, name = 'Origin')
# h5writeAttribute(attr = 'h', h5obj = h5g.east, name = 'Time Units')
# h5writeAttribute(attr = 1.0, h5obj = h5g.east, name = 'Transient')
#
#
# h5writeAttribute(attr = 1.0, h5obj = h5g.west, name = 'Cell Centered')
# h5writeAttribute(attr = 'Y', h5obj = h5g.west, name = 'Dimension')
# h5writeAttribute(attr = grid.y, h5obj = h5g.west, name = 'Discretization')
# h5writeAttribute(attr = 200.0, h5obj = h5g.west, name = 'Max Buffer Size')
# h5writeAttribute(attr = range_y[1], h5obj = h5g.west, name = 'Origin')
# h5writeAttribute(attr = 'h', h5obj = h5g.west, name = 'Time Units')
# h5writeAttribute(attr = 1.0, h5obj = h5g.west, name = 'Transient')
#
#
# H5Gclose(h5g.south)
# H5Gclose(h5g.north)
# H5Gclose(h5g.east)
# H5Gclose(h5g.west)
# H5Fclose(fid)
# save(list=ls(),file=fname_300A.bc.r)
|
#To disable the line, simply add # at the front.
#training
i <- 1
trainxone <- list()
for(i in 1:60000){
trainxone[[i]] <- image_read(trainxjpeg[[i]], density = NULL, depth = NULL, strip = FALSE)
image_reducenoise(trainxone[i])
image_contrast(trainxone[i], sharpen = 1)
image_normalize(trainxone[i])
image_equalize(trainxone[i])
image_quantize(trainxone[i])
image_convert(trainxone[[i]], "jpg")
i <- i + 1
}
#testing
i <- 1
testxone <- list()
for(i in 1:10000){
testxone[[i]] <- image_read(testxjpeg[[i]], density = NULL, depth = NULL, strip = FALSE)
image_reducenoise(testxone[i])
image_contrast(testxone[i], sharpen = 1)
image_normalize(testxone[i])
image_equalize(testxone[i])
image_quantize(testxone[i])
image_convert(testxone[[i]], "jpg")
i <- i + 1
} | /imagemagick.R | permissive | ZH1275/computer-vision-image-processing-mnist | R | false | false | 789 | r | #To disable the line, simply add # at the front.
#training
i <- 1
trainxone <- list()
for(i in 1:60000){
trainxone[[i]] <- image_read(trainxjpeg[[i]], density = NULL, depth = NULL, strip = FALSE)
image_reducenoise(trainxone[i])
image_contrast(trainxone[i], sharpen = 1)
image_normalize(trainxone[i])
image_equalize(trainxone[i])
image_quantize(trainxone[i])
image_convert(trainxone[[i]], "jpg")
i <- i + 1
}
#testing
i <- 1
testxone <- list()
for(i in 1:10000){
testxone[[i]] <- image_read(testxjpeg[[i]], density = NULL, depth = NULL, strip = FALSE)
image_reducenoise(testxone[i])
image_contrast(testxone[i], sharpen = 1)
image_normalize(testxone[i])
image_equalize(testxone[i])
image_quantize(testxone[i])
image_convert(testxone[[i]], "jpg")
i <- i + 1
} |
t2002<-Newpop
t2003<-projection(allsurv2002,allEMflow2002,allImm2002,allinm2002,alloutm2002,allfert2002,mixingmatrix,bm,bf,data.frame(t2002),GORSlist2)
t2004<-projection(allsurv2003,allEMflow2003,allImm2003,allinm2003,alloutm2003,allfert2003,mixingmatrix,bm,bf,data.frame(t2003[1]),GORSlist2)
t2005<-projection(allsurv2004,allEMflow2004,allImm2004,allinm2004,alloutm2004,allfert2004,mixingmatrix,bm,bf,data.frame(t2004[1]),GORSlist2)
t2006<-projection(allsurv2005,allEMflow2005,allImm2005,allinm2005,alloutm2005,allfert2005,mixingmatrix,bm,bf,data.frame(t2005[1]),GORSlist2)
t2007<-projection(allsurv2006,allEMflow2006,allImm2006,allinm2006,alloutm2006,allfert2006,mixingmatrix,bm,bf,data.frame(t2006[1]),GORSlist2)
t2008<-projection(allsurv2007,allEMflow2007,allImm2007,allinm2007,alloutm2007,allfert2007,mixingmatrix,bm,bf,data.frame(t2007[1]),GORSlist2)
t2009<-projection(allsurv2008,allEMflow2008,allImm2008,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2008[1]),GORSlist2)
t2010<-projection(allsurv2009,allEMflow2009,allImm2009,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2009[1]),GORSlist2)
t2011<-projection(allsurv2010,allEMflow2010,allImm2010,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2010[1]),GORSlist2)
t2012<-projection(allsurv2011,allEMflow2011,allImm2011,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2011[1]),GORSlist2)
t2013<-projection(allsurv2012,allEMflow2012,allImm2012,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2012[1]),GORSlist2)
t2014<-projection(allsurv2013,allEMflow2013,allImm2013,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2013[1]),GORSlist2)
t2015<-projection(allsurv2014,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2014[1]),GORSlist2)
t2016<-projection(allsurv2015,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2015[1]),GORSlist2)
t2017<-projection(allsurv2016,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2016[1]),GORSlist2)
t2018<-projection(allsurv2017,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2017[1]),GORSlist2)
t2019<-projection(allsurv2018,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2018[1]),GORSlist2)
t2020<-projection(allsurv2019,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2019[1]),GORSlist2)
t2021<-projection(allsurv2020,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2020[1]),GORSlist2)
t2022<-projection(allsurv2021,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2021[1]),GORSlist2)
t2023<-projection(allsurv2022,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2022[1]),GORSlist2)
t2024<-projection(allsurv2023,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2023[1]),GORSlist2)
t2025<-projection(allsurv2024,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2024[1]),GORSlist2)
t2026<-projection(allsurv2025,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2025[1]),GORSlist2)
t2027<-projection(allsurv2026,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2026[1]),GORSlist2)
t2028<-projection(allsurv2027,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2027[1]),GORSlist2)
t2029<-projection(allsurv2028,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2028[1]),GORSlist2)
t2030<-projection(allsurv2029,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2029[1]),GORSlist2)
t2031<-projection(allsurv2030,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2030[1]),GORSlist2)
t2032<-projection(allsurv2031,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2031[1]),GORSlist2)
t2033<-projection(allsurv2032,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2032[1]),GORSlist2)
t2034<-projection(allsurv2033,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2033[1]),GORSlist2)
t2035<-projection(allsurv2034,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2034[1]),GORSlist2)
t2036<-projection(allsurv2035,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2035[1]),GORSlist2)
t2037<-projection(allsurv2036,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2036[1]),GORSlist2)
t2038<-projection(allsurv2037,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2037[1]),GORSlist2)
t2039<-projection(allsurv2038,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2038[1]),GORSlist2)
t2040<-projection(allsurv2039,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2039[1]),GORSlist2)
t2041<-projection(allsurv2040,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2040[1]),GORSlist2)
t2042<-projection(allsurv2041,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2041[1]),GORSlist2)
t2043<-projection(allsurv2042,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2042[1]),GORSlist2)
t2044<-projection(allsurv2043,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2043[1]),GORSlist2)
t2045<-projection(allsurv2044,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2044[1]),GORSlist2)
t2046<-projection(allsurv2045,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2045[1]),GORSlist2)
t2047<-projection(allsurv2046,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2046[1]),GORSlist2)
t2048<-projection(allsurv2047,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2047[1]),GORSlist2)
t2049<-projection(allsurv2048,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2048[1]),GORSlist2)
t2050<-projection(allsurv2049,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2049[1]),GORSlist2)
t2051<-projection(allsurv2050,allEMflow2014,allImm2014,allinm2007,alloutm2007,allfert2008,mixingmatrix,bm,bf,data.frame(t2050[1]),GORSlist2)
compnames<-c("Deaths","Births","Immig","Emig","out.mig","fininmig","Startpop","Endpop")
comp2002<-data.frame(components)
comp2003<-data.frame(t2003[2])
comp2004<-data.frame(t2004[2])
comp2005<-data.frame(t2005[2])
comp2006<-data.frame(t2006[2])
comp2007<-data.frame(t2007[2])
comp2008<-data.frame(t2008[2])
comp2009<-data.frame(t2009[2])
comp2010<-data.frame(t2010[2])
comp2011<-data.frame(t2011[2])
comp2012<-data.frame(t2012[2])
comp2013<-data.frame(t2013[2])
comp2014<-data.frame(t2014[2])
comp2015<-data.frame(t2015[2])
comp2016<-data.frame(t2016[2])
comp2017<-data.frame(t2017[2])
comp2018<-data.frame(t2018[2])
comp2019<-data.frame(t2019[2])
comp2020<-data.frame(t2020[2])
comp2021<-data.frame(t2021[2])
comp2022<-data.frame(t2022[2])
comp2023<-data.frame(t2023[2])
comp2024<-data.frame(t2024[2])
comp2025<-data.frame(t2025[2])
comp2026<-data.frame(t2026[2])
comp2027<-data.frame(t2027[2])
comp2028<-data.frame(t2028[2])
comp2029<-data.frame(t2029[2])
comp2030<-data.frame(t2030[2])
comp2031<-data.frame(t2031[2])
comp2032<-data.frame(t2032[2])
comp2033<-data.frame(t2033[2])
comp2034<-data.frame(t2034[2])
comp2035<-data.frame(t2035[2])
comp2036<-data.frame(t2036[2])
comp2037<-data.frame(t2037[2])
comp2038<-data.frame(t2038[2])
comp2039<-data.frame(t2039[2])
comp2040<-data.frame(t2040[2])
comp2041<-data.frame(t2041[2])
comp2042<-data.frame(t2042[2])
comp2043<-data.frame(t2043[2])
comp2044<-data.frame(t2044[2])
comp2045<-data.frame(t2045[2])
comp2046<-data.frame(t2046[2])
comp2047<-data.frame(t2047[2])
comp2048<-data.frame(t2048[2])
comp2049<-data.frame(t2049[2])
comp2050<-data.frame(t2050[2])
comp2051<-data.frame(t2051[2])
colnames(comp2002)<-compnames
colnames(comp2003)<-compnames
colnames(comp2004)<-compnames
colnames(comp2005)<-compnames
colnames(comp2006)<-compnames
colnames(comp2007)<-compnames
colnames(comp2008)<-compnames
colnames(comp2009)<-compnames
colnames(comp2010)<-compnames
colnames(comp2011)<-compnames
colnames(comp2012)<-compnames
colnames(comp2013)<-compnames
colnames(comp2014)<-compnames
colnames(comp2015)<-compnames
colnames(comp2016)<-compnames
colnames(comp2017)<-compnames
colnames(comp2018)<-compnames
colnames(comp2019)<-compnames
colnames(comp2020)<-compnames
colnames(comp2021)<-compnames
colnames(comp2022)<-compnames
colnames(comp2023)<-compnames
colnames(comp2024)<-compnames
colnames(comp2025)<-compnames
colnames(comp2026)<-compnames
colnames(comp2027)<-compnames
colnames(comp2028)<-compnames
colnames(comp2029)<-compnames
colnames(comp2030)<-compnames
colnames(comp2031)<-compnames
colnames(comp2032)<-compnames
colnames(comp2033)<-compnames
colnames(comp2034)<-compnames
colnames(comp2035)<-compnames
colnames(comp2036)<-compnames
colnames(comp2037)<-compnames
colnames(comp2038)<-compnames
colnames(comp2039)<-compnames
colnames(comp2040)<-compnames
colnames(comp2041)<-compnames
colnames(comp2042)<-compnames
colnames(comp2043)<-compnames
colnames(comp2044)<-compnames
colnames(comp2045)<-compnames
colnames(comp2046)<-compnames
colnames(comp2047)<-compnames
colnames(comp2048)<-compnames
colnames(comp2049)<-compnames
colnames(comp2050)<-compnames
colnames(comp2051)<-compnames
Deaths0151<-cbind(comp2002$Deaths,
comp2003$Deaths,
comp2004$Deaths,
comp2005$Deaths,
comp2006$Deaths,
comp2007$Deaths,
comp2008$Deaths,
comp2009$Deaths,
comp2010$Deaths,
comp2011$Deaths,
comp2012$Deaths,
comp2013$Deaths,
comp2014$Deaths,
comp2015$Deaths,
comp2016$Deaths,
comp2017$Deaths,
comp2018$Deaths,
comp2019$Deaths,
comp2020$Deaths,
comp2021$Deaths,
comp2022$Deaths,
comp2023$Deaths,
comp2024$Deaths,
comp2025$Deaths,
comp2026$Deaths,
comp2027$Deaths,
comp2028$Deaths,
comp2029$Deaths,
comp2030$Deaths,
comp2031$Deaths,
comp2032$Deaths,
comp2033$Deaths,
comp2034$Deaths,
comp2035$Deaths,
comp2036$Deaths,
comp2037$Deaths,
comp2038$Deaths,
comp2039$Deaths,
comp2040$Deaths,
comp2041$Deaths,
comp2042$Deaths,
comp2043$Deaths,
comp2044$Deaths,
comp2045$Deaths,
comp2046$Deaths,
comp2047$Deaths,
comp2048$Deaths,
comp2049$Deaths,
comp2050$Deaths,
comp2051$Deaths)
Births0151<-cbind(comp2002$Births,
comp2003$Births,
comp2004$Births,
comp2005$Births,
comp2006$Births,
comp2007$Births,
comp2008$Births,
comp2009$Births,
comp2010$Births,
comp2011$Births,
comp2012$Births,
comp2013$Births,
comp2014$Births,
comp2015$Births,
comp2016$Births,
comp2017$Births,
comp2018$Births,
comp2019$Births,
comp2020$Births,
comp2021$Births,
comp2022$Births,
comp2023$Births,
comp2024$Births,
comp2025$Births,
comp2026$Births,
comp2027$Births,
comp2028$Births,
comp2029$Births,
comp2030$Births,
comp2031$Births,
comp2032$Births,
comp2033$Births,
comp2034$Births,
comp2035$Births,
comp2036$Births,
comp2037$Births,
comp2038$Births,
comp2039$Births,
comp2040$Births,
comp2041$Births,
comp2042$Births,
comp2043$Births,
comp2044$Births,
comp2045$Births,
comp2046$Births,
comp2047$Births,
comp2048$Births,
comp2049$Births,
comp2050$Births,
comp2051$Births)
Immig0151<-cbind(comp2002$Immig,
comp2003$Immig,
comp2004$Immig,
comp2005$Immig,
comp2006$Immig,
comp2007$Immig,
comp2008$Immig,
comp2009$Immig,
comp2010$Immig,
comp2011$Immig,
comp2012$Immig,
comp2013$Immig,
comp2014$Immig,
comp2015$Immig,
comp2016$Immig,
comp2017$Immig,
comp2018$Immig,
comp2019$Immig,
comp2020$Immig,
comp2021$Immig,
comp2022$Immig,
comp2023$Immig,
comp2024$Immig,
comp2025$Immig,
comp2026$Immig,
comp2027$Immig,
comp2028$Immig,
comp2029$Immig,
comp2030$Immig,
comp2031$Immig,
comp2032$Immig,
comp2033$Immig,
comp2034$Immig,
comp2035$Immig,
comp2036$Immig,
comp2037$Immig,
comp2038$Immig,
comp2039$Immig,
comp2040$Immig,
comp2041$Immig,
comp2042$Immig,
comp2043$Immig,
comp2044$Immig,
comp2045$Immig,
comp2046$Immig,
comp2047$Immig,
comp2048$Immig,
comp2049$Immig,
comp2050$Immig,
comp2051$Immig)
Emig0151<-cbind(comp2002$Emig,
comp2003$Emig,
comp2004$Emig,
comp2005$Emig,
comp2006$Emig,
comp2007$Emig,
comp2008$Emig,
comp2009$Emig,
comp2010$Emig,
comp2011$Emig,
comp2012$Emig,
comp2013$Emig,
comp2014$Emig,
comp2015$Emig,
comp2016$Emig,
comp2017$Emig,
comp2018$Emig,
comp2019$Emig,
comp2020$Emig,
comp2021$Emig,
comp2022$Emig,
comp2023$Emig,
comp2024$Emig,
comp2025$Emig,
comp2026$Emig,
comp2027$Emig,
comp2028$Emig,
comp2029$Emig,
comp2030$Emig,
comp2031$Emig,
comp2032$Emig,
comp2033$Emig,
comp2034$Emig,
comp2035$Emig,
comp2036$Emig,
comp2037$Emig,
comp2038$Emig,
comp2039$Emig,
comp2040$Emig,
comp2041$Emig,
comp2042$Emig,
comp2043$Emig,
comp2044$Emig,
comp2045$Emig,
comp2046$Emig,
comp2047$Emig,
comp2048$Emig,
comp2049$Emig,
comp2050$Emig,
comp2051$Emig)
out.mig0151<-cbind(comp2002$out.mig,
comp2003$out.mig,
comp2004$out.mig,
comp2005$out.mig,
comp2006$out.mig,
comp2007$out.mig,
comp2008$out.mig,
comp2009$out.mig,
comp2010$out.mig,
comp2011$out.mig,
comp2012$out.mig,
comp2013$out.mig,
comp2014$out.mig,
comp2015$out.mig,
comp2016$out.mig,
comp2017$out.mig,
comp2018$out.mig,
comp2019$out.mig,
comp2020$out.mig,
comp2021$out.mig,
comp2022$out.mig,
comp2023$out.mig,
comp2024$out.mig,
comp2025$out.mig,
comp2026$out.mig,
comp2027$out.mig,
comp2028$out.mig,
comp2029$out.mig,
comp2030$out.mig,
comp2031$out.mig,
comp2032$out.mig,
comp2033$out.mig,
comp2034$out.mig,
comp2035$out.mig,
comp2036$out.mig,
comp2037$out.mig,
comp2038$out.mig,
comp2039$out.mig,
comp2040$out.mig,
comp2041$out.mig,
comp2042$out.mig,
comp2043$out.mig,
comp2044$out.mig,
comp2045$out.mig,
comp2046$out.mig,
comp2047$out.mig,
comp2048$out.mig,
comp2049$out.mig,
comp2050$out.mig,
comp2051$out.mig)
fininmig0151<-cbind(comp2002$fininmig,
comp2003$fininmig,
comp2004$fininmig,
comp2005$fininmig,
comp2006$fininmig,
comp2007$fininmig,
comp2008$fininmig,
comp2009$fininmig,
comp2010$fininmig,
comp2011$fininmig,
comp2012$fininmig,
comp2013$fininmig,
comp2014$fininmig,
comp2015$fininmig,
comp2016$fininmig,
comp2017$fininmig,
comp2018$fininmig,
comp2019$fininmig,
comp2020$fininmig,
comp2021$fininmig,
comp2022$fininmig,
comp2023$fininmig,
comp2024$fininmig,
comp2025$fininmig,
comp2026$fininmig,
comp2027$fininmig,
comp2028$fininmig,
comp2029$fininmig,
comp2030$fininmig,
comp2031$fininmig,
comp2032$fininmig,
comp2033$fininmig,
comp2034$fininmig,
comp2035$fininmig,
comp2036$fininmig,
comp2037$fininmig,
comp2038$fininmig,
comp2039$fininmig,
comp2040$fininmig,
comp2041$fininmig,
comp2042$fininmig,
comp2043$fininmig,
comp2044$fininmig,
comp2045$fininmig,
comp2046$fininmig,
comp2047$fininmig,
comp2048$fininmig,
comp2049$fininmig,
comp2050$fininmig,
comp2051$fininmig)
Startpop0151<-cbind(comp2002$Startpop,
comp2003$Startpop,
comp2004$Startpop,
comp2005$Startpop,
comp2006$Startpop,
comp2007$Startpop,
comp2008$Startpop,
comp2009$Startpop,
comp2010$Startpop,
comp2011$Startpop,
comp2012$Startpop,
comp2013$Startpop,
comp2014$Startpop,
comp2015$Startpop,
comp2016$Startpop,
comp2017$Startpop,
comp2018$Startpop,
comp2019$Startpop,
comp2020$Startpop,
comp2021$Startpop,
comp2022$Startpop,
comp2023$Startpop,
comp2024$Startpop,
comp2025$Startpop,
comp2026$Startpop,
comp2027$Startpop,
comp2028$Startpop,
comp2029$Startpop,
comp2030$Startpop,
comp2031$Startpop,
comp2032$Startpop,
comp2033$Startpop,
comp2034$Startpop,
comp2035$Startpop,
comp2036$Startpop,
comp2037$Startpop,
comp2038$Startpop,
comp2039$Startpop,
comp2040$Startpop,
comp2041$Startpop,
comp2042$Startpop,
comp2043$Startpop,
comp2044$Startpop,
comp2045$Startpop,
comp2046$Startpop,
comp2047$Startpop,
comp2048$Startpop,
comp2049$Startpop,
comp2050$Startpop,
comp2051$Startpop)
Endpop0151<-cbind(comp2002$Endpop,
comp2003$Endpop,
comp2004$Endpop,
comp2005$Endpop,
comp2006$Endpop,
comp2007$Endpop,
comp2008$Endpop,
comp2009$Endpop,
comp2010$Endpop,
comp2011$Endpop,
comp2012$Endpop,
comp2013$Endpop,
comp2014$Endpop,
comp2015$Endpop,
comp2016$Endpop,
comp2017$Endpop,
comp2018$Endpop,
comp2019$Endpop,
comp2020$Endpop,
comp2021$Endpop,
comp2022$Endpop,
comp2023$Endpop,
comp2024$Endpop,
comp2025$Endpop,
comp2026$Endpop,
comp2027$Endpop,
comp2028$Endpop,
comp2029$Endpop,
comp2030$Endpop,
comp2031$Endpop,
comp2032$Endpop,
comp2033$Endpop,
comp2034$Endpop,
comp2035$Endpop,
comp2036$Endpop,
comp2037$Endpop,
comp2038$Endpop,
comp2039$Endpop,
comp2040$Endpop,
comp2041$Endpop,
comp2042$Endpop,
comp2043$Endpop,
comp2044$Endpop,
comp2045$Endpop,
comp2046$Endpop,
comp2047$Endpop,
comp2048$Endpop,
comp2049$Endpop,
comp2050$Endpop,
comp2051$Endpop)
colnames(Deaths0151)<-2002:2051
colnames(Births0151)<-2002:2051
colnames(Immig0151)<-2002:2051
colnames(Emig0151)<-2002:2051
colnames(fininmig0151)<-2002:2051
colnames(Startpop0151)<-2002:2051
colnames(Endpop0151)<-2002:2051
colnames(out.mig0151)<-2002:2051
# par(mfrow=c(4,4))
# par(mar=c(1,2,2,1))
#
# for(i in 1:16) plot(Births0151[i,], col=i)
# par(mfrow=c(4,4))
# par(mar=c(1,2,2,1))
# for(i in 1:16) plot(out.mig0151[i,], col=i)
# par(mfrow=c(4,4))
# par(mar=c(1,2,2,1))
# for(i in 1:16) plot(Deaths[i,], col=i)
#for i in 1:16
#plot(Births0151b[1,])
#points(Deaths[1,])
#par(mfrow=c(2,4))
#plot(Deaths[1,])
#plot(Deaths[2,])
#plot(Deaths[3,])
#plot(Deaths[4,])
#plot(Deaths[5,])
#plot(Deaths[6,])
#plot(Deaths[7,])
#plot(Deaths[8,])
#plot(pop20012051)
# Total population in each projection year (2001-2051).  `t2002` is already a
# plain population table; every later year is a projection() result list whose
# first element holds the population, hence the `[1]` extraction.
pop20012051 <- c(sum(population),
                 sum(t2002),
                 vapply(2003:2051,
                        function(yr) sum(data.frame(get(paste0("t", yr))[1])),
                        numeric(1)))
write.csv(pop20012051, "TRENDEFV220012051.csv")
| /Runmodel_em_trend_comp_EF_flow_li.r | no_license | ETHPOP/BSPS2015Model | R | false | false | 19,330 | r | t2002<-Newpop
# Roll the population forward one year at a time (2003-2051).  Annual rate
# inputs are used up to the last year they are defined and then held constant:
# internal in/out-migration after 2007, fertility after 2008, EM flows and
# international immigration after 2014 (presumably the last observed years --
# TODO confirm these cut-offs against the input data).  The starting population
# for 2003 is t2002 itself; for every later year it is the first element of
# the previous year's projection() result.  This replaces 49 near-identical
# hand-written calls; the min() clamps reproduce the original year pattern
# exactly.
for (yr in 2003:2051) {
  startpop <- if (yr == 2003) data.frame(t2002) else data.frame(get(paste0("t", yr - 1))[1])
  assign(paste0("t", yr),
         projection(get(paste0("allsurv", yr - 1)),
                    get(paste0("allEMflow", min(yr - 1, 2014))),
                    get(paste0("allImm", min(yr - 1, 2014))),
                    get(paste0("allinm", min(yr - 1, 2007))),
                    get(paste0("alloutm", min(yr - 1, 2007))),
                    get(paste0("allfert", min(yr - 1, 2008))),
                    mixingmatrix, bm, bf, startpop, GORSlist2))
}
# Per-year component-of-change tables.  Each projection() result stores its
# components as the second list element; give every table the same readable
# column names.  Replaces 100 repetitive assignment/colnames lines.
compnames <- c("Deaths", "Births", "Immig", "Emig", "out.mig", "fininmig", "Startpop", "Endpop")
# 2002 comes from the base-year `components` object rather than a t-list.
comp2002 <- data.frame(components)
colnames(comp2002) <- compnames
for (yr in 2003:2051) {
  comp <- data.frame(get(paste0("t", yr))[2])
  colnames(comp) <- compnames
  assign(paste0("comp", yr), comp)
}
# Assemble one 50-column matrix per component of change: rows follow the
# per-year component tables comp2002..comp2051, columns are the projection
# years 2002-2051.  Replaces eight 50-line hand-unrolled cbind() calls plus
# eight colnames() assignments.
years0151 <- 2002:2051
# Column-bind one named component across all years and label the columns.
component_matrix <- function(component) {
  m <- do.call(cbind, lapply(years0151, function(yr) get(paste0("comp", yr))[[component]]))
  colnames(m) <- years0151
  m
}
Deaths0151   <- component_matrix("Deaths")
Births0151   <- component_matrix("Births")
Immig0151    <- component_matrix("Immig")
Emig0151     <- component_matrix("Emig")
out.mig0151  <- component_matrix("out.mig")
fininmig0151 <- component_matrix("fininmig")
Startpop0151 <- component_matrix("Startpop")
Endpop0151   <- component_matrix("Endpop")
# par(mfrow=c(4,4))
# par(mar=c(1,2,2,1))
#
# for(i in 1:16) plot(Births0151[i,], col=i)
# par(mfrow=c(4,4))
# par(mar=c(1,2,2,1))
# for(i in 1:16) plot(out.mig0151[i,], col=i)
# par(mfrow=c(4,4))
# par(mar=c(1,2,2,1))
# for(i in 1:16) plot(Deaths[i,], col=i)
#for i in 1:16
#plot(Births0151b[1,])
#points(Deaths[1,])
#par(mfrow=c(2,4))
#plot(Deaths[1,])
#plot(Deaths[2,])
#plot(Deaths[3,])
#plot(Deaths[4,])
#plot(Deaths[5,])
#plot(Deaths[6,])
#plot(Deaths[7,])
#plot(Deaths[8,])
#plot(pop20012051)
# Total population in each projection year (2001-2051).  `t2002` is already a
# plain population table; every later year is a projection() result list whose
# first element holds the population, hence the `[1]` extraction.
pop20012051 <- c(sum(population),
                 sum(t2002),
                 vapply(2003:2051,
                        function(yr) sum(data.frame(get(paste0("t", yr))[1])),
                        numeric(1)))
write.csv(pop20012051, "TRENDEFV220012051.csv")
|
# Parse a minimal INI-style file into a named list.
#
# Section headers ("[...]") are neutralised by turning "[" into "#", so every
# remaining "key = value" line is kept regardless of which section it sits in.
# Whitespace is stripped from keys and values; a value containing no letters
# and no commas is converted to numeric, anything else stays a string.
read.ini <- function(filename) {
  lines <- readLines(filename)
  # Hide section headers: "[section]" becomes "#section]" and is later
  # skipped because it carries no "=".
  lines <- gsub("[", "#", lines, fixed = TRUE)
  result <- list()
  for (line in lines) {
    # Skip lines that contain a literal backslash-n sequence.
    if (grepl("\\\\n", line)) next
    fields <- strsplit(line, "=", fixed = TRUE)[[1]]
    if (length(fields) < 2) next
    key <- gsub(" ", "", fields[1], fixed = TRUE)
    value <- gsub(" ", "", fields[2], fixed = TRUE)
    if (grepl("[a-zA-Z,]", value)) {
      result[key] <- value            # textual value kept as a string
    } else {
      result[key] <- as.numeric(value)  # purely numeric value
    }
  }
  result
}
| /R/parseini.r | no_license | tlamadon/Utils | R | false | false | 594 | r | read.ini <- function(filename)
{
# Body of read.ini(filename): parse a minimal INI-style file into a
# named list of key/value pairs.
connection <- file(filename)
Lines <- readLines(connection)
close(connection)
Lines <- chartr("[=", "#=", Lines) # change section headers ("[" -> "#")
res = list()
for (l in Lines) {
# skip lines containing a literal backslash-n sequence
if (length(grep("\\\\n", l))==0) {
ls = strsplit(l, "=")[[1]]
# keep only "key=value" lines (section headers have no "=")
if( length(ls)<2) next;
# remove all spaces
value = gsub(" ", "",ls[2])
key = gsub(" ", "",ls[1])
# check if we have a numeric, and store it as such
# (anything containing a letter or comma stays a string)
if (length(grep("[a-zA-Z\\,]", value))==0) {
res[key]=as.numeric(value[1])
} else {
res[key]=value
}
}
}
return(res)
}
|
#' Computes the Matrix Profile or Pan-Matrix Profile
#'
#' Main API Function
#'
#' Depending on `sample_pct`, either the exact Matrix Profile (via MPX) or an
#' approximation (via SCRIMP++) is computed. See details for how the
#' argument combinations are interpreted.
#'
#' @param ts a `matrix` or a `vector`. The time series to analyze.
#' @param query a `matrix` or a `vector`. Optional. The query to analyze. Note that when computing the Pan-Matrix Profile
#' the query is ignored!
#' @param windows an `int` or a `vector`. The window(s) to compute the Matrix Profile. Note that it may be an `int`
#' for a single matrix profile computation or a `vector` of `int` for computing the Pan-Matrix Profile.
#' @param sample_pct a `numeric`. A number between 0 and 1 representing how many samples to compute for
#' the Matrix Profile or Pan-Matrix Profile. When it is 1, the exact algorithm is used. (default is `1.0`).
#' @param threshold a `numeric`. Correlation threshold. See details. (Default is `0.98`).
#' @param n_jobs an `int`. The number of cpu cores to use when computing the MatrixProfile. (default is `1`).
#'
#' @details
#'
#' With a single value in `windows`, the Matrix Profile is computed: an
#' AB-join when a `query` is given, a self-join otherwise.
#' With several `windows` (or none at all), the Pan-Matrix Profile is
#' computed. If a `threshold` is set (it is, by default), the upper bound is
#' computed first, and only the given `windows` -- or a default range when
#' none are given -- below that upper bound are evaluated.
#'
#' @return
#' The profile computed.
#'
#' @export
#'
#' @family Main API
#'
#' @references Website: <http://www.cs.ucr.edu/~eamonn/MatrixProfile.html>
#'
#' @examples
#'
#' # Matrix Profile
#' result <- compute(mp_toy_data$data[, 1], 80)
#' \donttest{
#' # Pan-Matrix Profile
#' result <- compute(mp_toy_data$data[, 1])
#' }
compute <- function(ts, windows = NULL, query = NULL, sample_pct = 1.0, threshold = 0.98, n_jobs = 1L) {
  # Validate arguments -------------------------------------------------------
  checkmate::qassert(ts, "N+")
  windows <- checkmate::qassert(windows, c("0", "X+[4,)"))
  checkmate::qassert(query, c("0", "N>=4"))
  checkmate::qassert(sample_pct, "N1(0,1]")
  checkmate::qassert(threshold, c("0", "N1(0,1]"))
  n_jobs <- as.integer(checkmate::qassert(n_jobs, paste0("X1[1,", parallel::detectCores(), "]")))

  metric <- "euclidean"
  single_window <- (length(windows) == 1)
  # An AB-join only happens in the single-window case with a query present.
  is_join <- single_window && !is.null(query)

  if (single_window) {
    # Matrix Profile: exact (mpx) when sample_pct >= 1, approximate (scrimp)
    # otherwise.  A supplied query turns the mpx call into an AB-join.
    if (sample_pct >= 1) {
      used_algo <- "mpx"
      if (is.null(query)) {
        profile <- mpx(data = ts, window_size = windows, idx = TRUE, dist = metric, n_workers = n_jobs)
      } else {
        profile <- mpx(data = ts, query = query, window_size = windows, idx = TRUE, dist = metric, n_workers = n_jobs)
      }
    } else {
      # TODO: scrimp has no AB-join yet, so the query is not used here.
      used_algo <- "scrimp"
      profile <- scrimp(ts, window_size = windows, s_size = floor(sample_pct * length(ts))) # n_jobs
    }
  } else {
    # Pan-Matrix Profile -----------------------------------------------------
    used_algo <- "pmp"
    profile <- NULL
    if (!is.null(threshold)) {
      # A threshold means we first compute the upper-bound window size.
      profile <- pmp_upper_bound(data = ts, threshold = threshold, n_workers = n_jobs)
    }
    window_cap <- min(length(ts) / 2, profile$upper_window)
    if (is.null(windows)) {
      # No windows given: span 20 window sizes from 10 up to the cap.
      windows <- seq.int(from = 10, to = window_cap, length.out = 20)
    } else {
      # Otherwise keep only the requested windows below the cap.
      windows <- windows[windows <= window_cap]
    }
    windows <- floor(windows)
    profile <- pmp(data = ts, window_sizes = windows, plot = FALSE, pmp_obj = profile, n_workers = n_jobs)
  }

  # Build the result object --------------------------------------------------
  # (re-check the window count: the PMP branch may have altered `windows`)
  if (length(windows) == 1) {
    result <- list(
      mp = as.matrix(profile$mp),
      pi = as.matrix(profile$pi),
      rmp = NULL,
      rpi = NULL,
      lmp = NULL,
      lpi = NULL,
      w = windows,
      ez = profile$ez
    )
    class(result) <- "MatrixProfile"
  } else {
    result <- profile
    class(result) <- "PMP"
  }

  result$sample_pct <- sample_pct
  result$data <- list(
    ts = as.vector(ts),
    query = as.vector(query)
  )

  attr(result, "join") <- is_join
  attr(result, "metric") <- metric
  attr(result, "algorithm") <- used_algo

  invisible(result)
}
| /tsmp/R/compute.R | permissive | akhikolla/TestedPackages-NoIssues | R | false | false | 5,193 | r | #' Computes the Matrix Profile or Pan-Matrix Profile
#'
#' Main API Function
#'
#' Computes the exact or approximate Matrix Profile based on the sample percent
#' specified. Currently, MPX and SCRIMP++ are used for the exact and
#' approximate algorithms respectively. See details for more information about the arguments
#' combinations.
#'
#' @param ts a `matrix` or a `vector`. The time series to analyze.
#' @param query a `matrix` or a `vector`. Optional The query to analyze. Note that when computing the Pan-Matrix Profile
#' the query is ignored!
#' @param windows an `int` or a `vector`. The window(s) to compute the Matrix Profile. Note that it may be an `int`
#' for a single matrix profile computation or a `vector` of `int` for computing the Pan-Matrix Profile.
#' @param sample_pct a `numeric`. A number between 0 and 1 representing how many samples to compute for
#' the Matrix Profile or Pan-Matrix Profile. When it is 1, the exact algorithm is used. (default is `1.0`).
#' @param threshold a `numeric`. Correlation threshold. See details. (Default is `0.98`).
#' @param n_jobs an `int`. The number of cpu cores to use when computing the MatrixProfile. (default is `1`).
#'
#' @details
#'
#' When a single `windows` is given, the Matrix Profile is computed. If a `query` is provided, AB join is computed.
#' Otherwise the self-join is computed.
#' When multiple `windows` or none are given, the Pan-Matrix Profile is computed. If a `threshold` is set (it is,
#' by default), the upper bound will be computed and the given `windows` or a default range (when no `windows`), below
#' the upper bound will be computed.
#'
#' @return
#' The profile computed.
#'
#' @export
#'
#' @family Main API
#'
#' @references Website: <http://www.cs.ucr.edu/~eamonn/MatrixProfile.html>
#'
#' @examples
#'
#' # Matrix Profile
#' result <- compute(mp_toy_data$data[, 1], 80)
#' \donttest{
#' # Pan-Matrix Profile
#' result <- compute(mp_toy_data$data[, 1])
#' }
compute <- function(ts, windows = NULL, query = NULL, sample_pct = 1.0, threshold = 0.98, n_jobs = 1L) {
  # Validate arguments -------------------------------------------------------
  checkmate::qassert(ts, "N+")
  windows <- checkmate::qassert(windows, c("0", "X+[4,)"))
  checkmate::qassert(query, c("0", "N>=4"))
  checkmate::qassert(sample_pct, "N1(0,1]")
  checkmate::qassert(threshold, c("0", "N1(0,1]"))
  n_jobs <- as.integer(checkmate::qassert(n_jobs, paste0("X1[1,", parallel::detectCores(), "]")))

  metric <- "euclidean"
  single_window <- (length(windows) == 1)
  # An AB-join only happens in the single-window case with a query present.
  is_join <- single_window && !is.null(query)

  if (single_window) {
    # Matrix Profile: exact (mpx) when sample_pct >= 1, approximate (scrimp)
    # otherwise.  A supplied query turns the mpx call into an AB-join.
    if (sample_pct >= 1) {
      used_algo <- "mpx"
      if (is.null(query)) {
        profile <- mpx(data = ts, window_size = windows, idx = TRUE, dist = metric, n_workers = n_jobs)
      } else {
        profile <- mpx(data = ts, query = query, window_size = windows, idx = TRUE, dist = metric, n_workers = n_jobs)
      }
    } else {
      # TODO: scrimp has no AB-join yet, so the query is not used here.
      used_algo <- "scrimp"
      profile <- scrimp(ts, window_size = windows, s_size = floor(sample_pct * length(ts))) # n_jobs
    }
  } else {
    # Pan-Matrix Profile -----------------------------------------------------
    used_algo <- "pmp"
    profile <- NULL
    if (!is.null(threshold)) {
      # A threshold means we first compute the upper-bound window size.
      profile <- pmp_upper_bound(data = ts, threshold = threshold, n_workers = n_jobs)
    }
    window_cap <- min(length(ts) / 2, profile$upper_window)
    if (is.null(windows)) {
      # No windows given: span 20 window sizes from 10 up to the cap.
      windows <- seq.int(from = 10, to = window_cap, length.out = 20)
    } else {
      # Otherwise keep only the requested windows below the cap.
      windows <- windows[windows <= window_cap]
    }
    windows <- floor(windows)
    profile <- pmp(data = ts, window_sizes = windows, plot = FALSE, pmp_obj = profile, n_workers = n_jobs)
  }

  # Build the result object --------------------------------------------------
  # (re-check the window count: the PMP branch may have altered `windows`)
  if (length(windows) == 1) {
    result <- list(
      mp = as.matrix(profile$mp),
      pi = as.matrix(profile$pi),
      rmp = NULL,
      rpi = NULL,
      lmp = NULL,
      lpi = NULL,
      w = windows,
      ez = profile$ez
    )
    class(result) <- "MatrixProfile"
  } else {
    result <- profile
    class(result) <- "PMP"
  }

  result$sample_pct <- sample_pct
  result$data <- list(
    ts = as.vector(ts),
    query = as.vector(query)
  )

  attr(result, "join") <- is_join
  attr(result, "metric") <- metric
  attr(result, "algorithm") <- used_algo

  invisible(result)
}
|
# Thin R-side wrapper around the compiled routine "hola" exposed by foo.so;
# the shared object is loaded from the working directory.
dyn.load("foo.so")
hola <- function() {
  .Call("hola")
}
dyn.load("foo.so")
hola <- function() {
result <- .Call("hola")
return(result)
} |
#' Reads a tibble into a csv file and creates a table into MonetDBLite
#'
#' MonetDBLite is a columnar storage architecture SQL database that runs inside
#' R. This function reads a tibble and stores it into a csv file.
#' In order to create a temporary server for columnar storage MonetDBLite, it
#' create a DBI connection, then it reads the csv file and creates the table
#' with specified column name.
#'
#' It also queries MonetDBLite database and provides results of the following
#' commonly asked queries:
#' How many total records are in the table?
#' Print output of select columns from the table.
#'
#' This function closes the MonetDBLite connection after fetching the data.
#'
#' @usage loadMonetDB(tibble, colName, tabName)
#'
#' @param tibble Tibble to be written into csv file
#' @param colName Column names for the table to load data
#' @param tabName Name of the table to load the data
#' @return Prints table name and output queries from MonetDB after loading the data
#'
#' @importFrom DBI dbConnect dbExistsTable dbRemoveTable dbDisconnect dbListTables
#' @importFrom MonetDBLite monetdb.read.csv monetdblite_shutdown src_monetdblite
#' @importFrom readr write_csv
#' @importFrom dplyr tbl %>% count select_all
#' @export
#'
#' @examples
#'#=======================================================================#
#'# Load into MonetDBLite and run queries
#'#=======================================================================#
#' MonetDBLite::monetdblite_shutdown()
#' dFile <- system.file("extdata", "lasc04765150.las", package = "adaboost")
#' lasOut <- readFile(dFile)
#' dataTibble <- lasSlotExtract(lasOut, dFile)$dataTibble
#' dataColName <- lasSlotExtract(lasOut, dFile)$dbColnamesData
#' loadMonetDB(dataTibble, dataColName, "slotdata" )
#' MonetDBLite::monetdblite_shutdown()
#'
loadMonetDB <- function(tibble, colName, tabName) {
  # Spool the tibble to a temporary csv file so MonetDB's bulk reader can load it.
  csvFile <- tempfile()
  readr::write_csv(tibble, csvFile)
  dbDir <- tempfile()
  # Make sure no stale embedded server from a previous call is still running.
  MonetDBLite::monetdblite_shutdown()
  con <- DBI::dbConnect(MonetDBLite::MonetDBLite(), dbDir)
  # Always release the connection and stop the embedded engine, even when a
  # query fails. (Fix: the original shut the engine down *before*
  # disconnecting, and leaked the connection on any error.)
  on.exit({
    DBI::dbDisconnect(con, shutdown = TRUE)
    MonetDBLite::monetdblite_shutdown()
  }, add = TRUE)
  # Bulk-load the csv into the requested table with the supplied column names.
  MonetDBLite::monetdb.read.csv(conn = con,
                                files = csvFile,
                                header = TRUE,
                                tablename = tabName,
                                col.names = colName)
  cat("Name of the Table loaded in the database: ", DBI::dbListTables(con), "\n")
  dbHandle <- MonetDBLite::src_monetdblite(dbDir)
  queryTbl <- dplyr::tbl(dbHandle, tabName)
  # Fix: query the table with ordinary function calls instead of assembling
  # code strings for eval(parse(...)); the original sprintf() formats were
  # malformed (two %s placeholders fed three arguments, triggering
  # unused-argument warnings).
  cat("Total no of records in table: ", tabName, "\n")
  print(dplyr::count(queryTbl))
  cat("Select data from the table: ", tabName, "\n")
  print(dplyr::select_all(queryTbl))
  invisible(NULL)
}
| /adaboost/R/loadMonetDB.R | no_license | adaboostmm/DataScience_R | R | false | false | 2,866 | r | #' Reads a tibble into a csv file and creates a table into MonetDBLite
#'
#' MonetDBLite is a columnar storage architecture SQL database that runs inside
#' R. This function reads a tibble and stores it into a csv file.
#' In order to create a temporary server for columnar storage MonetDBLite, it
#' create a DBI connection, then it reads the csv file and creates the table
#' with specified column name.
#'
#' It also queries MonetDBLite database and provides results of the following
#' commonly asked queries:
#' How many total records are in the table?
#' Print output of select columns from the table.
#'
#' This function closes the MonetDBLite connection after fecthing the data.
#'
#' @usage loadMonetDB(tibble, colName, tabName)
#'
#' @param tibble Tibble to be written into csv file
#' @param colName Column names for the table to load data
#' @param tabName Name of the table to load the data
#' @return Prints table name and output queries from MonetDB after loading the data
#'
#' @importFrom DBI dbConnect dbExistsTable dbRemoveTable dbDisconnect dbListTables
#' @importFrom MonetDBLite monetdb.read.csv monetdblite_shutdown src_monetdblite
#' @importFrom readr write_csv
#' @importFrom dplyr tbl %>% count select_all
#' @export
#'
#' @examples
#'#=======================================================================#
#'# Load into MonetDBLite and run queries
#'#=======================================================================#
#' MonetDBLite::monetdblite_shutdown()
#' dFile <- system.file("extdata", "lasc04765150.las", package = "adaboost")
#' lasOut <- readFile(dFile)
#' dataTibble <- lasSlotExtract(lasOut, dFile)$dataTibble
#' dataColName <- lasSlotExtract(lasOut, dFile)$dbColnamesData
#' loadMonetDB(dataTibble, dataColName, "slotdata" )
#' MonetDBLite::monetdblite_shutdown()
#'
loadMonetDB <- function(tibble, colName, tabName) {
csvFile <- tempfile()
readr::write_csv(tibble, csvFile)
dbDir <- tempfile()
MonetDBLite::monetdblite_shutdown()
con <- DBI::dbConnect(MonetDBLite::MonetDBLite(), dbDir )
MonetDBLite::monetdb.read.csv(conn = con,
files = csvFile,
header = TRUE,
tablename = tabName,
col.names = colName)
cat("Name of the Table loaded in the database: ",DBI::dbListTables(con), "\n")
dbHandle <- MonetDBLite::src_monetdblite(dbDir)
queryTbl <- dplyr::tbl(dbHandle, tabName)
nstr1 <- sprintf('%s %s count()', 'queryTbl', "%>%", "()" )
cat("Total no of records in table: ", tabName, "\n")
print(base::eval(parse(text = nstr1)))
cat("Select data from the table: ", tabName, "\n")
nstr2 <- sprintf('%s %s select_all()', 'queryTbl', "%>%", "()" )
print(base::eval(parse(text = nstr2)))
MonetDBLite::monetdblite_shutdown()
DBI::dbDisconnect(con, shutdown=TRUE)
}
|
### Load the red and white wine quality data ------------------------------
red <- read.csv('winequality-red.csv', sep = ";")
head(red)
white <- read.csv('winequality-white.csv', sep = ";")
head(white)
# Tag every row with its colour so the clusters can be checked afterwards
# (such a label would not exist in a genuine unsupervised problem).
red <- cbind(red, label = 'red')
head(red)
white <- cbind(white, label = 'white')
head(white)
# Stack both data sets into one data frame.
wine <- rbind(red, white)
str(wine)

### Exploratory data analysis ---------------------------------------------
library(ggplot2)
ggplot(wine, aes(residual.sugar)) + geom_histogram(aes(fill = factor(label)), alpha = 0.7) + theme_bw()
ggplot(wine, aes(citric.acid, fill = factor(label))) + geom_histogram(alpha = 0.7) + theme_bw()
ggplot(wine, aes(alcohol, fill = factor(label))) + geom_histogram(alpha = 0.7) + theme_bw()
ggplot(wine, aes(residual.sugar, citric.acid)) + geom_point(aes(color = factor(label)))
ggplot(wine, aes(volatile.acidity, residual.sugar)) + geom_point(aes(color = factor(label)))

### K-means clustering ----------------------------------------------------
str(wine)
features <- wine[, -13]  # drop the label column before clustering
head(features)
# Two clusters: does k-means rediscover the red/white split?
set.seed(101)
km_fit <- kmeans(features, 2, nstart = 20)  # data, k (groups), number of random starts
km_fit
# Check against the held-back labels (not available in the real world).
table(km_fit$cluster, wine$label)
# Four clusters: can the wines be split into finer categories for better prediction?
set.seed(101)
km_fit <- kmeans(features, 4, nstart = 20)
km_fit
table(km_fit$cluster, wine$label)
# Visualise the clusters against the two components that explain the most
# variability; of limited use when there are many features.
library(cluster)
clusplot(wine, km_fit$cluster, color = TRUE, shade = TRUE, lines = 0)
| /MyExercises/Machine Learning/06_KMeansClustering.R | no_license | ngupta23/ML-R | R | false | false | 2,034 | r | ### Read Data
df1 <- read.csv('winequality-red.csv',sep=";")
head(df1)
df2 <- read.csv('winequality-white.csv',sep=";")
head(df2)
# Add labels for later (this will not be available in real life)
df1 <- cbind(df1,label='red')
head(df1)
df2 <- cbind(df2,label='white')
head(df2)
# merge dataframes
wine <- rbind(df1,df2)
str(wine)
#################################
### Exploratory Data Analysis ###
#################################
library(ggplot2)
ggplot(wine,aes(residual.sugar)) + geom_histogram(aes(fill=factor(label)),alpha=0.7) + theme_bw()
ggplot(wine,aes(citric.acid,fill=factor(label))) + geom_histogram(alpha=0.7) + theme_bw()
ggplot(wine,aes(alcohol,fill=factor(label))) + geom_histogram(alpha=0.7) + theme_bw()
ggplot(wine,aes(residual.sugar,citric.acid)) + geom_point(aes(color=factor(label)))
ggplot(wine,aes(volatile.acidity,residual.sugar)) + geom_point(aes(color=factor(label)))
####################
#### Clustering ####
####################
str(wine)
clus.data <- wine[,-13]
head(clus.data)
set.seed(101)
wine.cluster <- kmeans(clus.data,2,nstart = 20 ) # data, K (groups), nstart = number of random starts you can do
wine.cluster
# testing, in real world, you will not have this labelled data
table(wine.cluster$cluster,wine$label)
# testing with more clusters to see if we can split the wines into more categories for better prediction
set.seed(101)
wine.cluster <- kmeans(clus.data,4,nstart = 20 ) # data, K (groups), nstart = number of random starts you can do
wine.cluster
# testing, in real world, you will not have this labelled data
table(wine.cluster$cluster,wine$label)
# cluster visualization
library(cluster)
# this plots the clusters against the 2 components that explain the most variability
# this is not very useful if you have a lot of features
clusplot(wine,wine.cluster$cluster,color = TRUE, shade = TRUE, lines = 0)
|
# This is the RCT that Arne wants to analyse
# 2020-dec-01
# Set the working directory.
# NOTE(review): hard-coded network path - this only runs on this machine;
# consider a project-relative path instead of setwd().
setwd("O:/DGK/DGL/Onderzoek/ULP/PhD/JessieHesseling")
# load library to import excel data
library(readxl)
#HV
list.files()
excel_sheets("RCT-DD-wound.xlsx")
### import excel data ###
# import excel data sheet "ruwe data" (raw data)
raw <- read_excel("RCT-DD-wound.xlsx", sheet = 2)
# import excel data "mscore-treated-excluded"
mscore <- read_excel("RCT-DD-wound.xlsx", sheet = 4)
# import excel data "mscore-treated-excluded-followup"
fup <- read_excel("RCT-DD-wound.xlsx", sheet = 5)
# import excel data "wound-healing-progress"
whp <- read_excel("RCT-DD-wound.xlsx", sheet = 8)
### study flow ###
# Descriptive overview of scored feet, M-scores and treatments in each dataset.
#HV
names(raw) # variable names
summary(raw) # summary per variable
# attach dataset "raw"
# NOTE(review): attach()/detach() is discouraged; with(raw, ...) would be safer.
attach(raw)
# raw data - number of feet scored, M-scores and treatments
(tab <- table(M_D0, Treatment,useNA = "ifany"))
#HV the first ( and the last ) on the preceding line make the result print
#HV not needed here because the addmargins(..) below prints its result anyway
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# raw data - prevalence of M-scores per farm
(tab <- table(Farm, M_D0, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# TODO: make a figure with day-0 prevalence (%) on the Y-axis, Farm on the X-axis and, for each farm, a 100% stacked column with M0, active lesions (M1 + M2 + M4.1) and chronic lesions (M3 + M4)
# detach dataset "raw"
detach(raw)
# select treated cases
# exclude lost cases (based on student remarks, e.g. a wild cow that could not be caught or a bandage that came off early, plus inspection of the raw dataset - how do I do this neatly in R? or is it customary to do this in a copy of the raw dataset in Excel?)
# give this dataset the new name "mscore"
# attach dataset "mscore"
attach(mscore)
# mscore - number of feet scored, M-scores and treatments
(tab <- table(M_D0, Treatment,useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# mscore - number of feet in each treatment group
(tab <- table(M_D0, Treatment,useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# mscore - number of cows in each treatment group
# mscore - number of cows with 2 feet and number of cows with 1 foot in the study
# detach dataset "mscore"
detach(mscore)
# attach dataset "fup"
attach(fup)
# follow-up - number of feet at start (day 0) in each treatment group
(tab <- table(M_D0, Treatment,useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# follow-up - number of feet at day 21 in each treatment group
(tab <- table(M_D21, Treatment,useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# follow-up - number of feet at day 35 in each treatment group
(tab <- table(M_D35, Treatment,useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# detach dataset "fup"
detach(fup)
### Treatment outcomes ###
# Transition matrices of M-scores over time per treatment, plus chi-squared
# tests and wound-healing-progress cross-tables.
# attach dataset "mscore"
attach(mscore)
# mscore - transition matrix D0-D10
(tab <- table(M_D10, Treatment, M_D0, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
addmargins(tab)
rm(tab)
# mscore - transition matrix per farm D0-D10
(tab <- table(M_D10, Treatment, Farm, M_D0, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
addmargins(tab)
rm(tab)
# mscore - create variable 'clinical improvement on D10 (imp_D10)'
# detach dataset "mscore"
detach(mscore)
# attach dataset "fup"
attach(fup)
# follow-up - transition matrix D0-D21
(tab <- table(M_D21, Treatment, M_D0))
addmargins(tab)
round(prop.table(tab, 1), 3)
addmargins(tab)
#HV added: proportions per sub-table
prop.table(tab, c(1,3)) # the 1 means totalling within the row and the 3 means per M_D0
rm(tab)
# follow-up - transition matrix D0-D35
(tab <- table(M_D35, Treatment, M_D0))
addmargins(tab)
round(prop.table(tab, 1), 3)
addmargins(tab)
rm(tab)
# fup - create variable 'clinical improvement on D21 (imp_D21)'
# fup - pearson chi squared test of independence for treatment and clinical improvement on D21
chisq.test(Treatment, imp_D21)
# fup - create variable 'clinical improvement on D35 (imp_D35)'
# fup - pearson chi squared test of independence for treatment and clinical improvement on D35
chisq.test(Treatment, imp_D35)
# detach dataset "fup"
detach(fup)
# attach dataset "whp"
attach(whp)
# whp - overview wound healing progress outcome D0-D10 per treatment group
(tab <- table(WHP_D010, Treatment, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# whp - overview wound healing progress outcome D0-D3 per treatment group
(tab <- table(WHP_D03, Treatment, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# whp - overview wound healing progress outcome D3-D7 per treatment group
(tab <- table(WHP_D37, Treatment, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# whp - overview wound healing progress outcome D7-D10 per treatment group
(tab <- table(WHP_D710, Treatment, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# whp - overview wound healing progress outcome D0-D10 per treatment group for each D0 M-score
(tab <- table(WHP_D010_rec, Treatment, M_D0, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# detach dataset "whp"
detach(whp)
###### Analysis ######
# Univariable mixed logistic regressions: each candidate predictor on its own,
# with farm as a random intercept; odds ratios and profile CIs are reported.
# load library for linear mixed effects (multivariable logistic regression with farm as random effect)
library(lme4)
### UNIVARIABLE logistic regression with farm as random effect ###
## M-score ##
# univariable logistic regression clinical improvement mscore D10 & treatment group
fit <- glmer(imp_D10 ~ Treatment + (1|Farm), family="binomial", data=mscore)
#HV try: table(mscore$imp_D10, mscore$Farm, mscore$Treatment)
# then you see that the counts with score 0 are very low -> estimation problem
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:3,]), 2)
rm(fit, beta, ci)
# univariable logistic regression clinical improvement mscore D10 & Mscore D0
fit <- glmer(imp_D10 ~ factor(M_D0) + (1|Farm), family="binomial", data=mscore)
#HV try: table(mscore$imp_D10, mscore$Farm, mscore$M_D0)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:4,]), 2)
rm(fit, beta, ci)
# univariable logistic regression clinical improvement mscore D10 & time under bandage
fit <- glmer(imp_D10 ~ Bandage_rec + (1|Farm), family="binomial", data=mscore)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:3,]), 2)
rm(fit, beta, ci)
## wound healing progress with 'unable to score' counted as 'not improved' ##
# univariable logistic regression wound healing progress between D0 and D10 & treatment group
fit <- glmer(WHP_D010_rec0 ~ Treatment + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:3,]), 2)
rm(fit, beta, ci)
# univariable logistic regression wound healing progress between D0 and D10 & Mscore D0
fit <- glmer(WHP_D010_rec0 ~ factor(M_D0) + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:4,]), 2)
rm(fit, beta, ci)
# univariable logistic regression wound healing progress between D0 and D10 & time under bandage
fit <- glmer(WHP_D010_rec0 ~ Bandage_rec + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:3,]), 2)
rm(fit, beta, ci)
# univariable logistic regression wound healing progress between D0 and D10 & wound healing progress between D0 and D3
fit <- glmer(WHP_D010_rec0 ~ WHP_D03_rec0 + (1|Farm), family="binomial", data=whp)
summary(fit)
#HV drop1 gives a warning here. Error in drop1.merMod(fit, test = "Chisq") :
# number of rows in use has changed: remove missing values?
# this means WHP_D03_rec0 probably has a missing value, see summary(whp) or summary(whp$WHP_D03_rec0)
# indeed 1 NA, so the models including and excluding this variable are based on a
# different number of records.
fit <- glmer(WHP_D010_rec0 ~ WHP_D03_rec0 + (1|Farm), family="binomial", data=whp[!is.na(whp$WHP_D03_rec0),])
#HV with data=whp[!is.na(whp$WHP_D03_rec0),] only the records without NA in this variable are selected
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:3,]), 2)
rm(fit, beta, ci)
# univariable logistic regression wound healing progress between D0 and D10 & wound healing progress between D3 and D7
#HV added: data=whp[!is.na(whp$WHP_D37_rec0),])
fit <- glmer(WHP_D010_rec0 ~ WHP_D37_rec0 + (1|Farm), family="binomial", data=whp[!is.na(whp$WHP_D37_rec0),])
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:3,]), 2)
rm(fit, beta, ci)
# univariable logistic regression wound healing progress between D0 and D10 & wound healing progress between D7 and D10
fit <- glmer(WHP_D010_rec0 ~ WHP_D710_rec0 + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:3,]), 2)
rm(fit, beta, ci)
## wound healing progress with 'unable to score' counted as 'improved' ##
# univariable logistic regression wound healing progress between D0 and D10 & treatment group
fit <- glmer(WHP_D010_rec1 ~ Treatment + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:3,]), 2)
rm(fit, beta, ci)
# univariable logistic regression wound healing progress between D0 and D10 & Mscore D0
fit <- glmer(WHP_D010_rec1 ~ factor(M_D0) + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:4,]), 2)
rm(fit, beta, ci)
# univariable logistic regression wound healing progress between D0 and D10 & time under bandage
fit <- glmer(WHP_D010_rec1 ~ Bandage_rec + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:3,]), 2)
rm(fit, beta, ci)
# univariable logistic regression wound healing progress between D0 and D10 & wound healing progress between D0 and D3
# NOTE(review): WHP_D03_rec1 may also contain NAs (cf. the rec0 analysis above);
# if drop1 warns, subset the data as done there.
fit <- glmer(WHP_D010_rec1 ~ WHP_D03_rec1 + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:3,]), 2)
rm(fit, beta, ci)
# univariable logistic regression wound healing progress between D0 and D10 & wound healing progress between D3 and D7
fit <- glmer(WHP_D010_rec1 ~ WHP_D37_rec1 + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:3,]), 2)
rm(fit, beta, ci)
# univariable logistic regression wound healing progress between D0 and D10 & wound healing progress between D7 and D10
fit <- glmer(WHP_D010_rec1 ~ WHP_D710_rec1 + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:3,]), 2)
rm(fit, beta, ci)
### MULTIVARIABLE linear mixed effects model, with farm as random effect, manual backwards elimination based on lowest AIC ###
## M-score ##
#full LME model
#HV because the univariable models already had problems with low cell counts,
# the problem will certainly only get bigger with multivariable models!
fit <- glmer(imp_D10 ~ Treatment + factor(M_D0) + Bandage_rec + (1|Farm), family="binomial", data=mscore)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:6,]), 2)
rm(fit, beta, ci)
# All single term deletion AICs are higher than the model AIC, and we actually want a lower AIC
# The least-high AIC vs the model is for M-score D0: delta AIC = 1.99, which is smaller than |2|, so drop M-score D0 because the model then needs fewer variables, even though it fits slightly worse (the AIC gets larger)
# LME model without (M-score D0)
fit <- glmer(imp_D10 ~ Treatment + Bandage_rec + (1|Farm), family="binomial", data=mscore)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:4,]), 2)
rm(fit, beta, ci)
# the model AIC went up from 151.0 to 153.0, within the |2| limit, so ok despite the worse fit
# All single term deletion AICs are larger than the model AIC, and we actually want a lower AIC
# The least-high AIC vs the model is for Bandage: delta AIC = 24.02, which is larger than |2|, so remove no further variables from the model
## wound healing progress with 'unable to score' counted as 'not improved' ##
#full LME model
fit <- glmer(WHP_D010_rec0 ~ Treatment + factor(M_D0) + Bandage_rec + WHP_D03_rec0 + WHP_D37_rec0 + WHP_D710_rec0 + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:9,]), 2)
rm(fit, beta, ci)
# variable with lowest single term deletion AIC is M-score D0 and lower than the model AIC
# LME model without (M-score D0)
fit <- glmer(WHP_D010_rec0 ~ Treatment + Bandage_rec + WHP_D03_rec0 + WHP_D37_rec0 + WHP_D710_rec0 + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:7,]), 2)
rm(fit, beta, ci)
# variable with lowest single term deletion AIC is Bandage and lower than the model AIC
# LME model without (M-score D0 and Bandage)
fit <- glmer(WHP_D010_rec0 ~ Treatment + WHP_D03_rec0 + WHP_D37_rec0 + WHP_D710_rec0 + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:6,]), 2)
rm(fit, beta, ci)
# All single term deletion AICs are higher than the model AIC, and we actually want a lower AIC
# The least-high AIC vs the model is for WP0-3: delta AIC = 0.51, which is smaller than |2|, so drop WHP0-3 because the model then needs fewer variables, even though it fits slightly worse (the AIC gets larger)
# LME model without (M-score D0, Bandage and WHP0-3)
fit <- glmer(WHP_D010_rec0 ~ Treatment + WHP_D37_rec0 + WHP_D710_rec0 + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:5,]), 2)
rm(fit, beta, ci)
# ERROR! I no longer get single term deletion AICs and so cannot continue... :(
# is this because WHP0-3 has missing values? There are a few empty cells there because these photos were missing in the raw data.
# NOTE(review): most likely yes - drop1() refits submodels and the row count
# changes when a variable with NAs leaves the model; fitting on
# whp[complete.cases(whp[, relevant_vars]), ] should avoid this.
## wound healing progress with 'unable to score' counted as 'improved' ##
#full LME model
fit <- glmer(WHP_D010_rec1 ~ Treatment + factor(M_D0) + Bandage_rec + WHP_D03_rec1 + WHP_D37_rec1 + WHP_D710_rec1 + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:9,]), 2)
rm(fit, beta, ci)
# variable with lowest single term deletion AIC is M-score D0 and lower than the model AIC
# LME model without (M-score D0)
fit <- glmer(WHP_D010_rec1 ~ Treatment + Bandage_rec + WHP_D03_rec1 + WHP_D37_rec1 + WHP_D710_rec1 + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:7,]), 2)
rm(fit, beta, ci)
# variable with lowest single term deletion AIC is WHP3-7 and lower than the model AIC
# LME model without (M-score D0 and WHP3-7)
fit <- glmer(WHP_D010_rec1 ~ Treatment + Bandage_rec + WHP_D03_rec1 + WHP_D710_rec1 + (1|Farm), family="binomial", data=whp)
summary(fit)
drop1(fit, test= "Chisq")
beta <- exp(getME(fit, "beta"))
ci <- exp(confint(fit))
round(cbind(beta, ci[2:6,]), 2)
rm(fit, beta, ci)
# ERROR! I no longer get single term deletion AICs and so cannot continue... :(
# komt dit omdat er voor de WHP3-7 missing values zijn? Er zijn daar nl enkele lege cellen omdat deze foto's ontbraken in de ruwe data. | /src/20201201-R-script-RCT_DD_wondgenezing_HV.R | permissive | ArneVanhoudt/DD-algino | R | false | false | 16,565 | r | # Dit is de RCT die Arne wil analyseren
# 2020-dec-01
# set working directory
setwd("O:/DGK/DGL/Onderzoek/ULP/PhD/JessieHesseling")
# load library to import excel data
library(readxl)
#HV
list.files()
excel_sheets("RCT-DD-wound.xlsx")
### import excel data ###
# import excel data "ruwe data"
raw <- read_excel("RCT-DD-wound.xlsx", sheet = 2)
# import excel data "mscore-treated-excluded"
mscore <- read_excel("RCT-DD-wound.xlsx", sheet = 4)
# import excel data "mscore-treated-excluded-followup"
fup <- read_excel("RCT-DD-wound.xlsx", sheet = 5)
# import excel data "wound-healing-progress"
whp <- read_excel("RCT-DD-wound.xlsx", sheet = 8)
### study flow ###
#HV
names(raw) # variabele namen
summary(raw) # samenvatting per variabele
# activeer dataset "raw"
attach(raw)
# ruwe data - aantal poten gescoord, M-scores en behandelingen
(tab <- table(M_D0, Treatment,useNA = "ifany"))
#HV eerste ( en laatste ) in voorgaande regel zorgen er voor dat het resultaat wordt getoond
#HV is in dit geval niet nodig want vervolgens doe je addmargins(..) en wordt ook dat resultaat getoond
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# ruwe data - prevalentie Mscores per bedrijf
(tab <- table(Farm, M_D0, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# figuur maken met in Y-as prevalentie op D0 als percentage, X-as Farm en voor iedere farm 100% stacked column met M0, active lesions (M1 + M2 + M4.1) en chronic lesions (M3 + M4)
# de-activeer dataset "raw"
detach(raw)
# selecteer treated cases
# excludeer lost cases (dit is op basis van remarks student, vb wilde koe dus niet te vangen of verband kwam er eerder af, en bekijken ruwe dataset - hoe doe ik dit netjes in R? of is het toch gebruikelijk dit in en copy van de ruwe dataset in excel te doen?)
# geef deze dataset de nieuwe naam "mscore"
# activeer dataset "mscore"
attach(mscore)
# mscore - aantal poten gescoord, M-scores en behandelingen
(tab <- table(M_D0, Treatment,useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# mscore - aantal poten in iedere behandelgroep
(tab <- table(M_D0, Treatment,useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# mscore - aantal koeien in iedere behandelgroep
# mscore - aantal koeien met 2 poten en aantal koeien met 1 poot in de studie
# de-activeer dataset "mscore"
detach(mscore)
# activeer dataset "fup"
attach(fup)
# follow-up - aantal poten start (dag 0) in iedere behandelgroep
(tab <- table(M_D0, Treatment,useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# follow-up - aantal poten dag 21 in iedere behandelgroep
(tab <- table(M_D21, Treatment,useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# follow-up - aantal poten dag 35 in iedere behandelgroep
(tab <- table(M_D35, Treatment,useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# de-activeer dataset "fup"
detach(fup)
### Treatment outcomes ###
# activeer dataset "mscore"
attach(mscore)
# mscore - transitiematrix D0-D10
(tab <- table(M_D10, Treatment, M_D0, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
addmargins(tab)
rm(tab)
# mscore - transitiematrix per farm D0-D10
(tab <- table(M_D10, Treatment, Farm, M_D0, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
addmargins(tab)
rm(tab)
# mscore - create variable 'clinical improvement on D10 (imp_D10)'
# de-activeer dataset "mscore"
detach(mscore)
# activeer dataset "fup"
attach(fup)
# follow-up - transitiematrix D0-D21
(tab <- table(M_D21, Treatment, M_D0))
addmargins(tab)
round(prop.table(tab, 1), 3)
addmargins(tab)
#HV toevoegen proporties per subtabel
prop.table(tab, c(1,3)) # de 1 geeft aan totaliseren in de regel en de 3 betekent per M_D0
rm(tab)
# follow-up - transitiematrix D0-D35
(tab <- table(M_D35, Treatment, M_D0))
addmargins(tab)
round(prop.table(tab, 1), 3)
addmargins(tab)
rm(tab)
# fup - create variable 'clinical improvement on D21 (imp_D21)'
# fup - pearson chi squared test of independence for treatment and clinical improvement on D21
chisq.test(Treatment, imp_D21)
# fup - create variable 'clinical improvement on D35 (imp_D35)'
# fup - pearson chi squared test of independence for treatment and clinical improvement on D35
chisq.test(Treatment, imp_D35)
# de-activeer dataset "fup"
detach(fup)
# activeer dataset "whp"
attach(whp)
# whp - overview wound healing progress outcome D0-D10 per treatment group
(tab <- table(WHP_D010, Treatment, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# whp - overview wound healing progress outcome D0-D3 per treatment group
(tab <- table(WHP_D03, Treatment, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# whp - overview wound healing progress outcome D3-D7 per treatment group
(tab <- table(WHP_D37, Treatment, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# whp - overview wound healing progress outcome D7-D10 per treatment group
(tab <- table(WHP_D710, Treatment, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# whp - overview wound healing progress outcome D0-D10 per treatment group for each D0 M-score
(tab <- table(WHP_D010_rec, Treatment, M_D0, useNA = "ifany"))
addmargins(tab)
round(prop.table(tab, 1), 3)
rm(tab)
# de-activeer dataset "whp"
detach(whp)
###### Analysis ######
# Load lme4 for mixed-effects logistic regression (farm as random effect).
library(lme4)

# Helper: fit a mixed-effects logistic regression with Farm as a random
# intercept, then print (1) the model summary, (2) single-term-deletion
# likelihood-ratio tests, and (3) odds ratios (exponentiated fixed effects)
# with profile-likelihood 95% confidence intervals.
#
# formula : model formula, e.g. imp_D10 ~ Treatment + (1|Farm)
# data    : data frame containing all model variables
#
# Rows with missing values in any model variable are dropped up front, so
# drop1() compares models fitted to the same records. (This is what
# previously caused "number of rows in use has changed: remove missing
# values?" errors when predictors such as WHP_D03_rec0 contained NAs, and
# the stalled backwards eliminations further below.)
#
# Returns the fitted merMod object invisibly.
fitOddsRatioModel <- function(formula, data) {
  modelVars <- all.vars(formula)
  completeData <- data[complete.cases(data[, modelVars]), ]
  fit <- glmer(formula, family = "binomial", data = completeData)
  print(summary(fit))
  print(drop1(fit, test = "Chisq"))
  beta <- exp(getME(fit, "beta"))
  ci <- exp(confint(fit))
  # Row 1 of the CI matrix is the random-effect SD (.sig01); select the
  # fixed-effect rows by name so they always line up with the odds ratios.
  print(round(cbind(beta, ci[names(fixef(fit)), ]), 2))
  invisible(fit)
}

### UNIVARIABLE logistic regression with farm as random effect ###

## M-score ##
# NOTE(review): table(mscore$imp_D10, mscore$Farm, mscore$Treatment) shows
# very low counts in some cells (few animals with score 0), so these
# estimates can be unstable.
# Clinical improvement in M-score at D10 vs treatment group.
fitOddsRatioModel(imp_D10 ~ Treatment + (1|Farm), mscore)
# Clinical improvement in M-score at D10 vs M-score at D0.
fitOddsRatioModel(imp_D10 ~ factor(M_D0) + (1|Farm), mscore)
# Clinical improvement in M-score at D10 vs time under bandage.
fitOddsRatioModel(imp_D10 ~ Bandage_rec + (1|Farm), mscore)

## Wound healing progress, with "unable to score" coded as NOT improved ##
# Progress D0-D10 vs treatment group, M-score D0, and time under bandage.
fitOddsRatioModel(WHP_D010_rec0 ~ Treatment + (1|Farm), whp)
fitOddsRatioModel(WHP_D010_rec0 ~ factor(M_D0) + (1|Farm), whp)
fitOddsRatioModel(WHP_D010_rec0 ~ Bandage_rec + (1|Farm), whp)
# Progress D0-D10 vs interim progress windows; the helper's complete-cases
# step handles the NAs present in WHP_D03_rec0 / WHP_D37_rec0.
fitOddsRatioModel(WHP_D010_rec0 ~ WHP_D03_rec0 + (1|Farm), whp)
fitOddsRatioModel(WHP_D010_rec0 ~ WHP_D37_rec0 + (1|Farm), whp)
fitOddsRatioModel(WHP_D010_rec0 ~ WHP_D710_rec0 + (1|Farm), whp)

## Wound healing progress, with "unable to score" coded as improved ##
fitOddsRatioModel(WHP_D010_rec1 ~ Treatment + (1|Farm), whp)
fitOddsRatioModel(WHP_D010_rec1 ~ factor(M_D0) + (1|Farm), whp)
fitOddsRatioModel(WHP_D010_rec1 ~ Bandage_rec + (1|Farm), whp)
fitOddsRatioModel(WHP_D010_rec1 ~ WHP_D03_rec1 + (1|Farm), whp)
fitOddsRatioModel(WHP_D010_rec1 ~ WHP_D37_rec1 + (1|Farm), whp)
fitOddsRatioModel(WHP_D010_rec1 ~ WHP_D710_rec1 + (1|Farm), whp)

### MULTIVARIABLE mixed models with farm as random effect; manual backwards
### elimination based on AIC (a term is dropped when removing it raises the
### AIC by less than 2, i.e. the simpler model is preferred) ###

## M-score ##
# NOTE(review): the low cell counts flagged for the univariable models only
# get worse with more predictors -- interpret with care.
# Full model:
fitOddsRatioModel(imp_D10 ~ Treatment + factor(M_D0) + Bandage_rec + (1|Farm),
                  mscore)
# Dropping M-score D0 raised the AIC by only 1.99 (< 2), so it is removed:
fitOddsRatioModel(imp_D10 ~ Treatment + Bandage_rec + (1|Farm), mscore)
# Removing Bandage would raise the AIC by 24.02 (> 2): stop eliminating here.

## Wound healing progress, "unable to score" = not improved ##
# Full model:
fitOddsRatioModel(WHP_D010_rec0 ~ Treatment + factor(M_D0) + Bandage_rec +
                    WHP_D03_rec0 + WHP_D37_rec0 + WHP_D710_rec0 + (1|Farm),
                  whp)
# M-score D0 had the lowest single-term-deletion AIC (below the model AIC):
fitOddsRatioModel(WHP_D010_rec0 ~ Treatment + Bandage_rec + WHP_D03_rec0 +
                    WHP_D37_rec0 + WHP_D710_rec0 + (1|Farm), whp)
# Bandage next (lowest single-term-deletion AIC, below the model AIC):
fitOddsRatioModel(WHP_D010_rec0 ~ Treatment + WHP_D03_rec0 + WHP_D37_rec0 +
                    WHP_D710_rec0 + (1|Farm), whp)
# Dropping WHP D0-D3 raised the AIC by only 0.51 (< 2), so it is removed.
# (The original script stalled here with a drop1() error caused by missing
# values in the WHP predictors; the helper's complete-cases subsetting
# addresses that.)
fitOddsRatioModel(WHP_D010_rec0 ~ Treatment + WHP_D37_rec0 + WHP_D710_rec0 +
                    (1|Farm), whp)

## Wound healing progress, "unable to score" = improved ##
# Full model:
fitOddsRatioModel(WHP_D010_rec1 ~ Treatment + factor(M_D0) + Bandage_rec +
                    WHP_D03_rec1 + WHP_D37_rec1 + WHP_D710_rec1 + (1|Farm),
                  whp)
# M-score D0 had the lowest single-term-deletion AIC (below the model AIC):
fitOddsRatioModel(WHP_D010_rec1 ~ Treatment + Bandage_rec + WHP_D03_rec1 +
                    WHP_D37_rec1 + WHP_D710_rec1 + (1|Farm), whp)
# WHP D3-D7 next (lowest single-term-deletion AIC, below the model AIC).
# (As above, the original script stalled here on missing values.)
fitOddsRatioModel(WHP_D010_rec1 ~ Treatment + Bandage_rec + WHP_D03_rec1 +
                    WHP_D710_rec1 + (1|Farm), whp)
library(Rphylip)
### Name: Rdnapars
### Title: R interface for dnapars
### Aliases: Rdnapars
### Keywords: phylogenetics inference parsimony
### ** Examples
## Not run:
##D data(primates)
##D tree<-Rdnapars(primates)
## End(Not run)
| /data/genthat_extracted_code/Rphylip/examples/Rdnapars.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 241 | r | library(Rphylip)
### Name: Rdnapars
### Title: R interface for dnapars
### Aliases: Rdnapars
### Keywords: phylogenetics inference parsimony
### ** Examples
## Not run:
##D data(primates)
##D tree<-Rdnapars(primates)
## End(Not run)
|
# Histogram of 1 week simulations
# (fix: dataset-dump path/license metadata had been fused onto the closing
# line of this function, breaking the parse; the clean definition follows.)
#
# Plot a stacked histogram of one-week simulated returns for STANLIB.
#
# x : simulated return paths, one column per simulation.
#     NOTE(review): xts_tbl() is assumed to convert x to a tibble with a
#     `date` column -- confirm against the project helpers.
#
# Returns a ggplot object: stacked histogram layers (bin width 0.001) per
# simulation, with a dashed blue vertical line at the overall mean return.
H1_sim_1W_histogram <- function(x) {
  x %>%
    xts_tbl() %>%
    gather(Simulation, Return, -date) %>%
    group_by(Simulation) %>%
    ggplot() +
    geom_histogram(aes(x = Return, color = Simulation, fill = Simulation),
                   position = "stack", binwidth = 0.001, alpha = 0.7) +
    theme(legend.position = "none",
          panel.background = element_rect(fill = "white", colour = "grey50")) +
    geom_vline(aes(xintercept = mean(Return)),
               color = "blue", linetype = "dashed", size = 1) +
    labs(title = "One Week Simulated Returns for STANLIB",
         x = " Simulated Returns", y = "Count")
}
# Plot a stacked histogram of one-week simulated returns for STANLIB: one
# geom_histogram layer per simulation column (bin width 0.001), with a dashed
# blue vertical line at the mean return. Returns a ggplot object.
# NOTE(review): xts_tbl() is assumed to convert x to a tibble with a `date`
# column -- confirm against the project helpers.
H1_sim_1W_histogram <- function(x){x %>% xts_tbl() %>%
    gather(Simulation, Return, -date) %>%
    group_by(Simulation) %>%
    ggplot() + geom_histogram(aes(x = Return, color = Simulation, fill = Simulation), position = "stack", binwidth = 0.001, alpha=0.7) +
    theme(legend.position = "none", panel.background = element_rect(fill = "white", colour = "grey50")) +
    geom_vline(aes(xintercept=mean(Return)), color="blue", linetype="dashed", size=1) +
    labs(title = "One Week Simulated Returns for STANLIB",
         x = " Simulated Returns", y = "Count")}
#######################################################################################################*
# CORE-TRANSIENT FUNCTIONS *
#######################################################################################################*
# This script contains all of the functions used in the analyses that summarize
# core-transient data by site (and across sites).
#======================================================================================================*
# ---- GENERAL FUNCTIONS ----
#======================================================================================================*
# Standard error of the mean.
# x     : numeric vector
# na.rm : if TRUE, drop NAs before computing (default FALSE preserves the
#         original behaviour, where any NA propagates to the result)
se = function(x, na.rm = FALSE) {
  if (na.rm) x = x[!is.na(x)]
  sd(x)/sqrt(length(x))
}
# Convert a date-like object to its numeric calendar year.
# Accepts Date/POSIXt objects directly; factors (including ordered factors,
# which the original `class(date)[1] == 'factor'` test missed) and plain
# character dates are parsed first.
getYear = function(date){
  if (is.factor(date) || is.character(date)) {
    # Go through as.character so parsing works for any factor type.
    date = as.POSIXlt(as.character(date))
  }
  return(as.numeric(format(date, '%Y')))
}
#######################################################################################################*
# ---- DATA PREPARATION ----
#######################################################################################################*
#======================================================================================================*
# ---- FUNCTIONS FOR DATA FORMATTING ----
#======================================================================================================*
#------------------------------------------------------------------------------------------------------*
# ---- Functions to modify a value in the data formatting table for a specific field ----
#------------------------------------------------------------------------------------------------------*
# Overwrite one cell (dataset row x named field) of the global
# `dataFormattingTable` and return the updated column. The table itself is
# not modified in place -- callers reassign the returned column.
dataFormattingTableFieldUpdate = function(datasetID, Field, Value){
  targetRow = which(dataFormattingTable$dataset_ID == datasetID)
  column = dataFormattingTable[, Field]
  if (is.factor(column)) {
    # A factor cannot take arbitrary new values, so update the character
    # representation and re-factor afterwards (levels are recomputed).
    column = as.character(column)
    column[targetRow] = Value
    column = factor(column)
  } else {
    column[targetRow] = Value
  }
  column
}
# Fill in the "Raw_*" summary fields of the (global) data formatting table
# for one dataset, based on the cleaned raw data frame `datasetFinal`
# (expected columns: site, date, species, count).
# Record, time-sample, species and site counts plus start/end year are always
# filled; per-site-year individual totals are only meaningful for count data,
# so for other count formats those fields are set to 'NA'.
# Relies on the global `dataFormattingTable` and the helper
# dataFormattingTableFieldUpdate(); returns the updated table (the global is
# not modified in place).
dataFormattingTableUpdate = function(datasetID, datasetFinal){
  rowIndex = which(dataFormattingTable$dataset_ID == datasetID)
  # Calendar year from the leading 4 characters of the date field.
  year = as.numeric(substr(datasetFinal$date, 1, 4))
  dataFormattingTable[,'Raw_nRecs'] =
    dataFormattingTableFieldUpdate(datasetID, 'Raw_nRecs',
                                   nrow(datasetFinal))
  dataFormattingTable[,'Raw_nTime'] =
    dataFormattingTableFieldUpdate(datasetID, 'Raw_nTime',
                                   length(unique(datasetFinal$date)))
  dataFormattingTable[,'Raw_nSpecies'] =
    dataFormattingTableFieldUpdate(datasetID, 'Raw_nSpecies',
                                   length(unique(datasetFinal$species)))
  dataFormattingTable[,'Raw_nSites'] =
    dataFormattingTableFieldUpdate(datasetID, 'Raw_nSites',
                                   length(unique(datasetFinal$site)))
  dataFormattingTable[,'Raw_start_year'] =
    dataFormattingTableFieldUpdate(datasetID, 'Raw_start_year',
                                   min(year))
  dataFormattingTable[,'Raw_end_year'] =
    dataFormattingTableFieldUpdate(datasetID, 'Raw_end_year',
                                   max(year))
  if(dataFormattingTable[rowIndex, 'countFormat'] == 'count'){
    # For sub-annual data, collapse dates to calendar years before summing.
    if(dataFormattingTable[rowIndex, 'subannualTgrain'] == 'Y'){
      datasetFinal$date = as.numeric(format(datasetFinal$date, '%Y'))
    }
    # Total individuals per site-year.
    # (A redundant second computation of siteYearCounts in the original,
    # after the min/max updates, was removed -- its result was never used.)
    siteYearCounts = ddply(datasetFinal, .(site, date),
                           summarize, tCount = sum(count))
    dataFormattingTable[,'Raw_Mean_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Raw_Mean_Individuals_perSiteYear',
                                     mean(siteYearCounts$tCount))
    dataFormattingTable[,'Raw_Min_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Raw_Min_Individuals_perSiteYear',
                                     min(siteYearCounts$tCount))
    dataFormattingTable[,'Raw_Max_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Raw_Max_Individuals_perSiteYear',
                                     max(siteYearCounts$tCount))
  } else {
    # Non-count data: individual totals are undefined.
    dataFormattingTable[,'Raw_Mean_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Raw_Mean_Individuals_perSiteYear','NA')
    dataFormattingTable[,'Raw_Min_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Raw_Min_Individuals_perSiteYear','NA')
    dataFormattingTable[,'Raw_Max_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Raw_Max_Individuals_perSiteYear','NA')
  }
  return(dataFormattingTable)
}
# Fill in the "Formatted_*" summary fields of the (global) data formatting
# table for one dataset, based on the fully formatted data frame
# `datasetFinal`, which has been subsetted to standardized levels of spatial
# and temporal subsampling (expected columns: site, year, species, count).
# Per-site-year individual totals are only computed for count data; for other
# count formats those fields are set to 'NA'.
# Relies on the global `dataFormattingTable` and the helper
# dataFormattingTableFieldUpdate(); returns the updated table (the global is
# not modified in place).
dataFormattingTableUpdateFinished = function(datasetID, datasetFinal){
  rowIndex = which(dataFormattingTable$dataset_ID == datasetID)
  # Calendar year from the leading 4 characters of the year field.
  year = as.numeric(substr(datasetFinal$year, 1, 4))
  dataFormattingTable[,'Formatted_nRecs'] =
    dataFormattingTableFieldUpdate(datasetID, 'Formatted_nRecs',
                                   nrow(datasetFinal))
  dataFormattingTable[,'Formatted_nTime'] =
    dataFormattingTableFieldUpdate(datasetID, 'Formatted_nTime',
                                   length(unique(datasetFinal$year)))
  dataFormattingTable[,'Formatted_nSpecies'] =
    dataFormattingTableFieldUpdate(datasetID, 'Formatted_nSpecies',
                                   length(unique(datasetFinal$species)))
  dataFormattingTable[,'Formatted_nSites'] =
    dataFormattingTableFieldUpdate(datasetID, 'Formatted_nSites',
                                   length(unique(datasetFinal$site)))
  dataFormattingTable[,'Formatted_start_year'] =
    dataFormattingTableFieldUpdate(datasetID, 'Formatted_start_year',
                                   min(year))
  dataFormattingTable[,'Formatted_end_year'] =
    dataFormattingTableFieldUpdate(datasetID, 'Formatted_end_year',
                                   max(year))
  if(dataFormattingTable[rowIndex, 'countFormat'] == 'count'){
    if(dataFormattingTable[rowIndex, 'subannualTgrain'] == 'Y'){
      # NOTE(review): this converts a `date` column, but the data is
      # summarized by `year` below and the formatted output of subsetDataFun
      # carries site/year/species/count, not `date` -- this looks like a
      # leftover from dataFormattingTableUpdate; confirm before relying on it.
      datasetFinal$date = as.numeric(format(datasetFinal$date, '%Y'))
    }
    # Total individuals per site-year.
    siteYearCounts = ddply(datasetFinal, .(site, year),
                           summarize, tCount = sum(count))
    dataFormattingTable[,'Formatted_Mean_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Formatted_Mean_Individuals_perSiteYear',
                                     mean(siteYearCounts$tCount))
    dataFormattingTable[,'Formatted_Min_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Formatted_Min_Individuals_perSiteYear',
                                     min(siteYearCounts$tCount))
    dataFormattingTable[,'Formatted_Max_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Formatted_Max_Individuals_perSiteYear',
                                     max(siteYearCounts$tCount))
  } else {
    # Non-count data: individual totals are undefined.
    dataFormattingTable[,'Formatted_Mean_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Formatted_Mean_Individuals_perSiteYear','NA')
    dataFormattingTable[,'Formatted_Min_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Formatted_Min_Individuals_perSiteYear','NA')
    dataFormattingTable[,'Formatted_Max_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Formatted_Max_Individuals_perSiteYear','NA')
  }
  return(dataFormattingTable)
}
#======================================================================================================*
# ---- FUNCTIONS for making proportional occurrence dataframes ----
#======================================================================================================*
#------------------------------------------------------------------------------------------------------*
# Function to round dataset to lat and long and summarize data by the new rounded values:
#------------------------------------------------------------------------------------------------------*
# Round each site's latitude/longitude (encoded in `site` as "lat_long")
# down to the given accuracy, and store the rounded "lat_long" label in a
# new `analysisSite` column. Returns the dataset with that column added.
datasetRoundLatLong = function(dataset, accuracy){
  # Split the "lat_long" site label into a two-column data frame.
  siteLL = data.frame(do.call(rbind, strsplit(as.character(dataset$site), '_' )))
  # Round a coordinate column down to the chosen accuracy. This is the
  # base-R equivalent of plyr::round_any(x, accuracy, f = floor), so this
  # function no longer depends on plyr.
  roundLL = function(LatORLong){
    LatorLongVector = as.numeric(as.character(siteLL[,LatORLong]))
    return(floor(LatorLongVector / accuracy) * accuracy)
  }
  lat = roundLL(1)
  long = roundLL(2)
  # Paste to a new site vector and return dataset:
  dataset$analysisSite = paste(lat, long, sep = '_')
  return(dataset)
}
#------------------------------------------------------------------------------------------------------*
# Function to summarize data to a given site level:
#------------------------------------------------------------------------------------------------------*
# Attach an `analysisSite` column to `dataset` at the requested spatial
# grain. `siteGrain` is either a character string naming the categorical
# scale for the analysis (e.g. "region_plot") or a numeric value giving the
# spatial scale in degrees lat-long; `dataDescription` (a one-row slice of
# the data formatting table) says which interpretation applies.
getNestedSiteDataset = function(dataset, siteGrain, dataDescription){
  # Sites are not nested: the analysis site is simply the raw site.
  if (dataDescription$spatial_scale_variable == 'N') {
    dataset$analysisSite = dataset$site
    return(dataset)
  }
  # Nested lat-long sites: round coordinates down to siteGrain degrees.
  if (dataDescription$LatLong_sites == 'Y') {
    return(datasetRoundLatLong(dataset, accuracy = siteGrain))
  }
  # Nested categorical sites: split the "a_b_c" site labels into one column
  # per nesting level, name those columns from the dataset's site definition
  # (Raw_siteUnit), then keep only the levels requested by siteGrain.
  siteParts = read.table(text = as.character(dataset$site), sep = '_',
                         stringsAsFactors = F)
  names(siteParts) = strsplit(as.character(dataDescription$Raw_siteUnit), '_')[[1]]
  keepLevels = strsplit(siteGrain, '_')[[1]]
  dataset$analysisSite = do.call('paste', c(siteParts[keepLevels], sep = '_'))
  dataset
}
#------------------------------------------------------------------------------------------------------*
# Nested time dataset (spatial nesting is categorical, not lat-long):
#------------------------------------------------------------------------------------------------------*
# Attach `analysisDate` (the time label used for the analysis) and `year`
# columns to `dataset` at the requested temporal grain.
# Sub-annual grains label each record "year_unit", where unit is one of
# day / week / biweek / month / bimonth / season; the annual grain collapses
# to calendar years (numeric dates are assumed to already be years).
getNestedTimeDataset = function(dataset, temporalGrain, dataDescription){
  if (temporalGrain != 'year') {
    # Sub-annual grain: parse the dates, derive every candidate time unit,
    # then pick the requested one by column name.
    dataset$date = as.POSIXct(strptime(dataset$date, '%Y-%m-%d'))
    yr = as.numeric(format(dataset$date, '%Y'))
    doy = as.numeric(strftime(dataset$date, format = '%j'))  # day of year
    mon = as.numeric(format(dataset$date, '%m'))
    wk = trunc(doy/7) + 1
    units = cbind(day = doy,
                  week = wk,
                  biweek = trunc(wk/2) + 1,
                  month = mon,
                  bimonth = trunc(mon/2) + 1,
                  # Seasons split the year at days 80 / 172 / 266 / 356.
                  season = ifelse(doy < 80 | doy >= 356, 1,
                                  ifelse(doy < 172, 2,
                                         ifelse(doy < 266, 3, 4))),
                  year = yr)
    dataset$analysisDate = paste(yr, units[, temporalGrain], sep = '_')
    dataset$year = yr
  } else if (class(dataset$date)[1] == 'numeric') {
    # Annual grain, dates already stored as numeric years.
    dataset$analysisDate = dataset$date
    dataset$year = dataset$date
  } else {
    # Annual grain with true date objects: collapse to calendar year.
    yr = as.numeric(format(dataset$date, "%Y"))
    dataset$analysisDate = yr
    dataset$year = yr
  }
  return(dataset)
}
#------------------------------------------------------------------------------------------------------*
# Wrapper function for nested data (if necessary):
#------------------------------------------------------------------------------------------------------*
# Convenience wrapper: attach both the spatial (`analysisSite`) and the
# temporal (`analysisDate`, `year`) analysis columns to `dataset` at the
# requested grains, using the dataset's formatting description.
getNestedDataset = function(dataset, siteGrain, temporalGrain, dataDescription){
  withSites = getNestedSiteDataset(dataset, siteGrain, dataDescription)
  getNestedTimeDataset(withSites, temporalGrain, dataDescription)
}
#------------------------------------------------------------------------------------------------------*
# ---- SUBSET DATASET TO SITES WITH ADEQUATE TIME SAMPLES AND RICHNESS ----
#======================================================================================================*
# Subset `dataset` (evaluated at the given spatial and temporal grains) to
# the analysis sites that have at least `minSpRich` species and at least
# `minNTime` distinct time samples. Returns the NA-free subset; when no site
# qualifies, prints (and returns) an advisory message instead.
richnessYearSubsetFun = function(dataset, spatialGrain, temporalGrain, minNTime = 10, minSpRich = 10, dataDescription){
  nested = getNestedDataset(dataset, spatialGrain, temporalGrain, dataDescription)
  # Species richness and number of time samples per analysis site.
  siteSummary = ddply(nested, .(analysisSite), summarize,
                      sr = length(unique(species)),
                      nTime = length(unique(analysisDate)))
  keepSites = siteSummary$analysisSite[siteSummary$sr >= minSpRich &
                                         siteSummary$nTime >= minNTime]
  if (length(keepSites) == 0) {
    return(print('No acceptable sites, rethink site definitions or temporal scale'))
  }
  na.omit(nested[nested$analysisSite %in% keepSites, ])
}
#------------------------------------------------------------------------------------------------------*
# ---- CALCULATE the Z-threshold ----
#------------------------------------------------------------------------------------------------------*
# The Z-threshold refers to the maximum number of temporal subsamples that provide the most sites with greater than a minimum number of years of data. The following function returns this value.
# Note: Prior to running "zFinder", you must have already run the function "richnessYearSubsetFun" for which "inData" is the function's output.
# Find the Z-threshold: the largest number of within-time temporal subsamples
# such that at least `proportionalThreshold` of all site-time combinations in
# `inData` can retain >= `minNTime` time samples at that level of
# subsampling. `inData` is expected to come from richnessYearSubsetFun
# (columns analysisSite, analysisDate, date).
# Returns a list: z (the threshold), zSiteTimes (the "site_time" labels that
# satisfy it), and zTable (a summary of every candidate threshold).
zFinder = function(inData, minNTime = 10, proportionalThreshold = .5){
  # Calculate the number of temporal samples per site and year:
  spaceTime = ddply(inData, .(analysisSite, analysisDate),
                    summarize, temporalSubsamples = length(unique(date)))
  spaceTime$siteTime = paste(spaceTime$analysisSite, spaceTime$analysisDate, sep = '_')
  # zPossible is a potential threshold of temporal subsampling:
  zPossible = sort(unique(spaceTime$temporalSubsamples))
  # Create an empty matrix to store summary data for possible Z-values:
  zMatrix = matrix(ncol = 3, nrow = length(zPossible),
                   dimnames = list(NULL, c('z','nSiteTimes','propSites')))
  # Create an empty list of sites to store site names of good sites at a given Z-value:
  # NOTE(review): list(length = n) creates a 1-element list whose element is
  # named "length"; vector("list", length(zPossible)) was probably intended.
  # It still works because the loop below assigns by position.
  zSiteList = list(length = length(zPossible))
  # For loop to populate the zMatrix and zSite Lists:
  for(i in 1:length(zPossible)){
    # Subset spaceTime to subsamples greater than or equal to z for a given site:
    spaceTimeGTEz = spaceTime[spaceTime$temporalSubsamples >= zPossible[i], ]
    # Determine sites and siteTimes in which the temporal subsampling was greater
    # than equal to z for at least the minimum time samples:
    # NOTE(review): count(df, analysisSite) with an `$n` column implies
    # dplyr::count -- plyr::count would name the column "freq"; confirm that
    # dplyr is loaded (and masks plyr) when this runs.
    yearCountBySiteGTEz = count(spaceTimeGTEz, analysisSite)
    goodSites = yearCountBySiteGTEz$analysisSite[yearCountBySiteGTEz$n >= minNTime]
    goodSiteTimes = spaceTimeGTEz$siteTime[spaceTimeGTEz$analysisSite %in% goodSites]
    # Construct matrix of z values, the number and proportion of siteYears with that level of subsampling:
    zMatrix[i,'z'] = zPossible[i]
    zMatrix[i, 'nSiteTimes'] = length(goodSiteTimes)
    zMatrix[i, 'propSites'] = length(goodSiteTimes)/length(unique(spaceTime$siteTime))
    # List the names of goodSites for a given Z-value:
    zSiteList[[i]] = goodSiteTimes
    # Name each list entry by the Z-value
    names(zSiteList)[[i]] = zPossible[i]
  }
  # Make a dataframe
  zTable = data.frame(zMatrix)
  # Get the highest Z value whose proportion of retained site-times meets the
  # threshold:
  z = max(zTable$z[zTable$propSites >= proportionalThreshold])
  # Get the names of the site Times that satisfy Z:
  zSiteTimes = factor(zSiteList[[as.character(z)]])
  # Return the z value and site names
  return(list (z = z, zSiteTimes = zSiteTimes, zTable = data.frame(zMatrix)))
}
#------------------------------------------------------------------------------------------------------*
# ---- Subset data based on z-threshold ----
#------------------------------------------------------------------------------------------------------*
# Subset `inData` to the site-time combinations that meet the Z-threshold of
# temporal subsampling (see zFinder), then randomly sample exactly z
# subsampling events (dates) within each retained site-time so that temporal
# effort is standardized.
# NOTE(review): sample() makes this stochastic -- results depend on the RNG
# state unless set.seed() is called upstream.
dataZSubFun = function(inData, minNTime = 10, proportionalThreshold = .5){
  # Get z-values
  zOutput = zFinder(inData, minNTime, proportionalThreshold)
  z = zOutput[[1]]
  # Add a siteTime column:
  data = inData
  data$siteTime = paste(data$analysisSite, data$analysisDate, sep ='_')
  # Subset data to just the site-timeSamples that meet the z-threshold for temporal subsampling:
  dataZSub = subset(data, siteTime %in% zOutput$zSiteTimes)
  # Add a column that concatenates siteTime and date:
  dataZSub$siteTimeDate = paste(dataZSub$siteTime, dataZSub$date, sep = '_')
  # For each siteID and time sample, sample z number of sub-sampling events:
  siteTimes = unique(dataZSub$siteTime)
  # NOTE(review): list(length = n) creates a 1-element list (element named
  # "length"); vector("list", ...) was probably intended. The loop assigns by
  # position, so it still works.
  events = list(length = z*length(siteTimes))
  for(i in 1:length(siteTimes)){
    # Subset to a given siteYear:
    siteDateSub = subset(dataZSub, siteTime == siteTimes[i])
    # Get unique frame of siteYearDates
    # NOTE(review): siteDates is computed but never used below.
    siteDates = unique(siteDateSub$siteTimeDate)
    # Sample the events by the Z-value:
    siteTimeDateSample = sample(unique(siteDateSub$siteTimeDate), size = z)
    events[[i]] = subset(siteDateSub, siteTimeDate %in% siteTimeDateSample )
  }
  # Stack the sampled events back into one data frame:
  dataZSub = rbind.fill(events)
  return(dataZSub)
}
#------------------------------------------------------------------------------------------------------*
# ---- CALCULATE the W-threshold ----
#------------------------------------------------------------------------------------------------------*
# The W-threshold refers to the maximum number of spatial subsamples that provide a given proportion of siteYears.
# This returns a w-value and a list of siteDates that satisfy this value:
# Note: Prior to running the "wFinder", you must have already run the function "richnessYearSubsetFun".
# Find the W-threshold: the largest number of spatial subsamples per
# site-time-date such that at least `proportionalThreshold` of the
# site-time-dates (after Z-subsetting; see dataZSubFun) are retained.
# Returns a list with the Z-subset data (dataZSub), the site-time-date
# labels satisfying W (wSiteTimeDates), and the threshold itself (w).
# NOTE(review): inherits the stochasticity of dataZSubFun (sample()).
wFinder = function(inData, minNTime = 10, proportionalThreshold = .5){
  # Get data subset by Z-value:
  dataZSub = dataZSubFun(inData, minNTime, proportionalThreshold)
  # Summarize number of spatial subsamples per siteTime :
  spaceTime = ddply(dataZSub, .(siteTimeDate), summarize,
                    spatialSubsamples = length(unique(site)))
  # Determine the number of siteTimes present:
  # NOTE(review): nSiteTimeDates is never used below (nrow(spaceTime) is
  # recomputed inside the loop instead).
  nSiteTimeDates = nrow(spaceTime)
  # Get possible values for w:
  wPossible = sort(unique(spaceTime$spatialSubsamples))
  # Create an empty matrix to store summary data for possible W-values:
  wMatrix = matrix(ncol = 3, nrow = length(wPossible),
                   dimnames = list(NULL, c('w','nSiteTimeDates','propSiteTimeDates')))
  # Create an empty list of sites to store site names of good sites at a given W-value:
  # NOTE(review): list(length = n) creates a 1-element list; vector("list", ...)
  # was probably intended. The loop assigns by position, so it still works.
  wSiteTimeDateList = list(length = length(wPossible))
  # For loop to populate the wMatrix and wSite Lists:
  for(i in 1:length(wPossible)){
    # Site-time-dates whose spatial subsampling is >= the candidate w:
    siteTimeDateGTEw = subset(spaceTime, spatialSubsamples>=wPossible[i])$siteTimeDate
    # Construct matrix of w values, the number and proportion of sites:
    wMatrix[i,'w'] = wPossible[i]
    wMatrix[i, 'nSiteTimeDates'] = length(siteTimeDateGTEw)
    wMatrix[i, 'propSiteTimeDates'] = length(siteTimeDateGTEw)/nrow(spaceTime)
    # List the names of siteYears for a given W-value:
    wSiteTimeDateList[[i]] = siteTimeDateGTEw
    # Name each list entry by the W-value
    names(wSiteTimeDateList)[[i]] = wPossible[i]
  }
  # Get the highest W value that includes >= proportionalThreshold of siteYears:
  wFrame = data.frame(wMatrix)
  w = max(wFrame$w[wFrame$propSiteTimeDates >= proportionalThreshold])
  # Get the names of the siteYearDates that satisfy W:
  wSiteTimeDates = factor(wSiteTimeDateList[[as.character(w)]])
  # Return list of necessary items for the subset:
  outList = list(dataZSub, wSiteTimeDates, w)
  names(outList) = c('dataZSub', 'wSiteTimeDates', 'w')
  return(outList)
}
#------------------------------------------------------------------------------------------------------*
# ---- Subset the data based on w and z values ----
#------------------------------------------------------------------------------------------------------*
# Subset the data to both the Z (temporal) and W (spatial) subsampling
# thresholds: within each retained site-time-date, randomly sample exactly w
# spatial subsites, then reduce to the analysis columns. Returns a data
# frame with columns site, year, species, count.
# NOTE(review): stochastic (sample() here and in dataZSubFun); requires
# plyr (rbind.fill) and dplyr (select/one_of) to be available.
wzSubsetFun = function(inData, minNTime = 10, proportionalThreshold = .5){
  wOut = wFinder(inData, minNTime, proportionalThreshold)
  # Keep only the site-time-dates that satisfy the W-threshold:
  dataW = subset(wOut$dataZSub, siteTimeDate %in% wOut$wSiteTimeDates)
  # For each siteYearDate, sample w sampling events:
  siteTimeDateNames = unique(dataW$siteTimeDate)
  # NOTE(review): list(length = n) creates a 1-element list; vector("list", ...)
  # was probably intended. The loop assigns by position, so it still works.
  events = list(length = wOut$w*length(siteTimeDateNames))
  for(i in 1:length(siteTimeDateNames)){
    siteTimeDateSub = subset(dataW, siteTimeDate == siteTimeDateNames[i])
    UniqueSubsites = unique(siteTimeDateSub$site)
    # Randomly retain exactly w subsites (without replacement):
    sampledSubsites = sample(UniqueSubsites, wOut$w, replace = F)
    events[[i]] = subset(siteTimeDateSub, site %in% sampledSubsites)
  }
  outSampledData = rbind.fill(events)
  # Keep only pertinent columns:
  outData = dplyr::select(outSampledData, one_of(c('analysisSite', 'analysisDate','species', 'count')))
  # Rename the analysis columns to the standard output names:
  names(outData)[1:2] = c('site', 'year')
  # Return the subsetted data frame:
  return(outData)
}
#------------------------------------------------------------------------------------------------------*
# ---- Function for getting the subsetted dataset ----
#------------------------------------------------------------------------------------------------------*
# The subsetted dataset is limited to sites above a minimum overall species richness and number of years and each site year is subset to w and z
# Prior to running this function, make sure to run the richnessYearSubsetFun, if there are no good sites, the proportional occurrence frame cannot be made!
# Build the analysis-ready subset for one dataset: restrict to sites with
# adequate richness/time sampling, standardize spatial/temporal effort to
# w and z, and attach the dataset identifier.
# Run richnessYearSubsetFun first to confirm sampling is adequate.
subsetDataFun = function(dataset, datasetID, spatialGrain, temporalGrain,
                         minNTime = 10, minSpRich = 10,
                         proportionalThreshold = .5,
                         dataDescription){
  # Restrict to adequately-sampled sites, then equalize effort:
  richnessSubset = richnessYearSubsetFun(dataset, spatialGrain, temporalGrain,
                                         minNTime, minSpRich, dataDescription)
  effortStandardized = wzSubsetFun(richnessSubset, minNTime, proportionalThreshold)
  # Assemble the output with the datasetID as the leading column:
  data.frame(datasetID = datasetID,
             site = effortStandardized$site,
             year = effortStandardized$year,
             species = effortStandardized$species,
             count = effortStandardized$count)
}
#------------------------------------------------------------------------------------------------------*
# ---- Make the proportional occurrence frame ----
#------------------------------------------------------------------------------------------------------*
# Compute proportional occupancy: for each species at each site, the
# fraction of sampled years in which the species was detected.
# Args:
#   subsettedData: data frame with columns datasetID, site, year, species,
#     count (output of subsetDataFun).
# Returns: data frame with columns datasetID, site, species, propOcc.
propOccFun = function(subsettedData){
  # Drop zero counts so occupancy reflects actual detections:
  subsettedData1 = subset(subsettedData, count > 0)
  # Number of years each species was recorded at each site:
  spTime = ddply(subsettedData1, .(datasetID, site, species), summarize,
                 spTime = length(unique(year)))
  # Number of years each site was sampled:
  siteTime = ddply(subsettedData1, .(site), summarize,
                   siteTime = length(unique(year)))
  spSiteTime = merge(spTime, siteTime)
  # Use the datasetID column carried through the ddply grouping rather
  # than a global `datasetID` variable (bug fix: the former code read an
  # undeclared global):
  propOcc = data.frame(datasetID = spSiteTime$datasetID, site = spSiteTime$site,
                       species = spSiteTime$species,
                       propOcc = spSiteTime$spTime/spSiteTime$siteTime)
  return(propOcc)
}
#------------------------------------------------------------------------------------------------------*
# The following function is used to create and explore and extract the species richness and number of time samples for a site.
#------------------------------------------------------------------------------------------------------*
# Note: because data are subset to w and z, some sites will no longer have a species richness or number of time samples greater than the decided upon minimum
# Summarize each site in the subsetted data: species richness, number of
# time samples, and mean abundance per sampled year.
# Note: because data are subset to w and z, some sites may fall below the
# original richness/time minimums.
siteSummaryFun = function(subsettedData){
  # Only actual detections count toward richness and abundance:
  detections = subset(subsettedData, count > 0)
  siteSummary = ddply(detections, .(datasetID, site), summarize,
                      spRich = length(unique(species)),
                      nTime = length(unique(year)),
                      meanAbundance = sum(count)/length(unique(year)))
  return(siteSummary)
}
#------------------------------------------------------------------------------------------------------*
# Write files
#------------------------------------------------------------------------------------------------------*
# Note: This will not work if the temporal or spatial sampling is inadequate! Make sure to run richnessYearSubsetFun prior to to test whether the spatial and temporal scales are adequate!
# Compute and write the proportional-occurrence and site-summary csv files
# for one subsetted dataset.
# Note: run richnessYearSubsetFun first to confirm spatial and temporal
# sampling are adequate, otherwise this will fail.
writePropOccSiteSummary = function(subsettedData){
  propOcc = propOccFun(subsettedData)
  siteSummary = siteSummaryFun(subsettedData)
  datasetID = unique(siteSummary$datasetID)
  # Build output paths keyed by datasetID:
  propOccPath = paste('data/propOcc_datasets/propOcc_', datasetID, '.csv', sep = '')
  siteSummaryPath = paste('data/siteSummaries/siteSummary_', datasetID, '.csv', sep = '')
  write.csv(propOcc, propOccPath, row.names = F)
  write.csv(siteSummary, siteSummaryPath, row.names = F)
}
#######################################################################################################*
#######################################################################################################*
# ---- END DATA PREPARATION ----
#######################################################################################################*
#######################################################################################################*
# ---- BEGIN DATA ANALYSIS ----
#######################################################################################################*
#======================================================================================================*
# ---- GET DATA ----
#======================================================================================================*
# The following function reads in the data and returns a list of the proportional
# occurence data frame, the site summary (sp richness and number of time samples
# for a given site), system, and taxa:
# Read the proportional-occurrence and site-summary files for one dataset
# and look up its system and taxa in the data formatting table.
# Returns: list(propOcc, siteSummary, system, taxa).
getDataList = function(datasetID){
  # Paths follow the naming convention used by writePropOccSiteSummary:
  propOccPath = paste('data/propOcc_datasets/propOcc_', datasetID,
                      '.csv', sep = '')
  siteSummaryPath = paste('data/siteSummaries/siteSummary_', datasetID,
                          '.csv', sep = '')
  propOcc = read.csv(propOccPath)
  siteSummary = read.csv(siteSummaryPath)
  # System and taxa come from the dataset's metadata row:
  metaData = subset(read.csv('data_formatting_table.csv'),
                    dataset_ID == datasetID)
  list(propOcc = propOcc, siteSummary = siteSummary,
       system = metaData$system, taxa = metaData$taxa)
}
#======================================================================================================*
# ---- BIMODALILITY ----
#======================================================================================================*
# NOTE: For these functions to run, occProp, Ntime, and outSummary frames must
# already be loaded and the "Sampling summary" lines of code MUST be run in the
# dashboard!
#
# Functions:
# - bimodality: Calculates the bimodality metric developed by Allen and Ethan.
# Inputs: Site
# Outputs: A single numeric bimodality value
#
# - random.bimodality: The bimodality for a random sample of the dataset.
# Inputs: Site
# Outputs: A single numeric bimodality value
#
# - p.bimodal: Randomization test for bimodality. Runs n-reps of the random.
# bimodality function and compares the actual bimodality with the
# distribution of random values.
# Inputs: Site, number of reps
# Outputs: A single numeric p-value.
#
# - occs.scaled: Scales occupancy from [0,1] to (0,1) -- because beta distribution
# inputs must not contain 0's or 1's.
# Inputs: Site
# Outputs: A numeric vector of scaled occupancy values.
#
# - fitbeta: Calculates the shape parameters for a fitted beta distribution.
# Inputs: Site
# Outputs: A vector of shape parameters (alpha and beta).
#
#------------------------------------------------------------------------------------------------------*
# ---- Function for calculating bimodality ----
#======================================================================================================*
# Note 1: Bimodality is the fraction of species occurring at either end of
# occupancy distribution. We use a randomization approach to test whether the
# distribution is significantly bimodal.
# Note 2: To run this function the number of time samples for the site (nt) needs
# to be specified. This is done so in the wrapper summary table function.
# In these functions, propOcc refers to a vector of occupancy values for the
# species at a single site, and nTime is the number of time samples (typically
# years) as an integer.
# Bimodality metric: observed variance of occupancy values divided by the
# maximum possible variance, i.e. the variance when half the species sit at
# the minimum occupancy (1/nTime) and half at the maximum (1).
# Args:
#   propOcc_or_RandomPropOcc: occupancy values for one site (observed or
#     randomized).
#   nTime: number of time samples at the site.
# Returns: a single numeric bimodality value in [0, 1].
bimodalityFun = function(propOcc_or_RandomPropOcc, nTime){
  occs = propOcc_or_RandomPropOcc
  nSpecies = length(occs)
  # Most-extreme distribution: half at 1/nTime (floor), half at 1 (ceiling):
  extremeOccs = c(rep(1/nTime, floor(nSpecies/2)),
                  rep(1, ceiling(nSpecies/2)))
  var(occs) / var(extremeOccs)
}
# Random sample of occurences for a given site (to be used in randomization, below):
# Draw a randomized set of occupancy values for one site: the per-bin
# species frequencies are shuffled among the possible occupancy bins
# (1/nTime, 2/nTime, ..., 1), preserving the total number of species.
# Used as the null model in the bimodality and mode randomization tests.
# Args:
#   propOcc: numeric vector of observed proportional occupancies.
#   nTime: integer number of time samples at the site.
# Returns: a vector of randomized occupancy values, same length as propOcc.
randomOccsFun = function(propOcc, nTime){
  # Generate a table (data frame) of occProps and frequencies:
  occPropTable = data.frame(table(propOcc))
  # Create a data frame of possible occProps:
  occPropDummyTable = data.frame(propOcc = seq(1/nTime, 1, by = 1/nTime))
  # Merge the two data frames:
  # NOTE(review): occPropTable$propOcc is a factor (from table()) while
  # occPropDummyTable$propOcc is numeric; the merge match relies on
  # numeric-to-character coercion agreeing with the factor labels, which
  # may be fragile for values with long decimal expansions -- confirm.
  combinedTable = merge(occPropDummyTable, occPropTable, all.x = T)
  combinedTable[is.na(combinedTable[,2]),2]<-0 # Replace NA's with zeros
  # Reassign bin values randomly and add to frame:
  newFreq = sample(combinedTable$Freq, length(combinedTable[,1]))
  randomTable = data.frame(combinedTable[,1], newFreq)
  # NOTE(review): apply() coerces the data frame to a matrix; if column 1
  # is non-numeric the whole matrix becomes character -- verify that
  # rep(x[1], x[2]) receives the intended types here.
  randomOccs=unlist(apply(randomTable, 1, function(x) rep(x[1], x[2])))
  return(as.vector(randomOccs))
}
# Randomization test for bimodality:
# Randomization test for bimodality: the p-value is the proportion of
# `reps` null (shuffled-frequency) draws whose bimodality is at least the
# observed bimodality, with denominator reps + 1.
# Returns: a single numeric p-value.
pBimodalFun = function(propOcc,nTime, reps){
  actualBimod = bimodalityFun(propOcc, nTime)
  # Bimodality of each null draw:
  randomBimod = vapply(seq_len(reps), function(rep_i){
    bimodalityFun(randomOccsFun(propOcc, nTime), nTime)
  }, numeric(1))
  sum(randomBimod >= actualBimod)/(reps + 1)
}
#------------------------------------------------------------------------------------------------------*
# ---- Function for fitting the beta distribution ----
#======================================================================================================*
# Required packages = MASS
# Scale occupancy from [0,1] to (0,1) following Smithson and Verkuilen 2006
# Note: See supplemental at
# http://supp.apa.org/psycarticles/supplemental/met_11_1_54/met_11_1_54_supp.html
# Rescale occupancy from [0,1] to the open interval (0,1), as required by
# beta-distribution fitting, following Smithson and Verkuilen 2006:
# x' = (x * (n - 1) + 0.5) / n.
occsScaledFun = function(occProp){
  nObs = length(occProp)
  shrink = 0.5
  (occProp * (nObs - 1) + shrink) / nObs
}
# Fit beta distribution:
# Fit a beta distribution (MASS::fitdistr) to scaled occupancy values.
# Args:
#   occProp: numeric vector of proportional occupancies for one site.
#   nTime: number of time samples (used only for the bimodality guard).
# Returns: c(alpha, beta) shape parameters, or c(NA, NA) when bimodality
#   is zero or undefined.
fitBeta = function(occProp, nTime) {
  bi = bimodalityFun(occProp, nTime)
  # Scalar && with the NA check first (the former `bi != 0 & !is.na(bi)`
  # relied on NA & FALSE collapsing to FALSE):
  if (!is.na(bi) && bi != 0) {
    occs = occsScaledFun(occProp)
    shape.params = tryCatch(
      {
        fitdistr(occs, "beta",
                 list(shape1 = 2, shape2 = 2))
      },
      error = function(cond) {
        message(paste("Error in fitdistr:", cond))
        # Retry with alternative starting parameters:
        fitdistr(occs, "beta", list(shape1 = 1, shape2 = 1))
      },
      warning = function(cond) {
        message(cond)
        # Bug fix: the former handler returned message()'s NULL, so the
        # function returned NULL instead of estimates whenever fitdistr
        # warned. Refit with warnings suppressed and keep the estimates:
        suppressWarnings(fitdistr(occs, "beta",
                                  list(shape1 = 2, shape2 = 2)))
      }
    )
    return(as.vector(shape.params$estimate))
  } else c(NA, NA)
}
#======================================================================================================*
# ---- CORE-TRANSIENT MODE STATISTICS ----
#======================================================================================================*
# Proportion of samples that are core or transient:
# For these functions, mode argument takes either "core" or "transient".
# The threshold argument specifies the maximum occupancy to be considered
# transient, and therefore (1 - threshold) is the minimum occupancy to be
# considered core.
# Proportion of species at a site that are core or transient.
# Args:
#   propOcc: numeric vector of proportional occupancies for one site.
#   mode: 'core' or 'transient'.
#   threshold: maximum occupancy considered transient; (1 - threshold) is
#     the minimum occupancy considered core.
# Returns: proportion of species in the requested mode.
modeProp = function(propOcc, mode, threshold) {
  if (mode == 'core') {
    sum(propOcc >= 1 - threshold)/length(propOcc)
  } else if (mode == 'transient') {
    sum(propOcc <= threshold)/length(propOcc)
  } else {
    # Fail loudly on a bad mode rather than print()-ing and returning a
    # string (bug fix):
    stop("Invalid mode: must be 'core' or 'transient'", call. = FALSE)
  }
}
# Randomization test for a given mode (is the proportion of samples in core or
# transient greater than we would expect by random chance):
# Randomization test for a given mode: is the observed proportion of core
# (or transient) species greater than expected by chance? The p-value is
# the proportion of `reps` null draws with a mode proportion at least the
# observed one, with denominator reps + 1.
pModeFun = function(propOcc, nTime, mode, threshold, reps){
  actualProp = modeProp(propOcc, mode, threshold)
  # Mode proportion of each null (shuffled-frequency) draw:
  randomProps = vapply(seq_len(reps), function(rep_i){
    modeProp(randomOccsFun(propOcc, nTime), mode, threshold)
  }, numeric(1))
  pVal = sum(randomProps >= actualProp)/(reps + 1)
  return(pVal)
}
#======================================================================================================*
# ---- DATASET SUMMARY FUNCTIONS ----
#======================================================================================================*
# NOTE: For these functions to run, occProp, Ntime, and outSummary frames must
# already be loaded!
#
# Functions:
# - summaryStats: Produces summary sampling data for one site.
# Inputs: Site and the threshold value for core and transient designation.
# Threshold is the value of occupancy below which a species is
# considered transient, and therefore (1 - threshold) is the min
# value for a species to be considered core.
# Outputs: A one-row dataframe with dataset ID, site ID, threshold used,
# the system, taxa, # of time samples, total, core, and transient richness
# proportion of core and transient species, and the average proportion of
# occurance across species.
#
# - ctSummary: A partial-wrapper function that runs and compiles bimodality test
# statistics across sites and adds it to the sampling summary frame above.
# Inputs: Site and the threshold value for core and transient designation.
# Outputs: A one-row dataframe with the summary output above and bimodality
# (Allen + Ethan formula), randomization-derived p-value, and the alpha and
# beta shape parameters for the beta distibution.
#
#------------------------------------------------------------------------------------------------------*
# ---- Function to generate summary of sampling ----
#======================================================================================================*
# Summary stats for all sites in a dataset:
# Compute core-transient summary statistics for every site in one dataset.
# Args:
#   datasetID: dataset identifier used to locate the propOcc/siteSummary files.
#   threshold: occupancy at or below which a species is transient; core
#     species have occupancy >= 1 - threshold.
#   reps: number of randomization reps for the p-value tests.
# Returns: a data frame with one row per site: richness totals, core and
#   transient richness/proportions with randomization p-values, mean
#   abundance, mean occupancy, bimodality with p-value, and fitted beta
#   shape parameters.
summaryStatsFun = function(datasetID, threshold, reps){
  # Get data:
  dataList = getDataList(datasetID)
  sites = dataList$siteSummary$site
  # Preallocate one slot per site (the former list(length = n) idiom made
  # a single-element list, not a preallocated one):
  outList = vector('list', length(sites))
  for(i in seq_along(sites)){
    propOcc = subset(dataList$propOcc, site == sites[i])$propOcc
    siteSummary = subset(dataList$siteSummary, site == sites[i])
    nTime = siteSummary$nTime
    spRichTotal = siteSummary$spRich
    # Core species occur in >= (1 - threshold) of time samples, transient
    # species in <= threshold of them:
    spRichCore = length(propOcc[propOcc >= 1 - threshold])
    spRichTrans = length(propOcc[propOcc <= threshold])
    propCore = spRichCore/spRichTotal
    propCore_pVal = pModeFun(propOcc, nTime, 'core', threshold, reps)
    propTrans = spRichTrans/spRichTotal
    propTrans_pVal = pModeFun(propOcc, nTime, 'transient', threshold, reps)
    meanAbundance = siteSummary$meanAbundance
    mu = mean(propOcc)
    bimodality = bimodalityFun(propOcc, nTime)
    pBimodal = pBimodalFun(propOcc, nTime, reps)
    betaParms = fitBeta(propOcc, nTime)
    alpha = betaParms[1]
    beta = betaParms[2]
    # Unnamed arguments take their variable names as column names, so the
    # local names above define the output schema:
    outList[[i]] = data.frame(datasetID, site = sites[i],
                              system = dataList$system, taxa = dataList$taxa,
                              nTime, spRichTotal, spRichCore, spRichTrans,
                              propCore, propCore_pVal, propTrans, propTrans_pVal,
                              meanAbundance, mu, bimodality, pBimodal, alpha, beta)
  }
  return(rbind.fill(outList))
}
#------------------------------------------------------------------------------------------------------*
# ---- MAKE SUMMARY STATS OF ANY NEW PROPOCC FILES ----
#======================================================================================================*
require(MASS)
require(plyr)
# Compute summary statistics for any propOcc datasets not yet present in
# the core-transient summary file, append them, and optionally write the
# updated summary back to disk.
# Args:
#   threshold: occupancy threshold for core/transient designation.
#   reps: number of randomization reps for p-values.
#   write: if TRUE, write the updated summary csv.
#   allNew: if TRUE, recompute summaries for all datasets from scratch.
# Returns: the updated summary data frame, ordered by datasetID.
addNewSummariesFun = function(threshold, reps, write = FALSE, allNew = FALSE){
  if (allNew == FALSE &
      file.exists('output/tabular_data/core-transient_summary.csv')) {
    currentSummaryData = read.csv('output/tabular_data/core-transient_summary.csv')
    currentDatasetIDs = unique(currentSummaryData$datasetID)
  } else {
    currentSummaryData = c()
    currentDatasetIDs = c()
  }
  propOcc_datasets = list.files('data/propOcc_datasets')
  # Extract the integer datasetID from "propOcc_<ID>.csv" file names
  # (replaces the former nested read.table(text = ...) parsing):
  propOccDatasetIDs = as.integer(gsub('propOcc_([0-9]+)\\.csv', '\\1',
                                      propOcc_datasets))
  # Find dataset IDs that are not yet summarized:
  newDatasetIDs = propOccDatasetIDs[!propOccDatasetIDs %in% currentDatasetIDs]
  # Preallocate and iterate with seq_along: the former 1:length() loop
  # iterated over c(1, 0) and failed when there were no new datasets.
  outList = vector('list', length(newDatasetIDs))
  for(i in seq_along(newDatasetIDs)){
    outList[[i]] = summaryStatsFun(newDatasetIDs[i], threshold, reps)
  }
  newSummaryData = rbind.fill(outList)
  updatedSummaryData = rbind(currentSummaryData, newSummaryData)
  updatedSummaryData = updatedSummaryData[order(updatedSummaryData$datasetID),]
  if (write) {
    write.csv(updatedSummaryData,
              'output/tabular_data/core-transient_summary.csv', row.names = F)
  }
  return(updatedSummaryData)
}
#======================================================================================================*
# ---- PLOT FUNCTIONS ----
#======================================================================================================*
# NOTE: For these functions to run, occProp, Ntime, and outSummary frames must
# already be loaded!
#------------------------------------------------------------------------------------------------------*
# ---- Custom themes ----
#======================================================================================================*
# Theme for plot with no background grid:
# ggplot2 theme for plots with no background grid: black axis text and
# axis lines, enlarged axis/plot titles, blank legend title, blank panel
# background, no grid lines, and widened plot margins.
# NOTE(review): base_size is accepted but never referenced inside theme();
# all text sizes are hard-coded -- confirm whether it should be wired in.
theme_CT_NoGrid = function(base_size = 12) {
  theme(
    axis.text.x = element_text(size=14, color = 'black',vjust = 1, hjust = .5),
    axis.text.y = element_text(size=12, color = 'black', hjust = 1),
    axis.title.x = element_text(size = 18, vjust = -1),
    axis.title.y = element_text(size = 18, vjust = 1.5),
    title = element_text(size=16, vjust = 1),
    legend.title=element_blank(),
    axis.line = element_line(color = 'black'),
    panel.background = element_blank(),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    # top, right, bottom, left margins in 'lines' units:
    plot.margin = unit(c(2,.5,1.5,.5), 'lines'))
}
# ggplot2 theme with a light gray background grid: black axis text and
# lines, enlarged titles, blank panel background, and gray90 grid lines.
# NOTE(review): base_size is accepted but never referenced inside theme();
# all text sizes are hard-coded -- confirm whether it should be wired in.
theme_CT_Grid = function(base_size = 12) {
  theme(axis.text = element_text(size=14, color = 'black'),
        axis.title.x = element_text(size = 18, vjust = -1),
        axis.title.y = element_text(size = 18, vjust = 1),
        title = element_text(size=18, vjust = -0.5),
        axis.line = element_line(colour = 'black'),
        panel.background = element_blank(),
        panel.grid.major = element_line(size = .5, color = 'gray90'),
        panel.grid.minor = element_line(size = .25, color = 'gray90'),
        # top, right, bottom, left margins in 'lines' units:
        plot.margin = unit(c(0,.5,1.5,.5), 'lines'))
}
#------------------------------------------------------------------------------------------------------*
# ---- Function to make core-transient histogram ----
#======================================================================================================*
# This function creates a ct histogram for one site:
# Plot a core-transient occupancy histogram for one site, overlaid with
# the fitted beta density and annotated with bimodality/beta statistics.
# NOTE(review): this function reads the data frames `occProp` and `ct` and
# the scalar `nTime` from the calling/global environment (only `site` is a
# parameter); `nTime` in particular is never defined locally -- confirm it
# is set before calling.
# Args:
#   site: site identifier used to subset occProp and ct.
# Returns: a ggplot object.
ct.hist = function(site) {
  # Get data, subset to a given site:
  occProp = occProp[as.character(occProp$site) == site,]
  ct = ct[as.character(ct$site) == site, ]
  # Plot labels:
  main = paste('Site ', site, paste('(', as.character(ct$system),
                                    ', ', as.character(ct$taxa),')', sep = ''))
  # Subtitle: bimodality, its p-value, mean occupancy, and n time samples:
  sub = bquote(b ~ '=' ~ .(round(ct$bimodal, 2)) ~ ' '~
                 P['b'] ~ '=' ~ .(round(ct$bimodal.p, 3)) ~ ' '~
                 mu ~ '=' ~ .(round(ct$mu, 2)) ~ ' '~
                 t ~ '=' ~ .(ct$nTime))
  # Second subtitle: fitted beta shape parameters:
  sub2 = bquote(alpha ~ '=' ~ .(round(ct$alpha, 3)) ~ ' '~
                  beta ~ '=' ~ .(round(ct$beta, 3)))
  # Set band width, breaks and possible values of x for the histogram:
  # NOTE(review): bw uses the global nTime while x below uses ct$nTime --
  # confirm these are intended to be the same value.
  bw = 1/nTime#(max(occProp$occ)-min(occProp$occ))/10
  brks = seq(min(occProp$occ), max(occProp$occ),bw)
  x = seq(1/ct$nTime,1-1/ct$nTime, .01)
  beta.df = data.frame(x = x, y = dbeta(x, ct$alpha, ct$beta))
  # Plot data:
  out.plot = ggplot(occProp, aes(x=occ)) +
    geom_histogram(aes(y = ..density..), binwidth = bw, breaks = brks, right = F,
                   fill = 'gray', color = 1) +
    xlim(1/nTime, 1) +
    geom_line(data = beta.df, aes(x = x, y = y), color = 'red') +
    # stat_function(fun = function(x) dbeta(x, ct$alpha, ct$beta), color = 'red') +
    # Add labels:
    xlab('Proportion of temporal samples') + ylab('Density') +
    ggtitle(bquote(atop(.(main), atop(.(sub), atop(.(sub2)))))) +
    # Add themes:
    theme(axis.text = element_text(size=14, color = 1),
          axis.title.x = element_text(vjust = -1),
          axis.title.y = element_text(vjust = 2),
          title = element_text(size=16, vjust = -1),
          axis.line = element_line(colour = "black"),
          panel.background = element_blank(),
          plot.margin = unit(c(.5,.5,1.5,1), "lines"))
  return(out.plot)
}
| /scripts/R-scripts/CT_backup.R | no_license | ethanwhite/core-transient | R | false | false | 43,181 | r | #######################################################################################################*
# CORE-TRANSIENT FUNCTIONS *
#######################################################################################################*
# This script contains all of the functions used in the analyses that summarize
# core-transient data by site (and across sites).
#======================================================================================================*
# ---- GENERAL FUNCTIONS ----
#======================================================================================================*
# Standard error:
# Standard error of the mean: sqrt(var(x) / n), equivalent to sd(x)/sqrt(n).
se = function(x) sqrt(var(x) / length(x))
# Function to change date object to year:
# Extract the numeric year from a date object.
# Generalized: factor AND character inputs are parsed with as.POSIXlt
# (the former version only converted factors, so character dates fell
# through to format() and yielded NA via as.numeric).
# Args:
#   date: a Date/POSIXt object, or a factor/character in a format
#     as.POSIXlt can parse (e.g. 'YYYY-MM-DD').
# Returns: numeric year.
getYear = function(date){
  if (is.factor(date) || is.character(date)) date = as.POSIXlt(date)
  return(as.numeric(format(date, '%Y')))
}
#######################################################################################################*
# ---- DATA PREPARATION ----
#######################################################################################################*
#======================================================================================================*
# ---- FUNCTIONS FOR DATA FORMATTING ----
#======================================================================================================*
#------------------------------------------------------------------------------------------------------*
# ---- Functions to modify a value in the data formatting table for a specific field ----
#------------------------------------------------------------------------------------------------------*
# Return a copy of one column of the global dataFormattingTable with the
# value for the given dataset replaced. Factor columns are round-tripped
# through character so the new value can be inserted, then re-factored
# (which recomputes levels). The global table itself is not modified; the
# caller assigns the returned column back.
dataFormattingTableFieldUpdate = function(datasetID, Field, Value){
  targetRow = which(dataFormattingTable$dataset_ID == datasetID)
  columnValues = dataFormattingTable[, Field]
  if (is.factor(columnValues)) {
    # Insert via character to avoid invalid-level NA, then re-factor:
    columnValues = as.character(columnValues)
    columnValues[targetRow] = Value
    columnValues = factor(columnValues)
  } else {
    columnValues[targetRow] = Value
  }
  return(columnValues)
}
# This function fills in numeric summary values for the cleaned raw dataset
# in the data formatting table.
# Fill raw-data summary fields (record counts, time samples, species,
# sites, year range, and -- for count data -- individuals per site-year)
# in the data formatting table for one dataset.
# Relies on the global dataFormattingTable and on
# dataFormattingTableFieldUpdate(), which returns the updated column.
# Args:
#   datasetID: dataset identifier (matched against dataset_ID).
#   datasetFinal: the cleaned raw dataset (site, date, species, count).
# Returns: the updated data formatting table.
dataFormattingTableUpdate = function(datasetID, datasetFinal){
  rowIndex = which(dataFormattingTable$dataset_ID == datasetID)
  # Year is taken from the first 4 characters of the date field:
  year = as.numeric(substr(datasetFinal$date, 1, 4))
  dataFormattingTable[,'Raw_nRecs'] =
    dataFormattingTableFieldUpdate(datasetID, 'Raw_nRecs',
                                   nrow(datasetFinal))
  dataFormattingTable[,'Raw_nTime'] =
    dataFormattingTableFieldUpdate(datasetID, 'Raw_nTime',
                                   length(unique(datasetFinal$date)))
  dataFormattingTable[,'Raw_nSpecies'] =
    dataFormattingTableFieldUpdate(datasetID, 'Raw_nSpecies',
                                   length(unique(datasetFinal$species)))
  dataFormattingTable[,'Raw_nSites'] =
    dataFormattingTableFieldUpdate(datasetID, 'Raw_nSites',
                                   length(unique(datasetFinal$site)))
  dataFormattingTable[,'Raw_start_year'] =
    dataFormattingTableFieldUpdate(datasetID, 'Raw_start_year',
                                   min(year))
  dataFormattingTable[,'Raw_end_year'] =
    dataFormattingTableFieldUpdate(datasetID, 'Raw_end_year',
                                   max(year))
  # Individuals per site-year can only be computed for count-format data:
  if(dataFormattingTable[rowIndex, 'countFormat'] == 'count'){
    if(dataFormattingTable[rowIndex, 'subannualTgrain'] == 'Y'){
      # Collapse sub-annual sampling dates to years before summing counts:
      datasetFinal$date = as.numeric(format(datasetFinal$date, '%Y'))
    }
    siteYearCounts = ddply(datasetFinal, .(site, date),
                           summarize, tCount = sum(count))
    dataFormattingTable[,'Raw_Mean_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Raw_Mean_Individuals_perSiteYear',
                                     mean(siteYearCounts$tCount))
    dataFormattingTable[,'Raw_Min_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Raw_Min_Individuals_perSiteYear',
                                     min(siteYearCounts$tCount))
    dataFormattingTable[,'Raw_Max_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Raw_Max_Individuals_perSiteYear',
                                     max(siteYearCounts$tCount))
    # (A redundant recomputation of siteYearCounts that formerly followed
    # here was removed: its result was never used.)
  } else {
    dataFormattingTable[,'Raw_Mean_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Raw_Mean_Individuals_perSiteYear','NA')
    dataFormattingTable[,'Raw_Min_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Raw_Min_Individuals_perSiteYear','NA')
    dataFormattingTable[,'Raw_Max_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Raw_Max_Individuals_perSiteYear','NA')
  }
  return(dataFormattingTable)
}
# This function fills in numeric summary values for the formatted dataset subsetted
# to standardized levels of spatial and temporal subsampling in the data formatting table.
# Fill formatted-data summary fields (record counts, time samples,
# species, sites, year range, and -- for count data -- individuals per
# site-year) in the data formatting table for a dataset that has been
# subset to standardized spatial/temporal subsampling levels.
# Relies on the global dataFormattingTable and on
# dataFormattingTableFieldUpdate(), which returns the updated column.
# Args:
#   datasetID: dataset identifier (matched against dataset_ID).
#   datasetFinal: the formatted dataset (site, year, species, count).
# Returns: the updated data formatting table.
dataFormattingTableUpdateFinished = function(datasetID, datasetFinal){
  rowIndex = which(dataFormattingTable$dataset_ID == datasetID)
  # Year is taken from the first 4 characters of the year field:
  year = as.numeric(substr(datasetFinal$year, 1, 4))
  dataFormattingTable[,'Formatted_nRecs'] =
    dataFormattingTableFieldUpdate(datasetID, 'Formatted_nRecs',
                                   nrow(datasetFinal))
  dataFormattingTable[,'Formatted_nTime'] =
    dataFormattingTableFieldUpdate(datasetID, 'Formatted_nTime',
                                   length(unique(datasetFinal$year)))
  dataFormattingTable[,'Formatted_nSpecies'] =
    dataFormattingTableFieldUpdate(datasetID, 'Formatted_nSpecies',
                                   length(unique(datasetFinal$species)))
  dataFormattingTable[,'Formatted_nSites'] =
    dataFormattingTableFieldUpdate(datasetID, 'Formatted_nSites',
                                   length(unique(datasetFinal$site)))
  dataFormattingTable[,'Formatted_start_year'] =
    dataFormattingTableFieldUpdate(datasetID, 'Formatted_start_year',
                                   min(year))
  dataFormattingTable[,'Formatted_end_year'] =
    dataFormattingTableFieldUpdate(datasetID, 'Formatted_end_year',
                                   max(year))
  # Individuals per site-year can only be computed for count-format data:
  if(dataFormattingTable[rowIndex, 'countFormat'] == 'count'){
    if(dataFormattingTable[rowIndex, 'subannualTgrain'] == 'Y'){
      # NOTE(review): this converts datasetFinal$date, but the ddply below
      # groups by `year`, not `date` -- this looks like a copy-paste from
      # dataFormattingTableUpdate and may be dead code; confirm.
      datasetFinal$date = as.numeric(format(datasetFinal$date, '%Y'))
    }
    siteYearCounts = ddply(datasetFinal, .(site, year),
                           summarize, tCount = sum(count))
    dataFormattingTable[,'Formatted_Mean_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Formatted_Mean_Individuals_perSiteYear',
                                     mean(siteYearCounts$tCount))
    dataFormattingTable[,'Formatted_Min_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Formatted_Min_Individuals_perSiteYear',
                                     min(siteYearCounts$tCount))
    dataFormattingTable[,'Formatted_Max_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Formatted_Max_Individuals_perSiteYear',
                                     max(siteYearCounts$tCount))
  } else {
    dataFormattingTable[,'Formatted_Mean_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Formatted_Mean_Individuals_perSiteYear','NA')
    dataFormattingTable[,'Formatted_Min_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Formatted_Min_Individuals_perSiteYear','NA')
    dataFormattingTable[,'Formatted_Max_Individuals_perSiteYear'] =
      dataFormattingTableFieldUpdate(datasetID, 'Formatted_Max_Individuals_perSiteYear','NA')
  }
  return(dataFormattingTable)
}
#======================================================================================================*
# ---- FUNCTIONS for making proportional occurrence dataframes ----
#======================================================================================================*
#------------------------------------------------------------------------------------------------------*
# Function to round dataset to lat and long and summarize data by the new rounded values:
#------------------------------------------------------------------------------------------------------*
# Round "lat_long" site identifiers down to the given accuracy (degrees)
# and store the result in a new analysisSite column.
datasetRoundLatLong = function(dataset, accuracy){
  # Split the "lat_long" strings into a two-column data frame:
  coordParts = data.frame(do.call(rbind, strsplit(as.character(dataset$site), '_' )))
  # Round one coordinate column down to the chosen accuracy:
  roundCoord = function(columnIndex){
    coords = as.numeric(as.character(coordParts[, columnIndex]))
    round_any(coords, accuracy, f = floor)
  }
  # Rebuild the site identifier from the rounded coordinates:
  dataset$analysisSite = paste(roundCoord(1), roundCoord(2), sep = '_')
  return(dataset)
}
#------------------------------------------------------------------------------------------------------*
# Function to summarize data to a given site level:
#------------------------------------------------------------------------------------------------------*
# Add an analysisSite column at the requested spatial grain.
# siteGrain is either a character string naming the categorical scale for
# analysis (underscore-separated levels) or a numeric value giving the
# spatial scale in degrees lat-long.
getNestedSiteDataset = function(dataset, siteGrain, dataDescription){
  # Non-nested sites: the analysis site is just the site itself.
  if(dataDescription$spatial_scale_variable == 'N'){
    dataset$analysisSite = dataset$site
    return(dataset)
  }
  # Lat-long sites: round coordinates to the requested grain.
  if(dataDescription$LatLong_sites == 'Y'){
    return(datasetRoundLatLong(dataset, accuracy = siteGrain))
  }
  # Categorical nested sites: split the site string into its levels,
  # label them from the dataset's site definition, and keep only the
  # levels named in siteGrain.
  siteTable = read.table(text = as.character(dataset$site), sep = '_', stringsAsFactors = F)
  siteDefinition = dataDescription$Raw_siteUnit
  names(siteTable) = strsplit(as.character(siteDefinition), '_')[[1]]
  siteLevels = strsplit(siteGrain, '_')[[1]]
  dataset$analysisSite = do.call('paste', c(siteTable[siteLevels], sep = '_'))
  return(dataset)
}
#------------------------------------------------------------------------------------------------------*
# Nested time dataset (spatial nesting is categorical, not lat-long):
#------------------------------------------------------------------------------------------------------*
# Assign each record an `analysisDate` (and `year`) at the requested
# temporal grain: 'day', 'week', 'biweek', 'month', 'bimonth', 'season',
# or 'year'. Sub-annual grains expect dates parseable as '%Y-%m-%d'.
getNestedTimeDataset = function(dataset, temporalGrain, dataDescription){
if(temporalGrain != 'year'){
# Sub-annual grain: parse dates and derive every candidate time unit.
dataset$date = as.POSIXct(strptime(dataset$date, '%Y-%m-%d'))
yr = as.numeric(format(dataset$date, '%Y'))
julian = as.numeric(strftime(dataset$date, format = '%j'))
wk = trunc(julian/7) + 1
mo = as.numeric(format(dataset$date, '%m'))
timeUnits = cbind(
day = julian,
week = wk,
biweek = trunc(wk/2) + 1,
month = mo,
bimonth = trunc(mo/2) + 1,
# Seasons break at Julian days 80, 172, 266, and 356:
season = ifelse(julian < 80 | julian >= 356, 1,
ifelse(julian < 172, 2,
ifelse(julian < 266, 3, 4))),
year = yr)
# analysisDate is "year_unit" at the requested grain:
dataset$analysisDate = paste(yr, timeUnits[, temporalGrain], sep = '_')
dataset$year = yr
} else if (class(dataset$date)[1] == 'numeric') {
# Annual grain with dates already stored as numeric years:
dataset$analysisDate = dataset$date
dataset$year = dataset$date
} else {
# Annual grain with real dates: extract the year.
annum = as.numeric(format(dataset$date, "%Y"))
dataset$analysisDate = annum
dataset$year = annum
}
return(dataset)
}
#------------------------------------------------------------------------------------------------------*
# Wrapper function for nested data (if necessary):
#------------------------------------------------------------------------------------------------------*
# Apply both nesting steps: first spatial (analysisSite), then temporal
# (analysisDate / year), returning the fully nested dataset.
getNestedDataset = function(dataset, siteGrain, temporalGrain, dataDescription){
spatiallyNested = getNestedSiteDataset(dataset, siteGrain, dataDescription)
getNestedTimeDataset(spatiallyNested, temporalGrain, dataDescription)
}
#------------------------------------------------------------------------------------------------------*
# ---- SUBSET DATASET TO SITES WITH ADEQUATE TIME SAMPLES AND RICHNESS ----
#======================================================================================================*
# Subset the dataset to analysis sites with at least `minSpRich` species
# and at least `minNTime` distinct time samples (after nesting to the
# requested spatial and temporal grains). Rows with NAs are dropped.
richnessYearSubsetFun = function(dataset, spatialGrain, temporalGrain, minNTime = 10, minSpRich = 10, dataDescription){
nested = getNestedDataset(dataset, spatialGrain, temporalGrain, dataDescription)
# Species richness and number of time samples per analysis site:
siteStats = ddply(nested, .(analysisSite), summarize,
sr = length(unique(species)),
nTime = length(unique(analysisDate)))
keep = siteStats$analysisSite[siteStats$sr >= minSpRich &
siteStats$nTime >= minNTime]
if(length(keep) == 0){
# Preserved behavior: report (and return) a message rather than erroring.
return(print('No acceptable sites, rethink site definitions or temporal scale'))
}
na.omit(nested[nested$analysisSite %in% keep, ])
}
#------------------------------------------------------------------------------------------------------*
# ---- CALCULATE the Z-threshold ----
#------------------------------------------------------------------------------------------------------*
# The Z-threshold refers to the maximum number of temporal subsamples that provide the most sites with greater than a minimum number of years of data. The following function returns this value.
# Note: Prior to running "zFinder", you must have already run the function "richnessYearSubsetFun" for which "inData" is the function's output.
# Find the Z-threshold: the largest number of temporal subsamples per
# site-time such that at least `proportionalThreshold` of all site-times
# still qualify (each qualifying site must also retain >= minNTime times).
#
# Args:
#   inData: output of richnessYearSubsetFun (needs analysisSite,
#           analysisDate, and date columns).
# Returns a list with:
#   z          - the chosen threshold (integer).
#   zSiteTimes - factor of "site_time" labels that satisfy z.
#   zTable     - data frame of candidate z values with counts/proportions.
#
# NOTE(review): `list(length = length(zPossible))` below builds a
# one-element list named "length", not a preallocation; the loop still
# works because [[i]] assignment extends the list. Likewise
# `1:length(zPossible)` would misbehave if zPossible were empty.
zFinder = function(inData, minNTime = 10, proportionalThreshold = .5){
# Calculate the number of temporal samples per site and year:
spaceTime = ddply(inData, .(analysisSite, analysisDate),
summarize, temporalSubsamples = length(unique(date)))
spaceTime$siteTime = paste(spaceTime$analysisSite, spaceTime$analysisDate, sep = '_')
# zPossible is a potential threshold of temporal subsampling:
zPossible = sort(unique(spaceTime$temporalSubsamples))
# Create an empty matrix to store summary data for possible Z-values:
zMatrix = matrix(ncol = 3, nrow = length(zPossible),
dimnames = list(NULL, c('z','nSiteTimes','propSites')))
# Create an empty list of sites to store site names of good sites at a given Z-value:
zSiteList = list(length = length(zPossible))
# For loop to populate the zMatrix and zSite Lists:
for(i in 1:length(zPossible)){
# Subset spaceTime to subsamples greater than or equal to z for a given site:
spaceTimeGTEz = spaceTime[spaceTime$temporalSubsamples >= zPossible[i], ]
# Determine sites and siteTimes in which the temporal subsampling was greater
# than equal to z for at least the minimum time samples:
yearCountBySiteGTEz = count(spaceTimeGTEz, analysisSite)
goodSites = yearCountBySiteGTEz$analysisSite[yearCountBySiteGTEz$n >= minNTime]
goodSiteTimes = spaceTimeGTEz$siteTime[spaceTimeGTEz$analysisSite %in% goodSites]
# Construct matrix of z values, the number and proportion of siteYears with that level of subsampling:
zMatrix[i,'z'] = zPossible[i]
zMatrix[i, 'nSiteTimes'] = length(goodSiteTimes)
zMatrix[i, 'propSites'] = length(goodSiteTimes)/length(unique(spaceTime$siteTime))
# List the names of goodSites for a given Z-value:
zSiteList[[i]] = goodSiteTimes
# Name each list entry by the Z-value
names(zSiteList)[[i]] = zPossible[i]
}
# Make a dataframe
zTable = data.frame(zMatrix)
# Get the highest Z value with at least minNYears:
z = max(zTable$z[zTable$propSites >= proportionalThreshold])
# Get the names of the site Times that satisfy Z:
zSiteTimes = factor(zSiteList[[as.character(z)]])
# Return the z value and site names
return(list (z = z, zSiteTimes = zSiteTimes, zTable = data.frame(zMatrix)))
}
#------------------------------------------------------------------------------------------------------*
# ---- Subset data based on z-threshold ----
#------------------------------------------------------------------------------------------------------*
# Subset the data to the z-threshold of temporal subsampling and, for each
# qualifying site-time, randomly sample exactly z sub-sampling events.
#
# Args:
#   inData: output of richnessYearSubsetFun.
#   minNTime, proportionalThreshold: passed through to zFinder.
#
# Returns a data frame restricted to z sampled events per site-time, with
# added `siteTime` and `siteTimeDate` columns.
# NOTE: uses `sample()`, so the result depends on the RNG state.
dataZSubFun = function(inData, minNTime = 10, proportionalThreshold = .5){
# Get the z-threshold and the site-times that satisfy it:
zOutput = zFinder(inData, minNTime, proportionalThreshold)
z = zOutput$z
# Add a siteTime column:
data = inData
data$siteTime = paste(data$analysisSite, data$analysisDate, sep = '_')
# Keep only the site-times that meet the z-threshold of temporal subsampling:
dataZSub = subset(data, siteTime %in% zOutput$zSiteTimes)
# Add a column that concatenates siteTime and date:
dataZSub$siteTimeDate = paste(dataZSub$siteTime, dataZSub$date, sep = '_')
# For each site-time, sample z of its sub-sampling events.
siteTimes = unique(dataZSub$siteTime)
# Preallocate one slot per site-time. (The original used
# list(length = z*length(siteTimes)), which builds a ONE-element list
# named "length" rather than preallocating.)
events = vector('list', length(siteTimes))
for(i in seq_along(siteTimes)){
# Subset to a given site-time:
siteDateSub = subset(dataZSub, siteTime == siteTimes[i])
# Sample z of its unique sub-sampling events:
siteTimeDateSample = sample(unique(siteDateSub$siteTimeDate), size = z)
events[[i]] = subset(siteDateSub, siteTimeDate %in% siteTimeDateSample)
}
# Recombine the sampled events:
rbind.fill(events)
}
#------------------------------------------------------------------------------------------------------*
# ---- CALCULATE the W-threshold ----
#------------------------------------------------------------------------------------------------------*
# The W-threshold refers to the maximum number of spatial subsamples that provide a given proportion of siteYears.
# This returns a w-value and a list of siteDates that satisfy this value:
# Note: Prior to running the "wFinder", you must have already run the function "richnessYearSubsetFun".
# Find the W-threshold: the largest number of spatial subsamples per
# site-time-date such that at least `proportionalThreshold` of all
# site-time-dates still qualify. Runs on the z-subset data (dataZSubFun).
#
# Returns a list with:
#   dataZSub       - the z-subset data used for the calculation.
#   wSiteTimeDates - factor of "site_time_date" labels that satisfy w.
#   w              - the chosen threshold (integer).
#
# NOTE(review): `list(length = length(wPossible))` below builds a
# one-element list, not a preallocation; [[i]] assignment still extends
# it correctly. `1:length(wPossible)` would misbehave on empty input.
wFinder = function(inData, minNTime = 10, proportionalThreshold = .5){
# Get data subset by Z-value:
dataZSub = dataZSubFun(inData, minNTime, proportionalThreshold)
# Summarize number of spatial subsamples per siteTime :
spaceTime = ddply(dataZSub, .(siteTimeDate), summarize,
spatialSubsamples = length(unique(site)))
# Determine the number of siteTimes present:
nSiteTimeDates = nrow(spaceTime)
# Get possible values for w:
wPossible = sort(unique(spaceTime$spatialSubsamples))
# Create an empty matrix to store summary data for possible W-values:
wMatrix = matrix(ncol = 3, nrow = length(wPossible),
dimnames = list(NULL, c('w','nSiteTimeDates','propSiteTimeDates')))
# Create an empty list of sites to store site names of good sites at a given W-value:
wSiteTimeDateList = list(length = length(wPossible))
# For loop to populate the wMatrix and wSite Lists:
for(i in 1:length(wPossible)){
# Calculate the years in which the subsamplings was greater than equal to w for a given site:
siteTimeDateGTEw = subset(spaceTime, spatialSubsamples>=wPossible[i])$siteTimeDate
# Construct matrix of w values, the number and proportion of sites:
wMatrix[i,'w'] = wPossible[i]
wMatrix[i, 'nSiteTimeDates'] = length(siteTimeDateGTEw)
wMatrix[i, 'propSiteTimeDates'] = length(siteTimeDateGTEw)/nrow(spaceTime)
# List the names of siteYears for a given W-value:
wSiteTimeDateList[[i]] = siteTimeDateGTEw
# Name each list entry by the Z-value
names(wSiteTimeDateList)[[i]] = wPossible[i]
}
# Get the highest W value that includes >= proportionalThreshold of siteYears:
wFrame = data.frame(wMatrix)
w = max(wFrame$w[wFrame$propSiteTimeDates >= proportionalThreshold])
# Get the names of the siteYearDates that satisfy W:
wSiteTimeDates = factor(wSiteTimeDateList[[as.character(w)]])
# Return list of necessary items for the subset:
outList = list(dataZSub, wSiteTimeDates, w)
names(outList) = c('dataZSub', 'wSiteTimeDates', 'w')
return(outList)
}
#------------------------------------------------------------------------------------------------------*
# ---- Subset the data based on w and z values ----
#------------------------------------------------------------------------------------------------------*
# Subset the data to the w-threshold of spatial subsampling: for each
# site-time-date meeting the threshold, randomly sample exactly w subsites.
#
# Returns a data frame with columns site, year, species, count
# (site = analysisSite, year = analysisDate).
# NOTE: uses `sample()`, so the result depends on the RNG state.
wzSubsetFun = function(inData, minNTime = 10, proportionalThreshold = .5){
wOut = wFinder(inData, minNTime, proportionalThreshold)
# Keep only the site-time-dates that satisfy the w-threshold:
dataW = subset(wOut$dataZSub, siteTimeDate %in% wOut$wSiteTimeDates)
# For each site-time-date, sample w subsites.
siteTimeDateNames = unique(dataW$siteTimeDate)
# Preallocate one slot per site-time-date. (The original used
# list(length = w*length(...)), which builds a ONE-element list named
# "length" rather than preallocating.)
events = vector('list', length(siteTimeDateNames))
for(i in seq_along(siteTimeDateNames)){
siteTimeDateSub = subset(dataW, siteTimeDate == siteTimeDateNames[i])
UniqueSubsites = unique(siteTimeDateSub$site)
sampledSubsites = sample(UniqueSubsites, wOut$w, replace = F)
events[[i]] = subset(siteTimeDateSub, site %in% sampledSubsites)
}
outSampledData = rbind.fill(events)
# Keep only pertinent columns and rename to the output convention:
outData = dplyr::select(outSampledData, one_of(c('analysisSite', 'analysisDate','species', 'count')))
names(outData)[1:2] = c('site', 'year')
# Return the subsetted data frame:
return(outData)
}
#------------------------------------------------------------------------------------------------------*
# ---- Function for getting the subsetted dataset ----
#------------------------------------------------------------------------------------------------------*
# The subsetted dataset is limited to sites above a minimum overall species richness and number of years and each site year is subset to w and z
# Prior to running this function, make sure to run the richnessYearSubsetFun, if there are no good sites, the proportional occurrence frame cannot be made!
# Full subsetting pipeline: filter to adequately sampled sites
# (richnessYearSubsetFun), then apply the z (temporal) and w (spatial)
# subsampling thresholds (wzSubsetFun). Returns a frame with columns
# datasetID, site, year, species, count.
subsetDataFun = function(dataset, datasetID, spatialGrain, temporalGrain,
                         minNTime = 10, minSpRich = 10,
                         proportionalThreshold = .5,
                         dataDescription){
richData = richnessYearSubsetFun(dataset, spatialGrain, temporalGrain,
                                 minNTime, minSpRich, dataDescription)
sampled = wzSubsetFun(richData, minNTime, proportionalThreshold)
data.frame(datasetID = datasetID, site = sampled$site, year = sampled$year,
           species = sampled$species, count = sampled$count)
}
#------------------------------------------------------------------------------------------------------*
# ---- Make the proportional occurrence frame ----
#------------------------------------------------------------------------------------------------------*
# Build the proportional-occurrence frame: for each species at each site,
# the fraction of that site's sampled years in which the species occurred.
#
# Args:
#   subsettedData: output of subsetDataFun (datasetID, site, year,
#                  species, count).
# Returns a data frame with datasetID, site, species, propOcc.
propOccFun = function(subsettedData){
# Only records with positive counts represent occurrences:
subsettedData1 = subset(subsettedData, count > 0)
# Years in which each species occurred at each site:
spTime = ddply(subsettedData1, .(datasetID, site, species), summarize,
spTime = length(unique(year)))
# Total years with any occurrence at each site:
siteTime = ddply(subsettedData1, .(site), summarize,
siteTime = length(unique(year)))
spSiteTime = merge(spTime, siteTime)
# Bug fix: use the datasetID column carried through the merge rather than
# a free `datasetID` variable resolved from the calling environment.
propOcc = data.frame(datasetID = spSiteTime$datasetID, site = spSiteTime$site,
species = spSiteTime$species,
propOcc = spSiteTime$spTime/spSiteTime$siteTime)
return(propOcc)
}
#------------------------------------------------------------------------------------------------------*
# The following function is used to create and explore and extract the species richness and number of time samples for a site.
#------------------------------------------------------------------------------------------------------*
# Note: because data are subset to w and z, some sites will no longer have a species richness or number of time samples greater than the decided upon minimum
# Per-site summary of the subsetted data: species richness, number of
# time samples, and mean abundance per time sample (positive counts only).
siteSummaryFun = function(subsettedData){
occurrences = subset(subsettedData, count > 0)
ddply(occurrences, .(datasetID, site), summarize,
      spRich = length(unique(species)),
      nTime = length(unique(year)),
      meanAbundance = sum(count)/length(unique(year)))
}
#------------------------------------------------------------------------------------------------------*
# Write files
#------------------------------------------------------------------------------------------------------*
# Note: This will not work if the temporal or spatial sampling is inadequate! Make sure to run richnessYearSubsetFun prior to to test whether the spatial and temporal scales are adequate!
# Write the proportional-occurrence and site-summary CSVs for a dataset.
# Output file names are keyed by the datasetID found in the site summary.
writePropOccSiteSummary = function(subsettedData){
propOcc = propOccFun(subsettedData)
siteSummary = siteSummaryFun(subsettedData)
datasetID = unique(siteSummary$datasetID)
propOccPath = paste('data/propOcc_datasets/propOcc_', datasetID, '.csv', sep = '')
siteSummaryPath = paste('data/siteSummaries/siteSummary_', datasetID, '.csv', sep = '')
write.csv(propOcc, propOccPath, row.names = F)
write.csv(siteSummary, siteSummaryPath, row.names = F)
}
#######################################################################################################*
#######################################################################################################*
# ---- END DATA PREPARATION ----
#######################################################################################################*
#######################################################################################################*
# ---- BEGIN DATA ANALYSIS ----
#######################################################################################################*
#======================================================================================================*
# ---- GET DATA ----
#======================================================================================================*
# The following function reads in the data and returns a list of the proportional
# occurence data frame, the site summary (sp richness and number of time samples
# for a given site), system, and taxa:
# Read a dataset's proportional-occurrence and site-summary files plus its
# system/taxa metadata, returning them together in a named list.
getDataList = function(datasetID){
propOcc = read.csv(paste('data/propOcc_datasets/propOcc_',
                         datasetID, '.csv', sep = ''))
siteSummary = read.csv(paste('data/siteSummaries/siteSummary_',
                             datasetID, '.csv', sep = ''))
# Metadata row for this dataset from the formatting table:
metaData = subset(read.csv('data_formatting_table.csv'),
                  dataset_ID == datasetID)
list(propOcc = propOcc, siteSummary = siteSummary,
     system = metaData$system, taxa = metaData$taxa)
}
#======================================================================================================*
# ---- BIMODALILITY ----
#======================================================================================================*
# NOTE: For these functions to run, occProp, Ntime, and outSummary frames must
# already be loaded and the "Sampling summary" lines of code MUST be run in the
# dashboard!
#
# Functions:
# - bimodality: Calculates the bimodality metric developed by Allen and Ethan.
# Inputs: Site
# Outputs: A single numeric bimodality value
#
# - random.bimodality: The bimodality for a random sample of the dataset.
# Inputs: Site
# Outputs: A single numeric bimodality value
#
# - p.bimodal: Randomization test for bimodality. Runs n-reps of the random.
# bimodality function and compares the actual bimodality with the
# distribution of random values.
# Inputs: Site, number of reps
# Outputs: A single numeric p-value.
#
# - occs.scaled: Scales occupancy from [0,1] to (0,1) -- because beta distribution
# inputs must not contain 0's or 1's.
# Inputs: Site
# Outputs: A numeric vector of scaled occupancy values.
#
# - fitbeta: Calculates the shape parameters for a fitted beta distribution.
# Inputs: Site
# Outputs: A vector of shape parameters (alpha and beta).
#
#------------------------------------------------------------------------------------------------------*
# ---- Function for calculating bimodality ----
#======================================================================================================*
# Note 1: Bimodality is the fraction of species occurring at either end of
# occupancy distribution. We use a randomization approach to test whether the
# distribution is significantly bimodal.
# Note 2: To run this function the number of time samples for the site (nt) needs
# to be specified. This is done so in the wrapper summary table function.
# In these functions, propOcc refers to a vector of occupancy values for the
# species at a single site, and nTime is the number of time samples (typically
# years) as an integer.
# Bimodality metric: observed variance of the occupancy values divided by
# the maximum possible variance (half the species at the minimum occupancy
# 1/nTime, half at 1). Values near 1 indicate a strongly bimodal
# occupancy distribution.
bimodalityFun = function(propOcc_or_RandomPropOcc, nTime){
occs = propOcc_or_RandomPropOcc
nSpecies = length(occs)
# Most-extreme possible occupancy vector for this many species:
extremes = c(rep(1/nTime, floor(nSpecies/2)), rep(1, ceiling(nSpecies/2)))
var(occs)/var(extremes)
}
# Random sample of occurences for a given site (to be used in randomization, below):
# Null-model sample of occupancy values: keep the observed frequencies of
# species per occupancy bin, but shuffle which bin receives which
# frequency. Bins are the possible occupancies 1/nTime, 2/nTime, ..., 1.
randomOccsFun = function(propOcc, nTime){
# Observed frequency of each occupancy value:
observedFreq = data.frame(table(propOcc))
# All possible occupancy values, including unobserved ones:
possibleOccs = data.frame(propOcc = seq(1/nTime, 1, by = 1/nTime))
# Merge so every possible bin appears; unobserved bins get frequency 0:
binTable = merge(possibleOccs, observedFreq, all.x = T)
binTable[is.na(binTable[,2]),2]<-0
# Randomly reassign the frequencies across bins:
shuffledFreq = sample(binTable$Freq, length(binTable[,1]))
shuffledTable = data.frame(binTable[,1], shuffledFreq)
# Expand each bin value by its (shuffled) frequency:
randomOccs = unlist(apply(shuffledTable, 1, function(x) rep(x[1], x[2])))
return(as.vector(randomOccs))
}
# Randomization test for bimodality:
# Randomization test for bimodality: the p-value is the proportion of
# shuffled occupancy distributions at least as bimodal as the observed
# one (denominator includes the observed distribution itself).
pBimodalFun = function(propOcc, nTime, reps){
observed = bimodalityFun(propOcc, nTime)
# Bimodality of `reps` null (shuffled) occupancy distributions:
nullValues = vapply(seq_len(reps), function(i){
  bimodalityFun(randomOccsFun(propOcc, nTime), nTime)
}, numeric(1))
sum(nullValues >= observed)/(reps + 1)
}
#------------------------------------------------------------------------------------------------------*
# ---- Function for fitting the beta distribution ----
#======================================================================================================*
# Required packages = MASS
# Scale occupancy from [0,1] to (0,1) following Smithson and Verkuilen 2006
# Note: See supplemental at
# http://supp.apa.org/psycarticles/supplemental/met_11_1_54/met_11_1_54_supp.html
# Compress occupancy values from [0, 1] into (0, 1) so they are valid
# beta-distribution data, following Smithson & Verkuilen (2006):
# (x * (n - 1) + 0.5) / n.
occsScaledFun = function(occProp){
n = length(occProp)
s = .5
(occProp * (n - 1) + s) / n
}
# Fit beta distribution:
# Fit a beta distribution to a site's occupancy values.
#
# Returns c(shape1, shape2) (alpha, beta), or c(NA, NA) when the occupancy
# distribution is degenerate (bimodality of 0 or NA means zero variance,
# for which no beta fit is meaningful). Requires MASS::fitdistr.
fitBeta = function(occProp, nTime) {
bi = bimodalityFun(occProp, nTime)
# Preserves the original branch: NA or 0 bimodality -> c(NA, NA).
if (is.na(bi) || bi == 0) {
return(c(NA, NA))
}
# Rescale to (0, 1) as required by the beta likelihood:
occs = occsScaledFun(occProp)
shape.params = tryCatch(
{
fitdistr(occs, "beta",
list(shape1 = 2, shape2 = 2))
},
error = function(cond) {
message(paste("Error in fitdistr:", cond))
# Retry with alternative starting parameters:
fitdistr(occs, "beta", list(shape1 = 1, shape2 = 1))
},
warning = function(cond) {
message(cond)
# Bug fix: the original handler returned the value of message() (NULL),
# so any convergence warning made the function return NULL instead of
# the fit. Re-run with warnings suppressed and return the fit.
suppressWarnings(fitdistr(occs, "beta", list(shape1 = 2, shape2 = 2)))
}
)
return(as.vector(shape.params$estimate))
}
#======================================================================================================*
# ---- CORE-TRANSIENT MODE STATISTICS ----
#======================================================================================================*
# Proportion of samples that are core or transient:
# For these functions, mode argument takes either "core" or "transient".
# The threshold argument specifies the maximum occupancy to be considered
# transient, and therefore (1 - threshold) is the minimum occupancy to be
# considered core.
# Proportion of species at a site in the requested mode. `threshold` is
# the maximum occupancy counted as transient, so (1 - threshold) is the
# minimum occupancy counted as core.
#
# Args:
#   propOcc:   numeric vector of occupancy proportions for one site.
#   mode:      'core' or 'transient'.
#   threshold: occupancy cutoff in (0, 0.5].
modeProp = function(propOcc, mode, threshold) {
if (mode == 'core') {
sum(propOcc >= 1 - threshold)/length(propOcc)
} else if (mode == 'transient') {
sum(propOcc <= threshold)/length(propOcc)
} else {
# Bug fix: an invalid mode previously print()-ed and returned a string,
# letting callers silently continue with garbage; signal a real error.
stop("Invalid mode: must be 'core' or 'transient'", call. = FALSE)
}
}
# Randomization test for a given mode (is the proportion of samples in core or
# transient greater than we would expect by random chance):
# Randomization test for a mode: p-value is the proportion of shuffled
# occupancy distributions whose core (or transient) fraction is at least
# the observed fraction (denominator includes the observed value itself).
pModeFun = function(propOcc, nTime, mode, threshold, reps){
observedProp = modeProp(propOcc, mode, threshold)
# Mode proportion in `reps` null (shuffled) occupancy distributions:
nullProps = vapply(seq_len(reps), function(i){
  modeProp(randomOccsFun(propOcc, nTime), mode, threshold)
}, numeric(1))
sum(nullProps >= observedProp)/(reps + 1)
}
#======================================================================================================*
# ---- DATASET SUMMARY FUNCTIONS ----
#======================================================================================================*
# NOTE: For these functions to run, occProp, Ntime, and outSummary frames must
# already be loaded!
#
# Functions:
# - summaryStats: Produces summary sampling data for one site.
# Inputs: Site and the threshold value for core and transient designation.
# Threshold is the value of occupancy below which a species is
# considered transient, and therefore (1 - threshold) is the min
# value for a species to be considered core.
# Outputs: A one-row dataframe with dataset ID, site ID, threshold used,
# the system, taxa, # of time samples, total, core, and transient richness
# proportion of core and transient species, and the average proportion of
# occurance across species.
#
# - ctSummary: A partial-wrapper function that runs and compiles bimodality test
# statistics across sites and adds it to the sampling summary frame above.
# Inputs: Site and the threshold value for core and transient designation.
# Outputs: A one-row dataframe with the summary output above and bimodality
# (Allen + Ethan formula), randomization-derived p-value, and the alpha and
# beta shape parameters for the beta distibution.
#
#------------------------------------------------------------------------------------------------------*
# ---- Function to generate summary of sampling ----
#======================================================================================================*
# Summary stats for all sites in a dataset:
# Per-site summary statistics for one dataset: richness, core/transient
# counts and proportions (with randomization p-values), mean occupancy,
# bimodality (with p-value), and fitted beta shape parameters.
# Returns one row per site, combined with rbind.fill.
summaryStatsFun = function(datasetID, threshold, reps){
# Get data:
dataList = getDataList(datasetID)
sites = dataList$siteSummary$site
# Preallocate one slot per site. (The original's list(length = ...) built
# a one-element list, not a preallocation; seq_along also guards the
# zero-site case, where 1:length(sites) would iterate over c(1, 0).)
outList = vector('list', length(sites))
for(i in seq_along(sites)){
propOcc = subset(dataList$propOcc, site == sites[i])$propOcc
siteSummary = subset(dataList$siteSummary, site == sites[i])
nTime = siteSummary$nTime
spRichTotal = siteSummary$spRich
# Core species persist in at least (1 - threshold) of time samples;
# transients occur in at most `threshold` of them:
spRichCore = length(propOcc[propOcc >= 1 - threshold])
spRichTrans = length(propOcc[propOcc <= threshold])
propCore = spRichCore/spRichTotal
propCore_pVal = pModeFun(propOcc, nTime, 'core', threshold, reps)
propTrans = spRichTrans/spRichTotal
propTrans_pVal = pModeFun(propOcc, nTime, 'transient', threshold, reps)
meanAbundance = siteSummary$meanAbundance
mu = mean(propOcc)
bimodality = bimodalityFun(propOcc, nTime)
pBimodal = pBimodalFun(propOcc, nTime, reps)
# Beta-distribution shape parameters (NA when degenerate):
betaParms = fitBeta(propOcc, nTime)
alpha = betaParms[1]
beta = betaParms[2]
outList[[i]] = data.frame(datasetID, site = sites[i],
system = dataList$system, taxa = dataList$taxa,
nTime, spRichTotal, spRichCore, spRichTrans,
propCore, propCore_pVal, propTrans, propTrans_pVal,
meanAbundance, mu, bimodality, pBimodal, alpha, beta)
}
return(rbind.fill(outList))
}
#------------------------------------------------------------------------------------------------------*
# ---- MAKE SUMMARY STATS OF ANY NEW PROPOCC FILES ----
#======================================================================================================*
# Load packages required by the summary functions below. library() (rather
# than require()) errors immediately if a package is missing instead of
# returning FALSE and failing obscurely later.
library(MASS)
library(plyr)
# Compute and (optionally) write summary statistics for any propOcc files
# that are not yet in the core-transient summary table.
#
# Args:
#   threshold: occupancy threshold for core/transient designation.
#   reps:      number of randomization reps for the p-values.
#   write:     if TRUE, overwrite output/tabular_data/core-transient_summary.csv.
#   allNew:    if TRUE, ignore the existing summary file and redo everything.
addNewSummariesFun = function(threshold, reps, write = FALSE, allNew = FALSE){
if (allNew == FALSE &
file.exists('output/tabular_data/core-transient_summary.csv')) {
currentSummaryData = read.csv('output/tabular_data/core-transient_summary.csv')
currentDatasetIDs = unique(currentSummaryData$datasetID)
} else {
currentSummaryData = c()
currentDatasetIDs = c()
}
propOcc_datasets = list.files('data/propOcc_datasets')
# Extract the integer datasetIDs from filenames of the form
# "propOcc_##.csv" or "propOcc_###.csv":
propOccDatasetIDs = read.table(text =
as.character(read.table(text = propOcc_datasets,
sep ='_')[,2]),sep ='.')[,1]
# Find dataset IDs that are not yet summarized:
newDatasetIDs = propOccDatasetIDs[!propOccDatasetIDs %in% currentDatasetIDs]
# Bug fix: the original looped over 1:length(newDatasetIDs), which
# iterates over c(1, 0) and errors when nothing is new; seq_along with a
# real preallocation (vector('list', ...)) handles the empty case cleanly.
outList = vector('list', length(newDatasetIDs))
for(i in seq_along(newDatasetIDs)){
outList[[i]] = summaryStatsFun(newDatasetIDs[i], threshold, reps)
}
newSummaryData = rbind.fill(outList)
updatedSummaryData = rbind(currentSummaryData, newSummaryData)
updatedSummaryData = updatedSummaryData[order(updatedSummaryData$datasetID),]
if (write) {
write.csv(updatedSummaryData,
'output/tabular_data/core-transient_summary.csv', row.names = F)
}
return(updatedSummaryData)
}
#======================================================================================================*
# ---- PLOT FUNCTIONS ----
#======================================================================================================*
# NOTE: For these functions to run, occProp, Ntime, and outSummary frames must
# already be loaded!
#------------------------------------------------------------------------------------------------------*
# ---- Custom themes ----
#======================================================================================================*
# Theme for plot with no background grid:
# ggplot2 theme for core-transient figures with NO background grid:
# blank panel, black axis lines, enlarged axis text/titles, no legend
# title, and extra top/bottom plot margins.
# NOTE(review): base_size is accepted for API symmetry but is not used —
# all sizes are hard-coded; confirm before relying on it.
theme_CT_NoGrid = function(base_size = 12) {
theme(
axis.text.x = element_text(size=14, color = 'black',vjust = 1, hjust = .5),
axis.text.y = element_text(size=12, color = 'black', hjust = 1),
axis.title.x = element_text(size = 18, vjust = -1),
axis.title.y = element_text(size = 18, vjust = 1.5),
title = element_text(size=16, vjust = 1),
legend.title=element_blank(),
axis.line = element_line(color = 'black'),
panel.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.margin = unit(c(2,.5,1.5,.5), 'lines'))
}
# ggplot2 theme for core-transient figures WITH a light gray background
# grid: blank panel, black axis lines, enlarged text, gray90 grid lines.
# NOTE(review): base_size is accepted for API symmetry but is not used —
# all sizes are hard-coded; confirm before relying on it.
theme_CT_Grid = function(base_size = 12) {
theme(axis.text = element_text(size=14, color = 'black'),
axis.title.x = element_text(size = 18, vjust = -1),
axis.title.y = element_text(size = 18, vjust = 1),
title = element_text(size=18, vjust = -0.5),
axis.line = element_line(colour = 'black'),
panel.background = element_blank(),
panel.grid.major = element_line(size = .5, color = 'gray90'),
panel.grid.minor = element_line(size = .25, color = 'gray90'),
plot.margin = unit(c(0,.5,1.5,.5), 'lines'))
}
#------------------------------------------------------------------------------------------------------*
# ---- Function to make core-transient histogram ----
#======================================================================================================*
# This function creates a ct histogram for one site:
# Core-transient occupancy histogram for one site, with the fitted beta
# density overlaid in red.
#
# NOTE: relies on globally loaded `occProp` (site-level occupancy values
# in column `occ`) and `ct` (per-site summary with bimodal, bimodal.p,
# mu, alpha, beta, nTime, system, taxa columns), as stated in the section
# header above.
ct.hist = function(site) {
# Get data, subset to a given site:
occProp = occProp[as.character(occProp$site) == site,]
ct = ct[as.character(ct$site) == site, ]
# Plot labels:
main = paste('Site ', site, paste('(', as.character(ct$system),
', ', as.character(ct$taxa),')', sep = ''))
sub = bquote(b ~ '=' ~ .(round(ct$bimodal, 2)) ~ ' '~
P['b'] ~ '=' ~ .(round(ct$bimodal.p, 3)) ~ ' '~
mu ~ '=' ~ .(round(ct$mu, 2)) ~ ' '~
t ~ '=' ~ .(ct$nTime))
sub2 = bquote(alpha ~ '=' ~ .(round(ct$alpha, 3)) ~ ' '~
beta ~ '=' ~ .(round(ct$beta, 3)))
# Band width, breaks, and x values for the fitted beta curve.
# Bug fix: the original used a bare `nTime` here and in xlim(), which is
# undefined in this scope — every other reference uses ct$nTime.
bw = 1/ct$nTime
brks = seq(min(occProp$occ), max(occProp$occ), bw)
x = seq(1/ct$nTime, 1 - 1/ct$nTime, .01)
beta.df = data.frame(x = x, y = dbeta(x, ct$alpha, ct$beta))
# Plot data:
out.plot = ggplot(occProp, aes(x=occ)) +
geom_histogram(aes(y = ..density..), binwidth = bw, breaks = brks, right = F,
fill = 'gray', color = 1) +
xlim(1/ct$nTime, 1) +
geom_line(data = beta.df, aes(x = x, y = y), color = 'red') +
# Add labels:
xlab('Proportion of temporal samples') + ylab('Density') +
ggtitle(bquote(atop(.(main), atop(.(sub), atop(.(sub2)))))) +
# Add themes:
theme(axis.text = element_text(size=14, color = 1),
axis.title.x = element_text(vjust = -1),
axis.title.y = element_text(vjust = 2),
title = element_text(size=16, vjust = -1),
axis.line = element_line(colour = "black"),
panel.background = element_blank(),
plot.margin = unit(c(.5,.5,1.5,1), "lines"))
return(out.plot)
}
|
library(SNPRelate)
input_file <- "---"
output_file <- "---"
snpgdsVCF2GDS(input_file, output_file, method="biallelic.only") | /Andmetöötlus ja -analüüs/Quach_2016/q16_vcfist_gds.R | no_license | ralf-tambets/bachelor-thesis | R | false | false | 123 | r | library(SNPRelate)
input_file <- "---"
output_file <- "---"
snpgdsVCF2GDS(input_file, output_file, method="biallelic.only") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SEAtables.R
\name{tableHydro}
\alias{tableHydro}
\title{Creates Hydrocast table}
\usage{
tableHydro(filename, saveLoc = "~/Desktop")
}
\arguments{
\item{filename}{Path of the file to be read in}
\item{saveLoc}{Folder to save the output}
}
\description{
Creates Hydrocast table
}
\examples{
tableHydro()
}
| /man/tableHydro.Rd | no_license | benharden27/sea | R | false | true | 384 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SEAtables.R
\name{tableHydro}
\alias{tableHydro}
\title{Creates Hydrocast table}
\usage{
tableHydro(filename, saveLoc = "~/Desktop")
}
\arguments{
\item{filename}{Path of the file to be read in}
\item{saveLoc}{Folder to save the output}
}
\description{
Creates Hydrocast table
}
\examples{
tableHydro()
}
|
# E-step of an EM algorithm for a Gaussian mixture model.
#
# Args:
#   x:      matrix (or data frame) of observations, one row per observation.
#   mean:   matrix of component means, one row per mixture component.
#   std:    list of component covariance matrices, one per component.
#   weight: numeric vector of mixture weights, one per component.
#
# Returns a list with:
#   loglikelihood: total data log-likelihood under the mixture, i.e.
#                  sum over rows of log(sum_k weight[k] * N(x | comp k)).
#   posterior:     n x k matrix of component responsibilities (rows sum to 1).
estep <- function(x, mean, std, weight) {
  # Dimensions come from the inputs rather than being hard-coded
  # (the original assumed exactly 150 rows and 3 components).
  n.obs <- nrow(x)
  n.comp <- length(weight)
  prior <- matrix(NA_real_, n.obs, n.comp)
  # Component densities: multivariate normal evaluated at each row.
  # MASS::ginv is used so a (near-)singular covariance does not error.
  for (k in seq_len(n.comp)) {
    prior[, k] <- apply(x, 1, function(row) {
      dev <- t(row) - mean[k, ]
      exp(-0.5 * dev %*% MASS::ginv(std[[k]]) %*% t(dev)) /
        sqrt(det(2 * pi * std[[k]]))
    })
  }
  # Marginal density of each observation under the full mixture.
  marginal <- rowSums(t(t(prior) * weight))
  # Responsibilities: weighted component densities normalized per row.
  posterior <- t(t(prior) * weight) / marginal
  # Data log-likelihood. The original computed sum(logpost, 2), which
  # both summed log-posteriors (not a likelihood) and added the literal
  # constant 2 to the total.
  loglikelihood <- sum(log(marginal))
  list("loglikelihood" = loglikelihood,
       "posterior" = posterior)
}
| /estep.R | no_license | NikitaD1/Fitting-a-Gaussian-mixture-model-using-Expectation-Maximization-Algorithm | R | false | false | 562 | r | estep <- function(x,mean, std, weight) {
i=3
posterior = matrix(,150,3)
prior = matrix(,150,3)
for (k in 1:i)
prior[,k] = apply(x, 1, function(x) exp(-(1/2) * (t(x) - mean[k,]) %*% MASS::ginv(std[[k]]) %*%
t(t(x) - mean[k,]))/sqrt(det(2 * pi * std[[k]])))
for ( k in 1:i)
posterior[,k] = (weight[k]*prior[,k])/rowSums(t((t(prior)*weight)))
logpost <- log(posterior)
loglikelihood <- sum(logpost,2)
list("loglikelihood" = loglikelihood,
"posterior" = posterior)
}
|
#' @title Code to the Minor Allele
#'
#' @description This function ensures the genotype is coded to the minor allele.
#'
#' @param Z A genotype matrix (dosage matrix) - rows correspond to
#' individuals and columns correspond to SNPs. Use 'NA' for missing values.
#'
#' @details This function ensures the minor allele frequency is < 0.5. Note
#' this assumes the genotype matrix is coded 0/1/2.
#'
#' If Z is not a matrix it will be coerced with as.matrix.
#'
#' @return a recoded genotype matrix (dosage matrix)
#'
codeToMinor <- function(Z) {
  # Coerce non-matrix inputs (e.g. data frames) to a matrix first.
  if (!is.matrix(Z)) {
    Z <- as.matrix(Z)
  }
  # Per-SNP allele frequency; columns above 0.5 are coded to the major
  # allele, so flip them (0/1/2 -> 2/1/0) to code to the minor allele.
  alleleFreq <- colMeans(Z, na.rm = TRUE) / 2
  flipCols <- which(alleleFreq > 0.5)
  Z[, flipCols] <- 2 - Z[, flipCols]
  Z
}
| /R/codeToMinor.R | no_license | DavisBrian/Firth | R | false | false | 723 | r | #' @title Code to the Minor Allele
#'
#' @description This function ensures the genotype is coded to the minor allele.
#'
#' @param Z A genotype matrix (dosage matrix) - rows correspond to
#' individuals and columns correspond to SNPs. Use 'NA' for missing values.
#'
#' @details This function ensures the minor allele frequency is < 0.5. Note
#' this assumes the genotype matrix is coded 0/1/2.
#'
#' If Z is not a matrix it will be coerced with as.matrix.
#'
#' @return a recoded genotype matrix (dosage matrix)
#'
codeToMinor <- function(Z) {
  # Coerce non-matrix inputs (e.g. data frames) to a matrix first.
  if (!is.matrix(Z)) {
    Z <- as.matrix(Z)
  }
  # Per-SNP allele frequency; columns above 0.5 are coded to the major
  # allele, so flip them (0/1/2 -> 2/1/0) to code to the minor allele.
  alleleFreq <- colMeans(Z, na.rm = TRUE) / 2
  flipCols <- which(alleleFreq > 0.5)
  Z[, flipCols] <- 2 - Z[, flipCols]
  Z
}
|
#################################################################################
## The functions in this program are designed to reduce the cost of inverting
## a matrix. There are two functions in this program:
##
## makeCacheMatrix: makes a list of functions whose elements:
## 1. create a matrix
## 2. cache the matrix
## 3. invert the matrix
## 4. cache the inverted matrix
##
## cacheSolve: Requires the list from makeCacheMatrix to function
## Returns the inverted matrix, using cache if it exists or
## performing the "solve" function if cache does not exist
## makeCacheMatrix --------------------------------------------------------------
## Args: x: Must be a matrix
## Returns: List with following functions:
## (1) setMatrix - caches matrix
## (2) getMatrix - reads matrix from cache
## (3) setInverse - caches inverted matrix
## (4) getInverse - reads inverted matrix from cache
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; invalidated whenever the stored matrix changes.
  cachedInverse <- NULL
  list(
    # Replace the stored matrix and invalidate any cached inverse.
    setMatrix = function(y) {
      x <<- y
      cachedInverse <<- NULL
    },
    # Read back the stored matrix.
    getMatrix = function() x,
    # Store a computed inverse for later reuse.
    setInverse = function(solve) cachedInverse <<- solve,
    # Return the cached inverse, or NULL if none has been stored.
    getInverse = function() cachedInverse
  )
} ## end function makeCacheMatrix
## cacheSolve -------------------------------------------------------------------
## Args: List output from makeCacheMatrix
## Returns: inverted matrix
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the matrix stored in 'x',
  ## where 'x' is the closure list produced by makeCacheMatrix.
  s <- x$getInverse()
  if(!is.null(s)) {
    message("getting cached data")
    return(s)
  }
  ## If not cached, compute the inverse and cache it via setInverse.
  ## (The original called x$setMatrix(s) here, which replaced the stored
  ## matrix with its inverse AND reset the cache to NULL, so the cache
  ## could never be hit and the stored matrix was corrupted.)
  data <- x$getMatrix()
  s <- solve(data, ...)
  x$setInverse(s)
  s
} ## end function cacheSolve
| /cachematrix.R | no_license | BShanrockSolberg/ProgrammingAssignment2 | R | false | false | 2,164 | r | #################################################################################
## The functions in this program are designed to reduce the cost of inverting
## a matrix. There are two functions in this program:
##
## makeCacheMatrix: makes a list of functions whose elements:
## 1. create a matrix
## 2. cache the matrix
## 3. invert the matrix
## 4. cache the inverted matrix
##
## cacheSolve: Requires the list from makeCacheMatrix to function
## Returns the inverted matrix, using cache if it exists or
## performing the "solve" function if cache does not exist
## makeCacheMatrix --------------------------------------------------------------
## Args: x: Must be a matrix
## Returns: List with following functions:
## (1) setMatrix - caches matrix
## (2) getMatrix - reads matrix from cache
## (3) setInverse - caches inverted matrix
## (4) getInverse - reads inverted matrix from cache
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; invalidated whenever the stored matrix changes.
  cachedInverse <- NULL
  list(
    # Replace the stored matrix and invalidate any cached inverse.
    setMatrix = function(y) {
      x <<- y
      cachedInverse <<- NULL
    },
    # Read back the stored matrix.
    getMatrix = function() x,
    # Store a computed inverse for later reuse.
    setInverse = function(solve) cachedInverse <<- solve,
    # Return the cached inverse, or NULL if none has been stored.
    getInverse = function() cachedInverse
  )
} ## end function makeCacheMatrix
## cacheSolve -------------------------------------------------------------------
## Args: List output from makeCacheMatrix
## Returns: inverted matrix
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the matrix stored in 'x',
  ## where 'x' is the closure list produced by makeCacheMatrix.
  s <- x$getInverse()
  if(!is.null(s)) {
    message("getting cached data")
    return(s)
  }
  ## If not cached, compute the inverse and cache it via setInverse.
  ## (The original called x$setMatrix(s) here, which replaced the stored
  ## matrix with its inverse AND reset the cache to NULL, so the cache
  ## could never be hit and the stored matrix was corrupted.)
  data <- x$getMatrix()
  s <- solve(data, ...)
  x$setInverse(s)
  s
} ## end function cacheSolve
|
# Shiny dashboard app: renders two histograms (base and ggplot2) driven
# by sidebar controls (year selector, maximum slider, color choice).
library('shiny')
library('shinydashboard')
# Set the working directory to the folder containing this script
# NOTE(review): relies on rstudioapi, so this only works when run from RStudio
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Load the file providing the plotting function(s), e.g. visual()
source("histograma.R")
# ---------------------
# Sidebar
# ---------------------
sidebar <- dashboardSidebar(
  sidebarMenu(
    # Year selector (multiple selection allowed)
    selectInput("selector", "Selector",
                choices = c(2000:2013), multiple=TRUE, selectize=TRUE,
                width = '95%'),
    # Input inside of menuSubItem
    menuSubItem(icon = NULL,
                sliderInput("maximo", "Maximo", min=0, max=25, value=20,
                            width = '95%')
    ),
    box(
      width = 1, solidHeader = TRUE,
      radioButtons("color", "Color", # inline = TRUE,
                   c(Blue = "blue", Green = "green", Red = "red")
      )
    )
  )
)
# ---------------------
# Body
# ---------------------
body <- dashboardBody(
  mainPanel(
    tabsetPanel(type = "tabs",
                tabPanel("Tab 1",
                         fluidRow(
                           box(
                             title = "Histograma Basico",
                             status = "primary",
                             plotOutput("grafico1", height = 400)
                           ),
                           box(
                             title = "Histograma ggplot2",
                             plotOutput("grafico2", height = 400)
                           )
                         )
                ),
                tabPanel("Tab 2", h2("Tab 2")),
                tabPanel("Tab 3", h2("Tab 3"))
    )
  )
)
# ---------------------
# Interface - UI
# ---------------------
ui <- dashboardPage(
  dashboardHeader(title = 'GVSystem Tecopy', titleWidth = 290),
  sidebar,
  body
)
# ---------------------
# Server
# ---------------------
server <- function(input, output) {
  # Histogram drawn by visual() from histograma.R; waits until both
  # inputs are available before rendering
  output$grafico1 <- renderPlot({
    if (is.null(input$maximo) || is.null(input$color))
      return()
    visual(input$maximo,input$color)
  })
  # ggplot2 histogram produced as a side effect of sourcing the script
  output$grafico2 <- renderPlot({
    source("histograma2.R")
  })
}
# ---------------------
# Shiny application
# ---------------------
shinyApp(ui, server) | /ScriptPrueba.R | no_license | joaquinbentancourt/exercises-language-r | R | false | false | 2,439 | r | library('shiny')
library('shinydashboard')
# Fijar el directorio de trabajo en el lugar de donde esta el archivo ejecutandose
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Cargar fichero para exportar funciones
source("histograma.R")
# ---------------------
# Barra Lateral - Sidebar
# ---------------------
sidebar <- dashboardSidebar(
sidebarMenu(
# Selector de fechas
selectInput("selector", "Selector",
choices = c(2000:2013), multiple=TRUE, selectize=TRUE,
width = '95%'),
# Input inside of menuSubItem
menuSubItem(icon = NULL,
sliderInput("maximo", "Maximo", min=0, max=25, value=20,
width = '95%')
),
box(
width = 1, solidHeader = TRUE,
radioButtons("color", "Color", # inline = TRUE,
c(Blue = "blue", Green = "green", Red = "red")
)
)
)
)
# ---------------------
# cuerpo - Body
# ---------------------
body <- dashboardBody(
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Tab 1",
fluidRow(
box(
title = "Histograma Basico",
status = "primary",
plotOutput("grafico1", height = 400)
),
box(
title = "Histograma ggplot2",
plotOutput("grafico2", height = 400)
)
)
),
tabPanel("Tab 2", h2("Tab 2")),
tabPanel("Tab 3", h2("Tab 3"))
)
)
)
# ---------------------
# Inteface - UI
# ---------------------
ui <- dashboardPage(
dashboardHeader(title = 'GVSystem Tecopy', titleWidth = 290),
sidebar,
body
)
# ---------------------
# Servidor - Server
# ---------------------
server <- function(input, output) {
output$grafico1 <- renderPlot({
if (is.null(input$maximo) || is.null(input$color))
return()
visual(input$maximo,input$color)
})
output$grafico2 <- renderPlot({
source("histograma2.R")
})
}
# ---------------------
# Aplicacion Shiny
# ---------------------
shinyApp(ui, server) |
#!/usr/bin/env Rscript
# --------------------------------------------------------
# Author: Vanja Börjesson
# Date: 2017-02-09
# This script plots the GC frequency over several sequences
# as a heatmap alongside a per-sequence summary table, and
# saves the figure to <output>.pdf.
# Usage: script.R <gc.csv> <info.csv> <output-name>
# --------------------------------------------------------
rm(list=ls())
# Check that exactly three command-line arguments were supplied
arg <- commandArgs(trailingOnly=TRUE)
if (length(arg)!=3) {
  stop("Three argument must be supplied; input1.csv input2.csv output", call.=FALSE)
}
#install.packages("ggplot2")
library(ggplot2)
library(data.table)
# Read in csv files: per-window GC values and per-sequence summary info
gc <- fread(arg[1])
info <- fread(arg[2])
# Number of sequence ids (columns) and windows (rows)
# NOTE(review): assumes every column of gc is one sequence id -- confirm
nr_id <- ncol(gc)
nr_wind <- nrow(gc)
# Add a window index column, used as the plot's length axis
gc$length <- 1:nr_wind
library(tidyr)
library(ggplot2)
# Reshape to long format: one row per (window, sequence id) pair
gc_plotdata <- gather(gc, "length", 'ID', 1:nr_id)
names(gc_plotdata) <- c("length", 'ID', "PercentageGC")
# install.packages("viridis")
library(viridis)
# Heatmap of GC content per window, one horizontal band per sequence
p <- ggplot(gc_plotdata, aes(ID, length)) +
  geom_raster(aes(fill=PercentageGC)) + coord_flip() +
  scale_fill_gradientn(colours = viridis(256))+
  # scale_fill_gradient(low = "white", high = "darkblue") +
  theme_minimal()
p
#### plot data table ######
library(gridExtra)
names(info) <- c("Ids", 'Length', 'GC frequence')
info
# Table theme for the summary grob
t_grey <- ttheme_default(colhead=list(fg_params = list(parse=TRUE)))
tbl <- tableGrob(info,
                 rows=NULL,
                 theme=t_grey)
# Plot chart and table into one object
grid.arrange(p, tbl,
             as.table=TRUE)
#### Save and print plot to pdf ####
# NOTE(review): ggsave defaults to last_plot(), i.e. p, not the
# grid.arrange composite -- confirm this is the intended output
user_name = arg[3]
ggsave(paste0(user_name, '.pdf'))
| /HanSolo.R | no_license | vborjesson/HanSolo | R | false | false | 1,589 | r | #!/usr/bin/env Rscript
# --------------------------------------------------------
# Author: Vanja Börjesson
# Date: 2017-02-09
# This script plots the GC frequency over several sequences
# --------------------------------------------------------
rm(list=ls())
# Control that there is three arguments
arg <- commandArgs(trailingOnly=TRUE)
if (length(arg)!=3) {
stop("Three argument must be supplied; input1.csv input2.csv output", call.=FALSE)
}
#install.packages("ggplot2")
library(ggplot2)
library(data.table)
# read in csv-files
gc <- fread(arg[1])
info <- fread(arg[2])
# Calculate number of ids and windows
nr_id <- ncol(gc)
nr_wind <- nrow(gc)
# Create an x-axis and y-axis
gc$length <- 1:nr_wind
library(tidyr)
library(ggplot2)
gc_plotdata <- gather(gc, "length", 'ID', 1:nr_id)
names(gc_plotdata) <- c("length", 'ID', "PercentageGC")
# install.packages("viridis")
library(viridis)
p <- ggplot(gc_plotdata, aes(ID, length)) +
geom_raster(aes(fill=PercentageGC)) + coord_flip() +
scale_fill_gradientn(colours = viridis(256))+
# scale_fill_gradient(low = "white", high = "darkblue") +
theme_minimal()
p
#### plot data table ######
library(gridExtra)
names(info) <- c("Ids", 'Length', 'GC frequence')
info
# Set theme to table
t_grey <- ttheme_default(colhead=list(fg_params = list(parse=TRUE)))
tbl <- tableGrob(info,
rows=NULL,
theme=t_grey)
# Plot chart and table into one object
grid.arrange(p, tbl,
as.table=TRUE)
#### Save and print plot to pdf ####
user_name = arg[3]
ggsave(paste0(user_name, '.pdf'))
|
#' Create a group-based S3 object of class partition for the SetTarget function
#'
#' Group partitioning constructs data partitions such that all records with each
#' level in the column specified by the parameter partitionKeyCols occur
#' together in the same partition.
#'
#' This function is one of several convenience functions provided to simplify the task
#' of starting modeling projects with custom partitioning options. The other
#' functions are \code{CreateRandomPartition}, \code{CreateStratifiedPartition}, and
#' \code{CreateUserPartition}.
#'
#' @param validationType character. String specifying the type of partition
#' generated, either "TVH" or "CV".
#' @param holdoutPct integer. The percentage of data to be used as the holdout subset.
#' @param partitionKeyCols list. List containing a single string specifying
#' the name of the variable used in defining the group partition.
#' @param reps integer. The number of cross-validation folds to generate; only applicable
#' when validationType = "CV".
#' @param validationPct integer. The percentage of data to be used as the validation subset.
#' @return An S3 object of class 'partition' including the parameters required
#' by the SetTarget function to generate a group-based partitioning of
#' the modeling dataset.
#' @seealso \code{\link{CreateRandomPartition}}, \code{\link{CreateStratifiedPartition}},
#' \code{\link{CreateUserPartition}}.
#' @examples
#' CreateGroupPartition(validationType = "CV",
#' holdoutPct = 20,
#' partitionKeyCols = list("groupId"),
#' reps = 5)
#' @export
CreateGroupPartition <- function(validationType, holdoutPct, partitionKeyCols,
                                 reps = NULL, validationPct = NULL) {
  # Guard: the key column must be supplied as a single-element list.
  if (!is.list(partitionKeyCols)) {
    stop("Please specify partition column name as a list containing a single string.")
  }
  if (length(partitionKeyCols) > 1) {
    stop("Currently only one partition key column is supported.")
  }
  # Assemble the raw group-partition spec, then delegate to
  # ValidatePartition, which attaches reps / validationPct and the
  # 'partition' S3 class.
  groupSpec <- list(cvMethod = cvMethods$GROUP,
                    validationType = validationType,
                    holdoutPct = holdoutPct,
                    partitionKeyCols = partitionKeyCols)
  ValidatePartition(validationType = validationType,
                    partition = groupSpec,
                    reps = reps,
                    validationPct = validationPct)
}
#' Create a random sampling-based S3 object of class partition for the SetTarget function
#'
#' Random partitioning is supported for either Training/Validation/Holdout
#' ("TVH") or cross-validation ("CV") splits. In either case, the holdout
#' percentage (holdoutPct) must be specified; for the "CV" method, the
#' number of cross-validation folds (reps) must also be specified, while
#' for the "TVH" method, the validation subset percentage (validationPct)
#' must be specified.
#'
#' This function is one of several convenience functions provided to simplify the task
#' of starting modeling projects with custom partitioning options. The other
#' functions are \code{CreateGroupPartition}, \code{CreateStratifiedPartition}, and
#' \code{CreateUserPartition}.
#'
#' @inheritParams CreateGroupPartition
#' @return An S3 object of class partition including the parameters
#' required by SetTarget to generate a random partitioning of
#' the modeling dataset.
#' @seealso \code{\link{CreateStratifiedPartition}}, \code{\link{CreateGroupPartition}},
#' \code{\link{CreateUserPartition}}.
#' @examples
#' CreateRandomPartition(validationType = "CV", holdoutPct = 20, reps = 5)
#' @export
CreateRandomPartition <- function(validationType, holdoutPct, reps = NULL,
                                  validationPct = NULL) {
  # Build the minimal random-partition spec and let ValidatePartition
  # fill in reps / validationPct and assign the 'partition' class.
  randomSpec <- list(cvMethod = cvMethods$RANDOM,
                     validationType = validationType,
                     holdoutPct = holdoutPct)
  ValidatePartition(validationType = validationType,
                    partition = randomSpec,
                    reps = reps,
                    validationPct = validationPct)
}
#' Create a stratified sampling-based S3 object of class partition for the SetTarget function
#'
#' Stratified partitioning is supported for binary classification problems and
#' it randomly partitions the modeling data, keeping the percentage of positive
#' class observations in each partition the same as in the original dataset.
#' Stratified partitioning is supported for either Training/Validation/Holdout
#' ("TVH") or cross-validation ("CV") splits. In either case, the holdout
#' percentage (holdoutPct) must be specified; for the "CV" method, the number
#' of cross-validation folds (reps) must also be specified, while for the "TVH"
#' method, the validation subset percentage (validationPct) must be specified.
#'
#' This function is one of several convenience functions provided to simplify the task
#' of starting modeling projects with custom partitioning options. The other
#' functions are \code{CreateGroupPartition}, \code{CreateRandomPartition}, and
#' \code{CreateUserPartition}.
#'
#' @inheritParams CreateGroupPartition
#' @return An S3 object of class 'partition' including the parameters required
#' by the SetTarget function to generate a stratified partitioning of the
#' modeling dataset.
#' @seealso \code{\link{CreateGroupPartition}}, \code{\link{CreateRandomPartition}},
#' \code{\link{CreateUserPartition}}.
#' @examples
#' CreateStratifiedPartition(validationType = "CV", holdoutPct = 20, reps = 5)
#' @export
CreateStratifiedPartition <- function(validationType, holdoutPct, reps = NULL,
                                      validationPct = NULL) {
  # Build the minimal stratified-partition spec and let ValidatePartition
  # fill in reps / validationPct and assign the 'partition' class.
  stratifiedSpec <- list(cvMethod = cvMethods$STRATIFIED,
                         validationType = validationType,
                         holdoutPct = holdoutPct)
  ValidatePartition(validationType = validationType,
                    partition = stratifiedSpec,
                    reps = reps,
                    validationPct = validationPct)
}
#' Create a class partition object for use in the SetTarget function representing a
#' user-defined partition.
#'
#' Creates a list object used by the SetTarget function to specify either
#' Training/Validation/Holdout (validationType = "TVH") or cross-validation
#' (validationType = "CV") partitions of the modeling dataset based on the values
#' included in a column from the dataset. In either case, the name of this data
#' column must be specified (as userPartitionCol).
#'
#' For the "TVH" option of cvMethod, no cross-validation is used. Users must specify
#' the trainingLevel and validationLevel; use of a holdoutLevel is always recommended
#' but not required. If no holdoutLevel is used, then the column must contain exactly
#' 2 unique values. If a holdoutLevel is used, the column must contain exactly 3 unique
#' values.
#'
#' For the "CV" option, each value in the column will be used to separate rows into
#' cross-validation folds. Use of a holdoutLevel is optional; if not specified, then
#' no holdout is used.
#'
#' This function is one of several convenience functions provided to simplify the task
#' of starting modeling projects with custom partitioning options. The other
#' functions are \code{CreateGroupPartition}, \code{CreateRandomPartition}, and
#' \code{CreateStratifiedPartition}.
#'
#' @inheritParams CreateGroupPartition
#' @param userPartitionCol character. String naming the data column from the
#' modeling dataset containing the subset designations.
#' @param cvHoldoutLevel character. Data value from userPartitionCol that identifies the
#' holdout subset under the "CV" option.
#' @param trainingLevel character. Data value from userPartitionCol that identifies the
#' training subset under the "TVH" option.
#' @param holdoutLevel character. Data value from userPartitionCol that identifies the
#' holdout subset under both "TVH" and "CV" options. To specify that the project should
#' not use a holdout you can omit this parameter or pass NA directly.
#' @param validationLevel character. Data value from userPartitionCol that identifies the
#' validation subset under the "TVH" option.
#' @return An S3 object of class 'partition' including the parameters required
#' by the SetTarget function to generate a user-specified of the modeling
#' dataset.
#' @seealso \code{\link{CreateGroupPartition}}, \code{\link{CreateRandomPartition}},
#' \code{\link{CreateStratifiedPartition}}.
#' @examples
#' CreateUserPartition(validationType = "CV", userPartitionCol = "TVHflag", cvHoldoutLevel = NA)
#' @export
CreateUserPartition <- function(validationType, userPartitionCol,
                                cvHoldoutLevel = NULL, trainingLevel = NULL,
                                holdoutLevel = NULL, validationLevel = NULL) {
  # The partition column must be supplied by name.
  if (!is.character(userPartitionCol)) {
    stop("Please specify partition column name as a character string")
  }
  spec <- list(cvMethod = cvMethods$USER, validationType = validationType,
               userPartitionCol = userPartitionCol)
  if (validationType == "CV") {
    # For CV, a missing holdout level is recorded as NA (no holdout).
    spec$cvHoldoutLevel <- if (is.null(cvHoldoutLevel)) NA else cvHoldoutLevel
  } else if (identical(validationType, "TVH")) {
    # TVH requires explicit training and validation levels; the holdout
    # level is optional (NULL simply leaves the element unset).
    if (is.null(trainingLevel)) {
      stop(strwrap("Parameter trainingLevel must be specified for user
                   partition with validationType = 'TVH'"))
    }
    spec$trainingLevel <- trainingLevel
    spec$holdoutLevel <- holdoutLevel
    if (is.null(validationLevel)) {
      stop(strwrap("Parameter validationLevel must be specified for user
                   partition with validationType = 'TVH'"))
    }
    spec$validationLevel <- validationLevel
  } else {
    stop(strwrap(paste("validationType", validationType, "not valid")))
  }
  class(spec) <- "partition"
  spec
}
#' Create a list describing backtest parameters
#'
#' Uniquely defines a Backtest used in a DatetimePartitioning
#'
#' Includes only the attributes of a backtest directly controllable by users. The other attributes
#' are assigned by the DataRobot application based on the project dataset and the user-controlled
#' settings.
#' All durations should be specified with a duration string such as those returned
#' by the ConstructDurationString helper function.
#'
#' @param index integer. The index of the backtest
#' @param gapDuration character. The desired duration of the gap
#' between training and validation data for the backtest in duration format (ISO8601).
#' @param validationStartDate character. The desired start date of the validation data
#' for this backtest (RFC 3339 format).
#' @param validationDuration character. The desired end date
#' of the validation data for this backtest in duration format (ISO8601).
#' @return list with backtest parameters
#' @examples
#' zeroDayDuration <- ConstructDurationString()
#' hundredDayDuration <- ConstructDurationString(days = 100)
#' CreateBacktestSpecification(index = 0,
#' gapDuration = zeroDayDuration,
#' validationStartDate = "1989-12-01",
#' validationDuration = hundredDayDuration)
#' @export
CreateBacktestSpecification <- function(index, gapDuration, validationStartDate,
                                        validationDuration) {
  # Assemble the user-controllable backtest fields into a plain list;
  # the DataRobot server supplies the remaining attributes.
  list(index = index,
       gapDuration = gapDuration,
       validationStartDate = validationStartDate,
       validationDuration = validationDuration)
}
as.dataRobotBacktestSpecification <- function(inList) {
  # Restrict the list to the fields the DataRobot API recognizes for a
  # backtest specification.
  backtestFields <- c("index",
                      "gapDuration",
                      "validationStartDate",
                      "validationDuration")
  ApplySchema(inList, backtestFields)
}
#' Construct a valid string representing a duration in accordance with ISO8601
#'
#' A duration of six months, 3 days, and 12 hours could be represented as P6M3DT12H.
#'
#' @param years integer. The number of years in the duration.
#' @param months integer. The number of months in the duration.
#' @param days integer. The number of days in the duration.
#' @param hours integer. The number of hours in the duration.
#' @param minutes integer. The number of minutes in the duration.
#' @param seconds integer. The number of seconds in the duration.
#' @return The duration string, specified compatibly with ISO8601.
#' @examples
#' ConstructDurationString()
#' ConstructDurationString(days = 100)
#' ConstructDurationString(years = 10, months = 2, days = 5, seconds = 12)
#' @export
ConstructDurationString <- function(years = 0, months = 0, days = 0,
                                    hours = 0, minutes = 0, seconds = 0) {
  # ISO8601 duration: date part (Y/M/D) then time part after 'T' (H/M/S).
  # %s coerces each numeric component via as.character, matching the
  # behavior of paste(..., sep = "").
  sprintf("P%sY%sM%sDT%sH%sM%sS",
          years, months, days, hours, minutes, seconds)
}
#' Create a list describing datetime partition parameters
#'
#' Uniquely defines a DatetimePartitioning for some project
#'
#' Includes only the attributes of DatetimePartitioning that are directly controllable by users,
#' not those determined by the DataRobot application based on the project dataset and the
#' user-controlled settings.
#' This is the specification that should be passed to SetTarget via the
#' partition parameter. To see the full partitioning based on the project dataset,
#' GenerateDatetimePartition.
#' All durations should be specified with a duration string such as those returned
#' by the ConstructDurationString helper function.
#'
#' @param datetimePartitionColumn character. The name of the column whose values as dates
#' are used to assign a row to a particular partition
#' @param autopilotDataSelectionMethod character. Optional. Whether models created
#' by the autopilot should use "rowCount" or "duration" as their dataSelectionMethod
#' @param validationDuration character. Optional. The default validationDuration for the
#' backtests
#' @param holdoutStartDate character. The start date of holdout scoring data
#' (RFC 3339 format). If holdoutStartDate is specified, holdoutDuration must also be specified.
#' @param holdoutDuration character. Optional. The duration of the holdout scoring data.
#' If holdoutDuration is specified, holdoutStartDate must also be specified.
#' @param disableHoldout logical. Optional. Whether to suppress allocating the holdout fold.
#' If set to TRUE, holdoutStartDate and holdoutDuration must not be specified.
#' @param gapDuration character. Optional. The duration of the gap between training and
#' holdout scoring data.
#' @param numberOfBacktests integer. The number of backtests to use.
#' @param backtests list. List of BacktestSpecification the exact specification of backtests to use.
#' The indexes of the specified backtests should range from 0 to numberOfBacktests - 1.
#' If any backtest is left unspecified, a default configuration will be chosen.
#' @param useTimeSeries logical. Whether to create a time series project (if TRUE) or an OTV
#' project which uses datetime partitioning (if FALSE). The default behavior is to create an
#' OTV project.
#' @param defaultToKnownInAdvance logical. Whether to default to treating features as known in
#' advance. Defaults to FALSE. Only used for time series project. Known in advance features are
#' expected to be known for dates in the future when making predictions (e.g., "is this a
#' holiday").
#' @param featureDerivationWindowStart integer. Optional. Offset into the past to define how far
#' back relative to the forecast point the feature derivation window should start. Only used for
#' time series projects. Expressed in terms of the \code{timeUnit} of the
#' \code{datetimePartitionColumn}.
#' @param featureDerivationWindowEnd integer. Optional. Offset into the past to define how far
#' back relative to the forecast point the feature derivation window should end. Only used for
#' time series projects. Expressed in terms of the \code{timeUnit} of the
#' \code{datetimePartitionColumn}.
#' @param featureSettings list. Optional. A list specifying settings for each feature. For each
#' feature you would like to set feature settings for, pass the following in a list:
#' \itemize{
#' \item featureName character. The name of the feature to set feature settings.
#' \item knownInAdvance logical. Optional. Whether or not the feature is known in advance.
#' Used for time series only. Defaults to \code{FALSE}.
#' \item doNotDerive logical. Optional. If \code{TRUE}, no time series derived features
#' (e.g., lags) will be automatically engineered from this feature. Used for time series only.
#' Defaults to \code{FALSE}.
#' }
#' @param treatAsExponential character. Optional. Defaults to "auto". Used to specify whether to
#' treat data as exponential trend and apply transformations like log-transform. Use values
#' from \code{TreatAsExponential} enum.
#' @param differencingMethod character. Optional. Defaults to "auto". Used to specify differencing
#' method to apply if data is stationary. Use values from \code{DifferencingMethod}.
#' @param periodicities list. Optional. A list of periodicities for different times. Must be
#' specified as a list of lists, where each list item specifies the `timeSteps` for a
#' particular `timeUnit`. Should be "ROW" if \code{windowsBasisUnit} is "ROW".
#' @param windowsBasisUnit character. Optional. Indicates which unit is the basis for the feature
#' derivation window and forecast window. Valid options are a time unit (see \code{TimeUnit})
#' or "ROW".
#' @param forecastWindowStart integer. Optional. Offset into the future to define how far forward
#' relative to the forecast point the forecast window should start. Only used for time series
#' projects. Expressed in terms of the \code{timeUnit} of the \code{datetimePartitionColumn}.
#' @param forecastWindowEnd integer. Optional. Offset into the future to define how far forward
#' relative to the forecast point the forecast window should end. Only used for time series
#' projects. Expressed in terms of the \code{timeUnit} of the \code{datetimePartitionColumn}.
#' @param multiseriesIdColumns list. A list of the names of multiseries id columns to define series
#' @param useCrossSeries logical. If \code{TRUE}, cross series features will be included. For
#' details, see "Calculating features across series" in the Time Series section of the
#' DataRobot user guide.
#' @param aggregationType character. Optional. The aggregation type to apply when creating cross
#' series features. Must be either "total" or "average". See \code{SeriesAggregationType}.
#' @param calendar character. Optional. Either the calendar object or calendar id to use
#' for this project.
#' @param crossSeriesGroupByColumns character. Optional. Column to split a cross series into
#' further groups. For example, if every series is sales of an individual product, the cross
#' series group could be e product category with values like "men's clothing", "sports
#' equipment", etc. Requires multiseries with \code{useCrossSeries} enabled.
#' @return An S3 object of class 'partition' including the parameters required by the
#' SetTarget function to generate a datetime partitioning of the modeling dataset.
#' @examples
#' CreateDatetimePartitionSpecification("date_col")
#' CreateDatetimePartitionSpecification("date",
#' featureSettings = list(
#' list("featureName" = "Product_offers",
#' "defaultToKnownInAdvance" = TRUE)))
#' partition <- CreateDatetimePartitionSpecification("dateColumn",
#' treatAsExponential = TreatAsExponential$Always,
#' differencingMethod = DifferencingMethod$Seasonal,
#' periodicities = list(list("timeSteps" = 10,
#' "timeUnit" = "HOUR"),
#' list("timeSteps" = 600,
#' "timeUnit" = "MINUTE"),
#' list("timeSteps" = 7,
#' "timeUnit" = "DAY")))
#' @export
CreateDatetimePartitionSpecification <- function(datetimePartitionColumn,
                                                 autopilotDataSelectionMethod = NULL,
                                                 validationDuration = NULL,
                                                 holdoutStartDate = NULL,
                                                 holdoutDuration = NULL,
                                                 disableHoldout = NULL,
                                                 gapDuration = NULL,
                                                 numberOfBacktests = NULL,
                                                 backtests = NULL,
                                                 useTimeSeries = FALSE,
                                                 defaultToKnownInAdvance = FALSE,
                                                 featureDerivationWindowStart = NULL,
                                                 featureDerivationWindowEnd = NULL,
                                                 featureSettings = NULL,
                                                 treatAsExponential = NULL,
                                                 differencingMethod = NULL,
                                                 windowsBasisUnit = NULL,
                                                 periodicities = NULL,
                                                 forecastWindowStart = NULL,
                                                 forecastWindowEnd = NULL,
                                                 multiseriesIdColumns = NULL,
                                                 useCrossSeries = NULL,
                                                 aggregationType = NULL,
                                                 crossSeriesGroupByColumns = NULL,
                                                 calendar = NULL) {
  # Resolve `calendar` -- which may be a calendar object, a bare id, or NULL --
  # down to an id (or NULL); anything else is rejected.
  if (is(calendar, "dataRobotCalendar")) {
    calendarId <- ValidateCalendar(calendar)
  } else if (IsId(calendar) || is.null(calendar)) {
    calendarId <- calendar
  } else {
    stop("Invalid calendar specification.")
  }
  # Collect every user-controllable setting, then drop the NULL entries so the
  # specification only carries values the caller actually provided. (Dropping
  # NULLs reproduces the semantics of assigning each field with `$<-`, which
  # silently skips NULL values.) Note the spec field is `useCrossSeriesFeatures`
  # even though the function argument is named `useCrossSeries`.
  settings <- list(datetimePartitionColumn = datetimePartitionColumn,
                   autopilotDataSelectionMethod = autopilotDataSelectionMethod,
                   validationDuration = validationDuration,
                   holdoutStartDate = holdoutStartDate,
                   holdoutDuration = holdoutDuration,
                   disableHoldout = disableHoldout,
                   gapDuration = gapDuration,
                   numberOfBacktests = numberOfBacktests,
                   backtests = backtests,
                   useTimeSeries = useTimeSeries,
                   defaultToKnownInAdvance = defaultToKnownInAdvance,
                   featureDerivationWindowStart = featureDerivationWindowStart,
                   featureDerivationWindowEnd = featureDerivationWindowEnd,
                   featureSettings = featureSettings,
                   treatAsExponential = treatAsExponential,
                   differencingMethod = differencingMethod,
                   periodicities = periodicities,
                   windowsBasisUnit = windowsBasisUnit,
                   forecastWindowStart = forecastWindowStart,
                   forecastWindowEnd = forecastWindowEnd,
                   multiseriesIdColumns = multiseriesIdColumns,
                   useCrossSeriesFeatures = useCrossSeries,
                   aggregationType = aggregationType,
                   calendarId = calendarId,
                   crossSeriesGroupByColumns = crossSeriesGroupByColumns)
  partition <- c(list(cvMethod = cvMethods$DATETIME),
                 Filter(Negate(is.null), settings))
  class(partition) <- "partition"
  partition
}
# Coerce a raw datetime partition specification into the canonical client shape.
#
# Applies the known schema to the top-level list, normalizes `featureSettings`
# (a single named entry becomes a one-element list of lists), and coerces the
# backtest entries via as.dataRobotBacktestSpecification.
#
# @param inList list. Raw specification fields (e.g. user-supplied or decoded
#   from JSON).
# @return The specification restricted to known fields, with featureSettings
#   and backtests normalized.
as.dataRobotDatetimePartitionSpecification <- function(inList) {
  elements <- c("cvMethod",
                "datetimePartitionColumn",
                "autopilotDataSelectionMethod",
                "validationDuration",
                "holdoutStartDate",
                "holdoutDuration",
                "disableHoldout",
                "gapDuration",
                "numberOfBacktests",
                "backtests",
                "useTimeSeries",
                "defaultToKnownInAdvance",
                "featureDerivationWindowStart",
                "featureDerivationWindowEnd",
                "featureSettings",
                "treatAsExponential",
                "differencingMethod",
                "windowsBasisUnit",
                "periodicities",
                "forecastWindowStart",
                "forecastWindowEnd",
                "multiseriesIdColumns",
                "numberOfKnownInAdvanceFeatures",
                "useCrossSeriesFeatures",
                "aggregationType",
                "calendarId",
                "crossSeriesGroupByColumns")
  outList <- ApplySchema(inList, elements)
  featureSettings <- c("featureName", "knownInAdvance", "doNotDerive")
  # A single feature-settings entry arrives as a named list; wrap it so the
  # result is always a list of per-feature lists.
  if (!is.null(outList$featureSettings) && !is.null(names(outList$featureSettings))) {
    outList$featureSettings <- list(outList$featureSettings)
  }
  outList$featureSettings <- lapply(outList$featureSettings, ApplySchema, featureSettings)
  if (!is.null(outList$backtests)) {
    # BUG FIX: a data.frame is also a list (is.list(data.frame()) is TRUE), so
    # the data.frame case must be tested first. Previously the is.list() branch
    # shadowed it, making the data.frame branch unreachable and mangling
    # data.frame backtests column-by-column via lapply().
    if (is.data.frame(outList$backtests)) {
      outList$backtests <- as.dataRobotBacktestSpecification(outList$backtests)
    } else if (is.list(outList$backtests)) {
      outList$backtests <- lapply(outList$backtests, as.dataRobotBacktestSpecification)
    }
  }
  outList
}
#' Preview the full partitioning determined by a DatetimePartitioningSpecification
#'
#' Based on the project dataset and the partitioning specification, inspect the full
#' partitioning that would be used if the same specification were passed into SetTarget.
#' This is not intended to be passed to SetTarget.
#'
#' @inheritParams DeleteProject
#' @param spec list. Datetime partition specification returned by
#' \code{CreateDatetimePartitionSpecification}
#' @return list describing datetime partition with following components
#' \itemize{
#' \item cvMethod. The type of validation scheme used for the project.
#' \item projectId character. The id of the project this partitioning applies to.
#' \item datetimePartitionColumn character. The name of the column whose values
#' as dates are used to assign a row to a particular partition.
#' \item dateFormat character. The format (e.g. "%Y-%m-%d %H:%M:%S") by which the
#' partition column was interpreted (compatible with strftime
#' [https://docs.python.org/2/library/time.html#time.strftime]).
#' \item autopilotDataSelectionMethod character. Whether models created
#' by the autopilot use "rowCount" or "duration" as their dataSelectionMethod.
#' \item validationDuration character. The validation duration specified when
#' initializing the partitioning - not directly significant if the backtests have been
#' modified, but used as the default validationDuration for the backtests.
#' \item availableTrainingStartDate character. The start date of the available training
#' data for scoring the holdout.
#' \item availableTrainingDuration character. The duration of the available training data
#' for scoring the holdout.
#' \item availableTrainingRowCount integer. The number of rows in the available training data for
#' scoring the holdout. Only available when retrieving the partitioning after setting the
#' target.
#' \item availableTrainingEndDate character. The end date of the available training data
#' for scoring the holdout.
#' \item primaryTrainingStartDate character. The start date of primary training data for
#' scoring the holdout.
#' \item primaryTrainingDuration character. The duration of the primary training data for
#' scoring the holdout.
#' \item primaryTrainingRowCount integer. The number of rows in the primary training data for
#' scoring the holdout. Only available when retrieving the partitioning after setting the
#' target.
#' \item primaryTrainingEndDate character. The end date of the primary training data for
#' scoring the holdout.
#' \item gapStartDate character. The start date of the gap between training and holdout
#' scoring data.
#' \item gapDuration character. The duration of the gap between training and holdout
#' scoring data.
#' \item gapRowCount integer. The number of rows in the gap between training and holdout scoring
#' data.
#' Only available when retrieving the partitioning after setting the target.
#' \item gapEndDate character. The end date of the gap between training and holdout scoring
#' data.
#' \item holdoutStartDate character. The start date of holdout scoring data.
#' \item holdoutDuration character. The duration of the holdout scoring data.
#' \item holdoutRowCount integer. The number of rows in the holdout scoring data.
#' Only available when retrieving the partitioning after setting the target.
#' \item holdoutEndDate character. The end date of the holdout scoring data.
#' \item numberOfBacktests integer. the number of backtests used.
#' \item backtests data.frame. A data frame of partition backtest. Each element represent one
#' backtest and has the following components:
#' index, availableTrainingStartDate, availableTrainingDuration, availableTrainingRowCount,
#' availableTrainingEndDate, primaryTrainingStartDate, primaryTrainingDuration,
#' primaryTrainingRowCount, primaryTrainingEndDate, gapStartDate, gapDuration, gapRowCount,
#' gapEndDate, validationStartDate, validationDuration, validationRowCount,
#' validationEndDate, totalRowCount.
#' \item useTimeSeries logical. Whether the project is a time series project (if TRUE) or an OTV
#' project which uses datetime partitioning (if FALSE).
#' \item defaultToKnownInAdvance logical. Whether the project defaults to treating
#'   features as known in advance. Known in advance features are time series features that
#' are expected to be known for dates in the future when making predictions (e.g., "is
#' this a holiday").
#' \item featureDerivationWindowStart integer. Offset into the past to define how far
#' back relative to the forecast point the feature derivation window should start. Only used for
#' time series projects. Expressed in terms of the \code{timeUnit} of the
#' \code{datetimePartitionColumn}.
#' \item featureDerivationWindowEnd integer. Offset into the past to define how far back relative
#' to the forecast point the feature derivation window should end. Only used for
#' time series projects. Expressed in terms of the \code{timeUnit} of the
#' \code{datetimePartitionColumn}.
#' \item forecastWindowStart integer. Offset into the future to define how far forward relative
#' to the forecast point the forecast window should start. Only used for time series
#' projects. Expressed in terms of the \code{timeUnit} of the \code{datetimePartitionColumn}.
#' \item forecastWindowEnd integer. Offset into the future to define how far forward relative to
#' the forecast point the forecast window should end. Only used for time series
#' projects. Expressed in terms of the \code{timeUnit} of the \code{datetimePartitionColumn}.
#' \item featureSettings list. A list of lists specifying settings for each feature. For each
#' feature you would like to set feature settings for, pass the following in a list:
#' \itemize{
#' \item featureName character. The name of the feature to set feature settings.
#' \item knownInAdvance logical. Optional. Whether or not the feature is known in advance.
#' Used for time series only. Defaults to \code{FALSE}.
#' \item doNotDerive logical. Optional. If \code{TRUE}, no time series derived features
#' (e.g., lags) will be automatically engineered from this feature. Used for time series
#' only. Defaults to \code{FALSE}.
#' }
#' \item treatAsExponential character. Specifies whether to treat data as exponential trend
#'   and apply transformations like log-transform. Uses values from
#' \code{TreatAsExponential}.
#' \item differencingMethod character. Used to specify differencing method to apply if data is
#' stationary. Use values from \code{DifferencingMethod}.
#' \item windowsBasisUnit character. Indicates which unit is the basis for the feature derivation
#' window and forecast window. Uses values from \code{TimeUnit} and the value "ROW".
#' \item periodicities list. A list of periodicities for different times, specified as a list of
#' lists, where each list item specifies the `timeSteps` for a particular `timeUnit`. Will be
#'   "ROW" if \code{windowsBasisUnit} is "ROW".
#' \item totalRowCount integer. The number of rows in the project dataset. Only available when
#' retrieving the partitioning after setting the target. Thus it will be NULL for
#' \code{GenerateDatetimePartition} and populated for \code{GetDatetimePartition}.
#' \item validationRowCount integer. The number of rows in the validation set.
#' \item multiseriesIdColumns list. A list of the names of multiseries id columns to define
#' series.
#' \item numberOfKnownInAdvanceFeatures integer. The number of known in advance features.
#' \item useCrossSeriesFeatures logical. Whether or not cross series features are included.
#' \item aggregationType character. The aggregation type to apply when creating cross series
#' features. See \code{SeriesAggregationType}.
#' \item calendarId character. The ID of the calendar used for this project, if any.
#' }
#' @examples
#' \dontrun{
#' projectId <- "59a5af20c80891534e3c2bde"
#' partitionSpec <- CreateDatetimePartitionSpecification("date_col")
#' GenerateDatetimePartition(projectId, partitionSpec)
#' }
#' @export
GenerateDatetimePartition <- function(project, spec) {
  projectId <- ValidateProject(project)
  # cvMethod is a client-side marker only; strip it before posting the spec.
  spec$cvMethod <- NULL
  response <- DataRobotPOST(UrlJoin("projects", projectId, "datetimePartitioning"),
                            body = spec, encode = "json")
  # Re-attach the marker so the converted result identifies its partition type.
  response$cvMethod <- cvMethods$DATETIME
  as.dataRobotDatetimePartition(response)
}
#' Retrieve the DatetimePartitioning from a project
#'
#' Only available if the project has already set the target as a datetime project.
#'
#' @inheritParams DeleteProject
#' @inherit GenerateDatetimePartition return
#' @examples
#' \dontrun{
#' projectId <- "59a5af20c80891534e3c2bde"
#' GetDatetimePartition(projectId)
#' }
#' @export
GetDatetimePartition <- function(project) {
  projectId <- ValidateProject(project)
  response <- DataRobotGET(UrlJoin("projects", projectId, "datetimePartitioning"))
  # Tag the server response with the client-side cvMethod marker before
  # converting it to the canonical partition shape.
  response$cvMethod <- cvMethods$DATETIME
  as.dataRobotDatetimePartition(response)
}
# Coerce a full datetime partitioning (as returned by the server) into the
# canonical client shape: restrict to known fields, normalize featureSettings
# and backtests, and add the derived isTimeSeries / isMultiSeries /
# isCrossSeries convenience flags.
as.dataRobotDatetimePartition <- function(inList) {
  partitionFields <- c("cvMethod", "projectId", "datetimePartitionColumn", "dateFormat",
                       "autopilotDataSelectionMethod", "validationDuration",
                       "availableTrainingStartDate", "availableTrainingDuration",
                       "availableTrainingRowCount", "availableTrainingEndDate",
                       "primaryTrainingStartDate", "primaryTrainingDuration",
                       "primaryTrainingRowCount", "primaryTrainingEndDate",
                       "gapStartDate", "gapDuration", "gapRowCount", "gapEndDate",
                       "holdoutStartDate", "holdoutDuration", "holdoutRowCount",
                       "holdoutEndDate", "numberOfBacktests", "backtests",
                       "useTimeSeries", "defaultToKnownInAdvance",
                       "featureDerivationWindowStart", "featureDerivationWindowEnd",
                       "forecastWindowStart", "forecastWindowEnd", "featureSettings",
                       "treatAsExponential", "differencingMethod", "windowsBasisUnit",
                       "periodicities", "totalRowCount", "validationRowCount",
                       "multiseriesIdColumns", "numberOfKnownInAdvanceFeatures",
                       "useCrossSeriesFeatures", "aggregationType", "calendarId")
  out <- ApplySchema(inList, partitionFields)
  # A single feature-settings entry arrives as a named list; wrap it so the
  # result is always a list of per-feature lists.
  if (!is.null(out$featureSettings) && !is.null(names(out$featureSettings))) {
    out$featureSettings <- list(out$featureSettings)
  }
  out$featureSettings <- lapply(out$featureSettings, ApplySchema,
                                c("featureName", "knownInAdvance", "doNotDerive"))
  backtestFields <- c("index", "validationRowCount", "primaryTrainingDuration",
                      "primaryTrainingEndDate", "availableTrainingStartDate",
                      "primaryTrainingStartDate", "validationEndDate",
                      "availableTrainingDuration", "availableTrainingRowCount",
                      "gapEndDate", "validationDuration", "gapStartDate",
                      "availableTrainingEndDate", "primaryTrainingRowCount",
                      "validationStartDate", "totalRowCount", "gapRowCount",
                      "gapDuration")
  out$backtests <- ApplySchema(out$backtests, backtestFields)
  # Convenience flags derived from the partition contents.
  out$isTimeSeries <- isTRUE(out$useTimeSeries)
  out$isMultiSeries <- length(out$multiseriesIdColumns) > 0
  out$isCrossSeries <- isTRUE(out$useCrossSeriesFeatures)
  out
}
| /R/Partitions.R | no_license | anno526/datarobot | R | false | false | 38,035 | r | #' Create a group-based S3 object of class partition for the SetTarget function
#'
#' Group partitioning constructs data partitions such that all records with each
#' level in the column specified by the parameter partitionKeyCols occur
#' together in the same partition.
#'
#' This function is one of several convenience functions provided to simplify the task
#' of starting modeling projects with custom partitioning options. The other
#' functions are \code{CreateRandomPartition}, \code{CreateStratifiedPartition}, and
#' \code{CreateUserPartition}.
#'
#' @param validationType character. String specifying the type of partition
#' generated, either "TVH" or "CV".
#' @param holdoutPct integer. The percentage of data to be used as the holdout subset.
#' @param partitionKeyCols list. List containing a single string specifying
#' the name of the variable used in defining the group partition.
#' @param reps integer. The number of cross-validation folds to generate; only applicable
#' when validationType = "CV".
#' @param validationPct integer. The percentage of data to be used as the validation subset.
#' @return An S3 object of class 'partition' including the parameters required
#' by the SetTarget function to generate a group-based partitioning of
#' the modeling dataset.
#' @seealso \code{\link{CreateRandomPartition}}, \code{\link{CreateStratifiedPartition}},
#' \code{\link{CreateUserPartition}}.
#' @examples
#' CreateGroupPartition(validationType = "CV",
#' holdoutPct = 20,
#' partitionKeyCols = list("groupId"),
#' reps = 5)
#' @export
CreateGroupPartition <- function(validationType, holdoutPct, partitionKeyCols,
                                 reps = NULL, validationPct = NULL) {
  # Guard clauses: the partition key must be given as a list holding exactly
  # one column name.
  if (!is.list(partitionKeyCols)) {
    stop("Please specify partition column name as a list containing a single string.")
  }
  if (length(partitionKeyCols) > 1) {
    stop("Currently only one partition key column is supported.")
  }
  spec <- list(cvMethod = cvMethods$GROUP,
               validationType = validationType,
               holdoutPct = holdoutPct,
               partitionKeyCols = partitionKeyCols)
  # ValidatePartition fills in the CV/TVH-specific fields and returns the
  # finished 'partition' object.
  ValidatePartition(validationType = validationType,
                    partition = spec,
                    reps = reps,
                    validationPct = validationPct)
}
#' Create a random sampling-based S3 object of class partition for the SetTarget function
#'
#' Random partitioning is supported for either Training/Validation/Holdout
#' ("TVH") or cross-validation ("CV") splits. In either case, the holdout
#' percentage (holdoutPct) must be specified; for the "CV" method, the
#' number of cross-validation folds (reps) must also be specified, while
#' for the "TVH" method, the validation subset percentage (validationPct)
#' must be specified.
#'
#' This function is one of several convenience functions provided to simplify the task
#' of starting modeling projects with custom partitioning options. The other
#' functions are \code{CreateGroupPartition}, \code{CreateStratifiedPartition}, and
#' \code{CreateUserPartition}.
#'
#' @inheritParams CreateGroupPartition
#' @return An S3 object of class partition including the parameters
#' required by SetTarget to generate a random partitioning of
#' the modeling dataset.
#' @seealso \code{\link{CreateStratifiedPartition}}, \code{\link{CreateGroupPartition}},
#' \code{\link{CreateUserPartition}}.
#' @examples
#' CreateRandomPartition(validationType = "CV", holdoutPct = 20, reps = 5)
#' @export
CreateRandomPartition <- function(validationType, holdoutPct, reps = NULL,
                                  validationPct = NULL) {
  spec <- list(cvMethod = cvMethods$RANDOM,
               validationType = validationType,
               holdoutPct = holdoutPct)
  # ValidatePartition fills in the CV/TVH-specific fields and returns the
  # finished 'partition' object.
  ValidatePartition(validationType = validationType,
                    partition = spec,
                    reps = reps,
                    validationPct = validationPct)
}
#' Create a stratified sampling-based S3 object of class partition for the SetTarget function
#'
#' Stratified partitioning is supported for binary classification problems and
#' it randomly partitions the modeling data, keeping the percentage of positive
#' class observations in each partition the same as in the original dataset.
#' Stratified partitioning is supported for either Training/Validation/Holdout
#' ("TVH") or cross-validation ("CV") splits. In either case, the holdout
#' percentage (holdoutPct) must be specified; for the "CV" method, the number
#' of cross-validation folds (reps) must also be specified, while for the "TVH"
#' method, the validation subset percentage (validationPct) must be specified.
#'
#' This function is one of several convenience functions provided to simplify the task
#' of starting modeling projects with custom partitioning options. The other
#' functions are \code{CreateGroupPartition}, \code{CreateRandomPartition}, and
#' \code{CreateUserPartition}.
#'
#' @inheritParams CreateGroupPartition
#' @return An S3 object of class 'partition' including the parameters required
#' by the SetTarget function to generate a stratified partitioning of the
#' modeling dataset.
#' @seealso \code{\link{CreateGroupPartition}}, \code{\link{CreateRandomPartition}},
#' \code{\link{CreateUserPartition}}.
#' @examples
#' CreateStratifiedPartition(validationType = "CV", holdoutPct = 20, reps = 5)
#' @export
CreateStratifiedPartition <- function(validationType, holdoutPct, reps = NULL,
                                      validationPct = NULL) {
  spec <- list(cvMethod = cvMethods$STRATIFIED,
               validationType = validationType,
               holdoutPct = holdoutPct)
  # ValidatePartition fills in the CV/TVH-specific fields and returns the
  # finished 'partition' object.
  ValidatePartition(validationType = validationType,
                    partition = spec,
                    reps = reps,
                    validationPct = validationPct)
}
#' Create a class partition object for use in the SetTarget function representing a
#' user-defined partition.
#'
#' Creates a list object used by the SetTarget function to specify either
#' Training/Validation/Holdout (validationType = "TVH") or cross-validation
#' (validationType = "CV") partitions of the modeling dataset based on the values
#' included in a column from the dataset. In either case, the name of this data
#' column must be specified (as userPartitionCol).
#'
#' For the "TVH" option of cvMethod, no cross-validation is used. Users must specify
#' the trainingLevel and validationLevel; use of a holdoutLevel is always recommended
#' but not required. If no holdoutLevel is used, then the column must contain exactly
#' 2 unique values. If a holdoutLevel is used, the column must contain exactly 3 unique
#' values.
#'
#' For the "CV" option, each value in the column will be used to separate rows into
#' cross-validation folds. Use of a holdoutLevel is optional; if not specified, then
#' no holdout is used.
#'
#' This function is one of several convenience functions provided to simplify the task
#' of starting modeling projects with custom partitioning options. The other
#' functions are \code{CreateGroupPartition}, \code{CreateRandomPartition}, and
#' \code{CreateStratifiedPartition}.
#'
#' @inheritParams CreateGroupPartition
#' @param userPartitionCol character. String naming the data column from the
#' modeling dataset containing the subset designations.
#' @param cvHoldoutLevel character. Data value from userPartitionCol that identifies the
#' holdout subset under the "CV" option.
#' @param trainingLevel character. Data value from userPartitionCol that identifies the
#' training subset under the "TVH" option.
#' @param holdoutLevel character. Data value from userPartitionCol that identifies the
#' holdout subset under both "TVH" and "CV" options. To specify that the project should
#' not use a holdout you can omit this parameter or pass NA directly.
#' @param validationLevel character. Data value from userPartitionCol that identifies the
#' validation subset under the "TVH" option.
#' @return An S3 object of class 'partition' including the parameters required
#' by the SetTarget function to generate a user-specified of the modeling
#' dataset.
#' @seealso \code{\link{CreateGroupPartition}}, \code{\link{CreateRandomPartition}},
#' \code{\link{CreateStratifiedPartition}}.
#' @examples
#' CreateUserPartition(validationType = "CV", userPartitionCol = "TVHflag", cvHoldoutLevel = NA)
#' @export
CreateUserPartition <- function(validationType, userPartitionCol,
                                cvHoldoutLevel = NULL, trainingLevel = NULL,
                                holdoutLevel = NULL, validationLevel = NULL) {
  if (!is.character(userPartitionCol)) {
    stop("Please specify partition column name as a character string")
  }
  partition <- list(cvMethod = cvMethods$USER, validationType = validationType,
                    userPartitionCol = userPartitionCol)
  # Use identical() consistently for the validationType comparison (the "TVH"
  # branch already did; the "CV" branch previously used ==).
  if (identical(validationType, "CV")) {
    # NA (not NULL) signals "no holdout" to the backend for the CV case.
    if (is.null(cvHoldoutLevel)) {
      partition$cvHoldoutLevel <- NA
    } else {
      partition$cvHoldoutLevel <- cvHoldoutLevel
    }
  } else if (identical(validationType, "TVH")) {
    # BUG FIX: stop(strwrap(...)) passed a character *vector* of wrapped lines
    # to stop(), which concatenates them with no separator and garbles the
    # message (e.g. "for userpartition"). Use a single-string message instead.
    if (is.null(trainingLevel)) {
      stop("Parameter trainingLevel must be specified for user partition ",
           "with validationType = 'TVH'")
    }
    partition$trainingLevel <- trainingLevel
    # holdoutLevel is optional; assigning NULL via $<- simply omits the field.
    partition$holdoutLevel <- holdoutLevel
    if (is.null(validationLevel)) {
      stop("Parameter validationLevel must be specified for user partition ",
           "with validationType = 'TVH'")
    }
    partition$validationLevel <- validationLevel
  } else {
    stop(paste("validationType", validationType, "not valid"))
  }
  class(partition) <- "partition"
  partition
}
#' Create a list describing backtest parameters
#'
#' Uniquely defines a Backtest used in a DatetimePartitioning
#'
#' Includes only the attributes of a backtest directly controllable by users. The other attributes
#' are assigned by the DataRobot application based on the project dataset and the user-controlled
#' settings.
#' All durations should be specified with a duration string such as those returned
#' by the ConstructDurationString helper function.
#'
#' @param index integer. The index of the backtest
#' @param gapDuration character. The desired duration of the gap
#' between training and validation data for the backtest in duration format (ISO8601).
#' @param validationStartDate character. The desired start date of the validation data
#' for this backtest (RFC 3339 format).
#' @param validationDuration character. The desired end date
#' of the validation data for this backtest in duration format (ISO8601).
#' @return list with backtest parameters
#' @examples
#' zeroDayDuration <- ConstructDurationString()
#' hundredDayDuration <- ConstructDurationString(days = 100)
#' CreateBacktestSpecification(index = 0,
#' gapDuration = zeroDayDuration,
#' validationStartDate = "1989-12-01",
#' validationDuration = hundredDayDuration)
#' @export
CreateBacktestSpecification <- function(index, gapDuration, validationStartDate,
                                        validationDuration) {
  # Assemble the user-controllable backtest attributes as a plain named list;
  # the value of the final expression is returned implicitly.
  list(index = index,
       gapDuration = gapDuration,
       validationStartDate = validationStartDate,
       validationDuration = validationDuration)
}
# Restrict a raw backtest specification to the known schema fields.
as.dataRobotBacktestSpecification <- function(inList) {
  backtestFields <- c("index", "gapDuration", "validationStartDate",
                      "validationDuration")
  ApplySchema(inList, backtestFields)
}
#' Construct a valid string representing a duration in accordance with ISO8601
#'
#' A duration of six months, 3 days, and 12 hours could be represented as P6M3DT12H.
#'
#' @param years integer. The number of years in the duration.
#' @param months integer. The number of months in the duration.
#' @param days integer. The number of days in the duration.
#' @param hours integer. The number of hours in the duration.
#' @param minutes integer. The number of minutes in the duration.
#' @param seconds integer. The number of seconds in the duration.
#' @return The duration string, specified compatibly with ISO8601.
#' @examples
#' ConstructDurationString()
#' ConstructDurationString(days = 100)
#' ConstructDurationString(years = 10, months = 2, days = 5, seconds = 12)
#' @export
ConstructDurationString <- function(years = 0, months = 0, days = 0,
                                    hours = 0, minutes = 0, seconds = 0) {
  # Assemble an ISO8601 duration (e.g. "P6M3DT12H" style, with all components
  # written out). paste0() replaces the previous paste(..., sep = "") idiom;
  # output is byte-identical. "T" separates the date part from the time part.
  paste0("P", years, "Y",
         months, "M",
         days, "DT",
         hours, "H",
         minutes, "M",
         seconds, "S")
}
#' Create a list describing datetime partition parameters
#'
#' Uniquely defines a DatetimePartitioning for some project
#'
#' Includes only the attributes of DatetimePartitioning that are directly controllable by users,
#' not those determined by the DataRobot application based on the project dataset and the
#' user-controlled settings.
#' This is the specification that should be passed to SetTarget via the
#' partition parameter. To see the full partitioning based on the project dataset,
#' GenerateDatetimePartition.
#' All durations should be specified with a duration string such as those returned
#' by the ConstructDurationString helper function.
#'
#' @param datetimePartitionColumn character. The name of the column whose values as dates
#' are used to assign a row to a particular partition
#' @param autopilotDataSelectionMethod character. Optional. Whether models created
#' by the autopilot should use "rowCount" or "duration" as their dataSelectionMethod
#' @param validationDuration character. Optional. The default validationDuration for the
#' backtests
#' @param holdoutStartDate character. The start date of holdout scoring data
#' (RFC 3339 format). If holdoutStartDate is specified, holdoutDuration must also be specified.
#' @param holdoutDuration character. Optional. The duration of the holdout scoring data.
#' If holdoutDuration is specified, holdoutStartDate must also be specified.
#' @param disableHoldout logical. Optional. Whether to suppress allocating the holdout fold.
#' If set to TRUE, holdoutStartDate and holdoutDuration must not be specified.
#' @param gapDuration character. Optional. The duration of the gap between training and
#' holdout scoring data.
#' @param numberOfBacktests integer. The number of backtests to use.
#' @param backtests list. List of BacktestSpecification the exact specification of backtests to use.
#' The indexes of the specified backtests should range from 0 to numberOfBacktests - 1.
#' If any backtest is left unspecified, a default configuration will be chosen.
#' @param useTimeSeries logical. Whether to create a time series project (if TRUE) or an OTV
#' project which uses datetime partitioning (if FALSE). The default behavior is to create an
#' OTV project.
#' @param defaultToKnownInAdvance logical. Whether to default to treating features as known in
#' advance. Defaults to FALSE. Only used for time series project. Known in advance features are
#' expected to be known for dates in the future when making predictions (e.g., "is this a
#' holiday").
#' @param featureDerivationWindowStart integer. Optional. Offset into the past to define how far
#' back relative to the forecast point the feature derivation window should start. Only used for
#' time series projects. Expressed in terms of the \code{timeUnit} of the
#' \code{datetimePartitionColumn}.
#' @param featureDerivationWindowEnd integer. Optional. Offset into the past to define how far
#' back relative to the forecast point the feature derivation window should end. Only used for
#' time series projects. Expressed in terms of the \code{timeUnit} of the
#' \code{datetimePartitionColumn}.
#' @param featureSettings list. Optional. A list specifying settings for each feature. For each
#' feature you would like to set feature settings for, pass the following in a list:
#' \itemize{
#' \item featureName character. The name of the feature to set feature settings.
#' \item knownInAdvance logical. Optional. Whether or not the feature is known in advance.
#' Used for time series only. Defaults to \code{FALSE}.
#' \item doNotDerive logical. Optional. If \code{TRUE}, no time series derived features
#' (e.g., lags) will be automatically engineered from this feature. Used for time series only.
#' Defaults to \code{FALSE}.
#' }
#' @param treatAsExponential character. Optional. Defaults to "auto". Used to specify whether to
#' treat data as exponential trend and apply transformations like log-transform. Use values
#' from \code{TreatAsExponential} enum.
#' @param differencingMethod character. Optional. Defaults to "auto". Used to specify differencing
#' method to apply if data is stationary. Use values from \code{DifferencingMethod}.
#' @param periodicities list. Optional. A list of periodicities for different times. Must be
#' specified as a list of lists, where each list item specifies the `timeSteps` for a
#' particular `timeUnit`. Should be "ROW" if \code{windowsBasisUnit} is "ROW".
#' @param windowsBasisUnit character. Optional. Indicates which unit is the basis for the feature
#' derivation window and forecast window. Valid options are a time unit (see \code{TimeUnit})
#' or "ROW".
#' @param forecastWindowStart integer. Optional. Offset into the future to define how far forward
#' relative to the forecast point the forecast window should start. Only used for time series
#' projects. Expressed in terms of the \code{timeUnit} of the \code{datetimePartitionColumn}.
#' @param forecastWindowEnd integer. Optional. Offset into the future to define how far forward
#' relative to the forecast point the forecast window should end. Only used for time series
#' projects. Expressed in terms of the \code{timeUnit} of the \code{datetimePartitionColumn}.
#' @param multiseriesIdColumns list. A list of the names of multiseries id columns to define series
#' @param useCrossSeries logical. If \code{TRUE}, cross series features will be included. For
#' details, see "Calculating features across series" in the Time Series section of the
#' DataRobot user guide.
#' @param aggregationType character. Optional. The aggregation type to apply when creating cross
#' series features. Must be either "total" or "average". See \code{SeriesAggregationType}.
#' @param calendar character. Optional. Either the calendar object or calendar id to use
#' for this project.
#' @param crossSeriesGroupByColumns character. Optional. Column to split a cross series into
#' further groups. For example, if every series is sales of an individual product, the cross
#'   series group could be the product category with values like "men's clothing", "sports
#' equipment", etc. Requires multiseries with \code{useCrossSeries} enabled.
#' @return An S3 object of class 'partition' including the parameters required by the
#' SetTarget function to generate a datetime partitioning of the modeling dataset.
#' @examples
#' CreateDatetimePartitionSpecification("date_col")
#' CreateDatetimePartitionSpecification("date",
#' featureSettings = list(
#' list("featureName" = "Product_offers",
#' "defaultToKnownInAdvance" = TRUE)))
#' partition <- CreateDatetimePartitionSpecification("dateColumn",
#' treatAsExponential = TreatAsExponential$Always,
#' differencingMethod = DifferencingMethod$Seasonal,
#' periodicities = list(list("timeSteps" = 10,
#' "timeUnit" = "HOUR"),
#' list("timeSteps" = 600,
#' "timeUnit" = "MINUTE"),
#' list("timeSteps" = 7,
#' "timeUnit" = "DAY")))
#' @export
CreateDatetimePartitionSpecification <- function(datetimePartitionColumn,
                                                 autopilotDataSelectionMethod = NULL,
                                                 validationDuration = NULL,
                                                 holdoutStartDate = NULL,
                                                 holdoutDuration = NULL,
                                                 disableHoldout = NULL,
                                                 gapDuration = NULL,
                                                 numberOfBacktests = NULL,
                                                 backtests = NULL,
                                                 useTimeSeries = FALSE,
                                                 defaultToKnownInAdvance = FALSE,
                                                 featureDerivationWindowStart = NULL,
                                                 featureDerivationWindowEnd = NULL,
                                                 featureSettings = NULL,
                                                 treatAsExponential = NULL,
                                                 differencingMethod = NULL,
                                                 windowsBasisUnit = NULL,
                                                 periodicities = NULL,
                                                 forecastWindowStart = NULL,
                                                 forecastWindowEnd = NULL,
                                                 multiseriesIdColumns = NULL,
                                                 useCrossSeries = NULL,
                                                 aggregationType = NULL,
                                                 crossSeriesGroupByColumns = NULL,
                                                 calendar = NULL) {
  # Resolve `calendar` to an id: accept a calendar object, a bare id, or NULL.
  calendarId <- if (is(calendar, "dataRobotCalendar")) {
    ValidateCalendar(calendar)
  } else if (IsId(calendar) || is.null(calendar)) {
    calendar
  } else {
    stop("Invalid calendar specification.")
  }
  # Assemble the full specification, then drop every option the caller left
  # unset. This matches the behaviour of assigning each value into a list one
  # at a time (where a NULL assignment simply adds nothing).
  spec <- list(cvMethod = cvMethods$DATETIME,
               datetimePartitionColumn = datetimePartitionColumn,
               autopilotDataSelectionMethod = autopilotDataSelectionMethod,
               validationDuration = validationDuration,
               holdoutStartDate = holdoutStartDate,
               holdoutDuration = holdoutDuration,
               disableHoldout = disableHoldout,
               gapDuration = gapDuration,
               numberOfBacktests = numberOfBacktests,
               backtests = backtests,
               useTimeSeries = useTimeSeries,
               defaultToKnownInAdvance = defaultToKnownInAdvance,
               featureDerivationWindowStart = featureDerivationWindowStart,
               featureDerivationWindowEnd = featureDerivationWindowEnd,
               featureSettings = featureSettings,
               treatAsExponential = treatAsExponential,
               differencingMethod = differencingMethod,
               periodicities = periodicities,
               windowsBasisUnit = windowsBasisUnit,
               forecastWindowStart = forecastWindowStart,
               forecastWindowEnd = forecastWindowEnd,
               multiseriesIdColumns = multiseriesIdColumns,
               useCrossSeriesFeatures = useCrossSeries,
               aggregationType = aggregationType,
               calendarId = calendarId,
               crossSeriesGroupByColumns = crossSeriesGroupByColumns)
  spec <- spec[!vapply(spec, is.null, logical(1))]
  class(spec) <- "partition"
  spec
}
as.dataRobotDatetimePartitionSpecification <- function(inList) {
  # Restrict a raw datetime-partition specification to its documented schema
  # and normalize the nested featureSettings / backtests structures.
  specFields <- c("cvMethod",
                  "datetimePartitionColumn",
                  "autopilotDataSelectionMethod",
                  "validationDuration",
                  "holdoutStartDate",
                  "holdoutDuration",
                  "disableHoldout",
                  "gapDuration",
                  "numberOfBacktests",
                  "backtests",
                  "useTimeSeries",
                  "defaultToKnownInAdvance",
                  "featureDerivationWindowStart",
                  "featureDerivationWindowEnd",
                  "featureSettings",
                  "treatAsExponential",
                  "differencingMethod",
                  "windowsBasisUnit",
                  "periodicities",
                  "forecastWindowStart",
                  "forecastWindowEnd",
                  "multiseriesIdColumns",
                  "numberOfKnownInAdvanceFeatures",
                  "useCrossSeriesFeatures",
                  "aggregationType",
                  "calendarId",
                  "crossSeriesGroupByColumns")
  spec <- ApplySchema(inList, specFields)
  # A single (named) feature-settings entry is promoted to a one-element list
  # so it can always be processed uniformly below.
  settings <- spec$featureSettings
  if (!is.null(settings) && !is.null(names(settings))) {
    settings <- list(settings)
  }
  settingFields <- c("featureName", "knownInAdvance", "doNotDerive")
  spec$featureSettings <- lapply(settings, ApplySchema, settingFields)
  # Backtests may arrive either as a list of specifications or as a single
  # data frame; the is.list branch is checked first, exactly as before.
  if (!is.null(spec$backtests)) {
    spec$backtests <- if (is.list(spec$backtests)) {
      lapply(spec$backtests, as.dataRobotBacktestSpecification)
    } else if (is.data.frame(spec$backtests)) {
      as.dataRobotBacktestSpecification(spec$backtests)
    } else {
      spec$backtests
    }
  }
  spec
}
#' Preview the full partitioning determined by a DatetimePartitioningSpecification
#'
#' Based on the project dataset and the partitioning specification, inspect the full
#' partitioning that would be used if the same specification were passed into SetTarget.
#' This is not intended to be passed to SetTarget.
#'
#' @inheritParams DeleteProject
#' @param spec list. Datetime partition specification returned by
#' \code{CreateDatetimePartitionSpecification}
#' @return list describing datetime partition with following components
#' \itemize{
#' \item cvMethod. The type of validation scheme used for the project.
#' \item projectId character. The id of the project this partitioning applies to.
#' \item datetimePartitionColumn character. The name of the column whose values
#' as dates are used to assign a row to a particular partition.
#' \item dateFormat character. The format (e.g. "%Y-%m-%d %H:%M:%S") by which the
#' partition column was interpreted (compatible with strftime
#' [https://docs.python.org/2/library/time.html#time.strftime]).
#' \item autopilotDataSelectionMethod character. Whether models created
#' by the autopilot use "rowCount" or "duration" as their dataSelectionMethod.
#' \item validationDuration character. The validation duration specified when
#' initializing the partitioning - not directly significant if the backtests have been
#' modified, but used as the default validationDuration for the backtests.
#' \item availableTrainingStartDate character. The start date of the available training
#' data for scoring the holdout.
#' \item availableTrainingDuration character. The duration of the available training data
#' for scoring the holdout.
#' \item availableTrainingRowCount integer. The number of rows in the available training data for
#' scoring the holdout. Only available when retrieving the partitioning after setting the
#' target.
#' \item availableTrainingEndDate character. The end date of the available training data
#' for scoring the holdout.
#' \item primaryTrainingStartDate character. The start date of primary training data for
#' scoring the holdout.
#' \item primaryTrainingDuration character. The duration of the primary training data for
#' scoring the holdout.
#' \item primaryTrainingRowCount integer. The number of rows in the primary training data for
#' scoring the holdout. Only available when retrieving the partitioning after setting the
#' target.
#' \item primaryTrainingEndDate character. The end date of the primary training data for
#' scoring the holdout.
#' \item gapStartDate character. The start date of the gap between training and holdout
#' scoring data.
#' \item gapDuration character. The duration of the gap between training and holdout
#' scoring data.
#' \item gapRowCount integer. The number of rows in the gap between training and holdout scoring
#' data.
#' Only available when retrieving the partitioning after setting the target.
#' \item gapEndDate character. The end date of the gap between training and holdout scoring
#' data.
#' \item holdoutStartDate character. The start date of holdout scoring data.
#' \item holdoutDuration character. The duration of the holdout scoring data.
#' \item holdoutRowCount integer. The number of rows in the holdout scoring data.
#' Only available when retrieving the partitioning after setting the target.
#' \item holdoutEndDate character. The end date of the holdout scoring data.
#' \item numberOfBacktests integer. the number of backtests used.
#' \item backtests data.frame. A data frame of partition backtest. Each element represent one
#' backtest and has the following components:
#' index, availableTrainingStartDate, availableTrainingDuration, availableTrainingRowCount,
#' availableTrainingEndDate, primaryTrainingStartDate, primaryTrainingDuration,
#' primaryTrainingRowCount, primaryTrainingEndDate, gapStartDate, gapDuration, gapRowCount,
#' gapEndDate, validationStartDate, validationDuration, validationRowCount,
#' validationEndDate, totalRowCount.
#' \item useTimeSeries logical. Whether the project is a time series project (if TRUE) or an OTV
#' project which uses datetime partitioning (if FALSE).
#' \item defaultToKnownInAdvance logical. Whether the project defaults to treating
#'   features as known in advance. Known in advance features are time series features that
#' are expected to be known for dates in the future when making predictions (e.g., "is
#' this a holiday").
#' \item featureDerivationWindowStart integer. Offset into the past to define how far
#' back relative to the forecast point the feature derivation window should start. Only used for
#' time series projects. Expressed in terms of the \code{timeUnit} of the
#' \code{datetimePartitionColumn}.
#' \item featureDerivationWindowEnd integer. Offset into the past to define how far back relative
#' to the forecast point the feature derivation window should end. Only used for
#' time series projects. Expressed in terms of the \code{timeUnit} of the
#' \code{datetimePartitionColumn}.
#' \item forecastWindowStart integer. Offset into the future to define how far forward relative
#' to the forecast point the forecast window should start. Only used for time series
#' projects. Expressed in terms of the \code{timeUnit} of the \code{datetimePartitionColumn}.
#' \item forecastWindowEnd integer. Offset into the future to define how far forward relative to
#' the forecast point the forecast window should end. Only used for time series
#' projects. Expressed in terms of the \code{timeUnit} of the \code{datetimePartitionColumn}.
#' \item featureSettings list. A list of lists specifying settings for each feature. For each
#' feature you would like to set feature settings for, pass the following in a list:
#' \itemize{
#' \item featureName character. The name of the feature to set feature settings.
#' \item knownInAdvance logical. Optional. Whether or not the feature is known in advance.
#' Used for time series only. Defaults to \code{FALSE}.
#' \item doNotDerive logical. Optional. If \code{TRUE}, no time series derived features
#' (e.g., lags) will be automatically engineered from this feature. Used for time series
#' only. Defaults to \code{FALSE}.
#' }
#' \item treatAsExponential character. Specifies whether to treat data as exponential trend
#' and apply transformations like log-transform. Uses values from from
#' \code{TreatAsExponential}.
#' \item differencingMethod character. Used to specify differencing method to apply if data is
#' stationary. Use values from \code{DifferencingMethod}.
#' \item windowsBasisUnit character. Indicates which unit is the basis for the feature derivation
#' window and forecast window. Uses values from \code{TimeUnit} and the value "ROW".
#' \item periodicities list. A list of periodicities for different times, specified as a list of
#' lists, where each list item specifies the `timeSteps` for a particular `timeUnit`. Will be
#'   "ROW" if \code{windowsBasisUnit} is "ROW".
#' \item totalRowCount integer. The number of rows in the project dataset. Only available when
#' retrieving the partitioning after setting the target. Thus it will be NULL for
#' \code{GenerateDatetimePartition} and populated for \code{GetDatetimePartition}.
#' \item validationRowCount integer. The number of rows in the validation set.
#' \item multiseriesIdColumns list. A list of the names of multiseries id columns to define
#' series.
#' \item numberOfKnownInAdvanceFeatures integer. The number of known in advance features.
#' \item useCrossSeriesFeatures logical. Whether or not cross series features are included.
#' \item aggregationType character. The aggregation type to apply when creating cross series
#' features. See \code{SeriesAggregationType}.
#' \item calendarId character. The ID of the calendar used for this project, if any.
#' }
#' @examples
#' \dontrun{
#' projectId <- "59a5af20c80891534e3c2bde"
#' partitionSpec <- CreateDatetimePartitionSpecification("date_col")
#' GenerateDatetimePartition(projectId, partitionSpec)
#' }
#' @export
GenerateDatetimePartition <- function(project, spec) {
  # Preview the full partitioning the server would derive from `spec`
  # without actually setting the project target.
  projectId <- ValidateProject(project)
  # Strip the client-side cvMethod marker before posting the specification
  # (presumably not part of the API payload -- it is re-attached below).
  spec$cvMethod <- NULL
  response <- DataRobotPOST(UrlJoin("projects", projectId, "datetimePartitioning"),
                            body = spec, encode = "json")
  # Tag the response so downstream code recognizes it as a datetime partition.
  response$cvMethod <- cvMethods$DATETIME
  as.dataRobotDatetimePartition(response)
}
#' Retrieve the DatetimePartitioning from a project
#'
#' Only available if the project has already set the target as a datetime project.
#'
#' @inheritParams DeleteProject
#' @inherit GenerateDatetimePartition return
#' @examples
#' \dontrun{
#' projectId <- "59a5af20c80891534e3c2bde"
#' GetDatetimePartition(projectId)
#' }
#' @export
GetDatetimePartition <- function(project) {
  # Retrieve the datetime partitioning of a project whose target has
  # already been set.
  projectId <- ValidateProject(project)
  partition <- DataRobotGET(UrlJoin("projects", projectId, "datetimePartitioning"))
  # Tag the response as a datetime partition before normalizing it.
  partition$cvMethod <- cvMethods$DATETIME
  as.dataRobotDatetimePartition(partition)
}
# Normalize a raw datetime-partition API response into the documented schema.
#
# Keeps only the known top-level fields, wraps a bare featureSettings entry
# into a list of lists, restricts each backtest record to its known columns,
# and adds convenience flags derived from the kept fields.
as.dataRobotDatetimePartition <- function(inList) {
  # Full set of fields a datetime partition may carry
  # (see the GenerateDatetimePartition return documentation).
  elements <- c("cvMethod",
                "projectId",
                "datetimePartitionColumn",
                "dateFormat",
                "autopilotDataSelectionMethod",
                "validationDuration",
                "availableTrainingStartDate",
                "availableTrainingDuration",
                "availableTrainingRowCount",
                "availableTrainingEndDate",
                "primaryTrainingStartDate",
                "primaryTrainingDuration",
                "primaryTrainingRowCount",
                "primaryTrainingEndDate",
                "gapStartDate",
                "gapDuration",
                "gapRowCount",
                "gapEndDate",
                "holdoutStartDate",
                "holdoutDuration",
                "holdoutRowCount",
                "holdoutEndDate",
                "numberOfBacktests",
                "backtests",
                "useTimeSeries",
                "defaultToKnownInAdvance",
                "featureDerivationWindowStart",
                "featureDerivationWindowEnd",
                "forecastWindowStart",
                "forecastWindowEnd",
                "featureSettings",
                "treatAsExponential",
                "differencingMethod",
                "windowsBasisUnit",
                "periodicities",
                "totalRowCount",
                "validationRowCount",
                "multiseriesIdColumns",
                "numberOfKnownInAdvanceFeatures",
                "useCrossSeriesFeatures",
                "aggregationType",
                "calendarId")
  outList <- ApplySchema(inList, elements)
  # A single named featureSettings entry arrives as a bare named list;
  # wrap it so featureSettings is always a list of per-feature lists.
  if (!is.null(outList$featureSettings) && !is.null(names(outList$featureSettings))) {
    outList$featureSettings <- list(outList$featureSettings)
  }
  # Restrict each feature-settings entry to its known fields.
  featureSettings <- c("featureName", "knownInAdvance", "doNotDerive")
  outList$featureSettings <- lapply(outList$featureSettings, ApplySchema, featureSettings)
  # Columns reported for each backtest record.
  backtestElements <- c("index", "validationRowCount", "primaryTrainingDuration",
                        "primaryTrainingEndDate", "availableTrainingStartDate",
                        "primaryTrainingStartDate", "validationEndDate",
                        "availableTrainingDuration", "availableTrainingRowCount",
                        "gapEndDate", "validationDuration", "gapStartDate",
                        "availableTrainingEndDate", "primaryTrainingRowCount",
                        "validationStartDate", "totalRowCount", "gapRowCount", "gapDuration")
  outList$backtests <- ApplySchema(outList$backtests, backtestElements)
  # Convenience flags; a missing useTimeSeries / useCrossSeriesFeatures
  # field is treated as FALSE via isTRUE().
  outList$isTimeSeries <- isTRUE(outList$useTimeSeries)
  outList$isMultiSeries <- length(outList$multiseriesIdColumns) > 0
  outList$isCrossSeries <- isTRUE(outList$useCrossSeriesFeatures)
  outList
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 570
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 232
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 91
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 86
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 86
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query23_ntrivil_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 177
c no.of clauses 570
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 86
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query23_ntrivil_1344.qdimacs 177 570 E1 [3 5 7 8 9 11 12 13 14 15 16 17 18 19 20 21 22 28 29 31 32 33 34 36 38 39 42 44 46 47 49 51 54 55 56 60 62 69 73 80 85 90 96 97 99 100 102 103 123 124 126 127 129 130 150 151 153 154 156 157 2 4 6 41 43 50 52 53 57 58 59 63 64 65 66 67 70 71 74 75 76 77 78 81 82 84 86 87 88 89 92 93 98 101 104 105 107 108 109 110 112 113 114 115 117 118 119 120 125 128 131 132 134 135 136 137 139 140 141 142 144 145 146 147 152 155 158 159 161 162 163 164 166 167 168 169 171 172 173 174 1 45 48 68 79 91] 0 0 27 86 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query23_ntrivil_1344/query23_ntrivil_1344.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 1,442 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 570
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 232
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 91
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 86
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 86
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query23_ntrivil_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 177
c no.of clauses 570
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 86
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query23_ntrivil_1344.qdimacs 177 570 E1 [3 5 7 8 9 11 12 13 14 15 16 17 18 19 20 21 22 28 29 31 32 33 34 36 38 39 42 44 46 47 49 51 54 55 56 60 62 69 73 80 85 90 96 97 99 100 102 103 123 124 126 127 129 130 150 151 153 154 156 157 2 4 6 41 43 50 52 53 57 58 59 63 64 65 66 67 70 71 74 75 76 77 78 81 82 84 86 87 88 89 92 93 98 101 104 105 107 108 109 110 112 113 114 115 117 118 119 120 125 128 131 132 134 135 136 137 139 140 141 142 144 145 146 147 152 155 158 159 161 162 163 164 166 167 168 169 171 172 173 174 1 45 48 68 79 91] 0 0 27 86 RED
|
# Hold-out prediction experiment: repeatedly split the citation data 80/20,
# fit the full and covariates-only MPLE models on the training part, and
# record test-set classification performance for each repetition.
scdata <- read.csv("mpledata_all.csv", stringsAsFactors = FALSE)  # FALSE, not F: F is reassignable
set.seed(123)  # reproducible train/test splits
niter <- 10    # number of random hold-out repetitions (use <-, not =, for assignment)
# One row per repetition: precision/recall/F1 for the full and independent models.
predict.results <- matrix(0, niter, 6)
# Each pass: draw a fresh random 80% training split, fit both models with
# biglm::bigglm (memory-bounded GLM fitting), and score the held-out 20%.
for(i in 1:niter){
# Indices of the 80% training rows for this repetition.
train.obs <- sample(1:nrow(scdata),round(0.8*nrow(scdata)))
train.data <- scdata[train.obs,]
library(biglm)
# MPLE
# Full model: structural network terms (in/out-stars, mutuality, triangles)
# plus covariate terms and their sender-time interactions.
mple.est <- bigglm(edgeij ~ istar2+ostar2+mutual+triangle+ edgecov.mq.t+ edgecov.same.issue.area.t+ edgecov.year.diff.t +edgecov.year.diff.square.t +
                   nodeicov.AbsDiffMQscores+ nodeicov.NumberJusticesPro+ nodeocov.sender.time + nodeifactor.SameIssueArea.2+ nodeifactor.SameIssueArea.3+
                   nodeifactor.SameIssueArea.4+nodeifactor.SameIssueArea.5+ nodeifactor.SameIssueArea.6+ nodeifactor.SameIssueArea.7+
                   nodeifactor.SameIssueArea.8+nodeifactor.SameIssueArea.9+ nodeifactor.SameIssueArea.10+
                   nodeifactor.SameIssueArea.11+ nodeifactor.SameIssueArea.12+ nodeifactor.SameIssueArea.13+nodeifactor.SameIssueArea.14+
                   nodeofactor.SameIssueArea.2+ nodeofactor.SameIssueArea.3+ nodeofactor.SameIssueArea.4+nodeofactor.SameIssueArea.5+
                   nodeofactor.SameIssueArea.6+nodeofactor.SameIssueArea.7+
                   nodeofactor.SameIssueArea.8+nodeofactor.SameIssueArea.9+nodeofactor.SameIssueArea.10+ nodeofactor.SameIssueArea.11+
                   nodeofactor.SameIssueArea.12+ nodeofactor.SameIssueArea.13+nodeofactor.SameIssueArea.14+
                   instar2_sendertime+outstar2_sendertime+ mutual_sendertime+ triangle_sendertime+mq_sendertime +sameissuearea_sendertime+
                   yeardiff_sendertime + yeardiffsquare_sendertime+ AbsDiffMQscores_sendertime + NumberJusticesPro_sendertime,family=binomial(logit),data=train.data)
summary(mple.est)
save(list="mple.est",file="training_full.RData")
# MPLE
# Independent (covariates-only) model: same covariates but no structural
# network terms, so edges are treated as conditionally independent.
mple.est.independent <- bigglm(edgeij ~ edgecov.mq.t+ edgecov.same.issue.area.t+ edgecov.year.diff.t +edgecov.year.diff.square.t +
                               nodeicov.AbsDiffMQscores+ nodeicov.NumberJusticesPro+ nodeocov.sender.time + nodeifactor.SameIssueArea.2+ nodeifactor.SameIssueArea.3+
                               nodeifactor.SameIssueArea.4+nodeifactor.SameIssueArea.5+ nodeifactor.SameIssueArea.6+ nodeifactor.SameIssueArea.7+
                               nodeifactor.SameIssueArea.8+nodeifactor.SameIssueArea.9+ nodeifactor.SameIssueArea.10+
                               nodeifactor.SameIssueArea.11+ nodeifactor.SameIssueArea.12+ nodeifactor.SameIssueArea.13+nodeifactor.SameIssueArea.14+
                               nodeofactor.SameIssueArea.2+ nodeofactor.SameIssueArea.3+ nodeofactor.SameIssueArea.4+nodeofactor.SameIssueArea.5+
                               nodeofactor.SameIssueArea.6+nodeofactor.SameIssueArea.7+
                               nodeofactor.SameIssueArea.8+nodeofactor.SameIssueArea.9+nodeofactor.SameIssueArea.10+ nodeofactor.SameIssueArea.11+
                               nodeofactor.SameIssueArea.12+ nodeofactor.SameIssueArea.13+nodeofactor.SameIssueArea.14+
                               mq_sendertime +sameissuearea_sendertime+
                               yeardiff_sendertime + yeardiffsquare_sendertime+ AbsDiffMQscores_sendertime + NumberJusticesPro_sendertime,family=binomial(logit),train.data)
summary(mple.est.independent)
save(list="mple.est.independent",file="training_independent.RData")
# Held-out 20% of the rows.
test.data <- scdata[-train.obs,]
# Predicted edge probabilities on the hold-out set for both models.
predict.full <- c(predict(mple.est,newdata=test.data,type="response"))
predict.independent <- c(predict(mple.est.independent,newdata=test.data,type="response"))
library(MLmetrics)
true.y <- test.data$edgeij
# Classify an edge as present when its predicted probability exceeds 0.5.
pred.full <- as.numeric(predict.full>0.5)
pred.ind <- as.numeric(predict.independent>0.5)
# Precision: share of predicted positives that are actually positive,
# i.e. TP / (TP + FP). NaN (with a warning) when nothing is predicted positive.
precision <- function(true.y, pred.y) {
  flagged <- which(pred.y == 1)
  mean(true.y[flagged])
}
# Recall: share of actual positives that were predicted positive,
# i.e. TP / (TP + FN). NaN (with a warning) when there are no true positives.
recall <- function(true.y, pred.y) {
  positives <- which(true.y == 1)
  mean(pred.y[positives])
}
# Hold-out performance of both models for this repetition.
prec.full <- precision(true.y,pred.full)
prec.ind <- precision(true.y,pred.ind)
rec.full <- recall(true.y,pred.full)
rec.ind <- recall(true.y,pred.ind)
# MLmetrics::F1_Score with class "1" treated as the positive class.
f1.full <- F1_Score(true.y,pred.full,pos=1)
f1.ind <- F1_Score(true.y,pred.ind,pos=1)
predict.results[i,] <- c(prec.full,prec.ind,rec.full,rec.ind,f1.full,f1.ind)
# Progress indicator for the long-running loop.
print(i)
}
# Persist the per-repetition performance matrix for later analysis.
save(list="predict.results",file="prediction.performance.RData")
# Summarise a numeric vector as a length-2 character vector:
# its mean and its observed range "(min, max)", each rounded to 4 digits.
# (The numeric mean is coerced to character by c() with the range string.)
mean.range <- function(x) {
  # Spell out `digits =` instead of relying on `dig=` partial matching,
  # and use paste0() rather than paste(..., sep = "").
  c(round(mean(x), digits = 4),
    paste0("(", round(min(x), digits = 4), ", ", round(max(x), digits = 4), ")"))
}
# Column-wise summaries: the full model's metrics sit in columns 1,3,5;
# the covariates-only model's in columns 2,4,6.
results.full <- t(apply(predict.results[,c(1,3,5)],2,mean.range))
results.ind <- t(apply(predict.results[,c(2,4,6)],2,mean.range))
library(xtable)
# Side-by-side table (independent model first), one row per metric.
results.table <- cbind(results.ind,results.full)
rownames(results.table) <- c("precision","recall","F1 score")
colnames(results.table) <- c("mean","range","mean","range")
tex.table <- xtable(results.table)
# Export the summary as a LaTeX table.
print(tex.table,file="prediction_table.tex")
| /R-Code/holdOutExperiment.R | no_license | schmid86/Supreme_Court_Citation_Network | R | false | false | 4,577 | r | scdata <- read.csv("mpledata_all.csv",stringsAsFactors=F)
set.seed(123)
niter = 10
predict.results <- matrix(0,niter,6)
for(i in 1:niter){
train.obs <- sample(1:nrow(scdata),round(0.8*nrow(scdata)))
train.data <- scdata[train.obs,]
library(biglm)
# MPLE
mple.est <- bigglm(edgeij ~ istar2+ostar2+mutual+triangle+ edgecov.mq.t+ edgecov.same.issue.area.t+ edgecov.year.diff.t +edgecov.year.diff.square.t +
nodeicov.AbsDiffMQscores+ nodeicov.NumberJusticesPro+ nodeocov.sender.time + nodeifactor.SameIssueArea.2+ nodeifactor.SameIssueArea.3+
nodeifactor.SameIssueArea.4+nodeifactor.SameIssueArea.5+ nodeifactor.SameIssueArea.6+ nodeifactor.SameIssueArea.7+
nodeifactor.SameIssueArea.8+nodeifactor.SameIssueArea.9+ nodeifactor.SameIssueArea.10+
nodeifactor.SameIssueArea.11+ nodeifactor.SameIssueArea.12+ nodeifactor.SameIssueArea.13+nodeifactor.SameIssueArea.14+
nodeofactor.SameIssueArea.2+ nodeofactor.SameIssueArea.3+ nodeofactor.SameIssueArea.4+nodeofactor.SameIssueArea.5+
nodeofactor.SameIssueArea.6+nodeofactor.SameIssueArea.7+
nodeofactor.SameIssueArea.8+nodeofactor.SameIssueArea.9+nodeofactor.SameIssueArea.10+ nodeofactor.SameIssueArea.11+
nodeofactor.SameIssueArea.12+ nodeofactor.SameIssueArea.13+nodeofactor.SameIssueArea.14+
instar2_sendertime+outstar2_sendertime+ mutual_sendertime+ triangle_sendertime+mq_sendertime +sameissuearea_sendertime+
yeardiff_sendertime + yeardiffsquare_sendertime+ AbsDiffMQscores_sendertime + NumberJusticesPro_sendertime,family=binomial(logit),data=train.data)
summary(mple.est)
save(list="mple.est",file="training_full.RData")
# MPLE
mple.est.independent <- bigglm(edgeij ~ edgecov.mq.t+ edgecov.same.issue.area.t+ edgecov.year.diff.t +edgecov.year.diff.square.t +
nodeicov.AbsDiffMQscores+ nodeicov.NumberJusticesPro+ nodeocov.sender.time + nodeifactor.SameIssueArea.2+ nodeifactor.SameIssueArea.3+
nodeifactor.SameIssueArea.4+nodeifactor.SameIssueArea.5+ nodeifactor.SameIssueArea.6+ nodeifactor.SameIssueArea.7+
nodeifactor.SameIssueArea.8+nodeifactor.SameIssueArea.9+ nodeifactor.SameIssueArea.10+
nodeifactor.SameIssueArea.11+ nodeifactor.SameIssueArea.12+ nodeifactor.SameIssueArea.13+nodeifactor.SameIssueArea.14+
nodeofactor.SameIssueArea.2+ nodeofactor.SameIssueArea.3+ nodeofactor.SameIssueArea.4+nodeofactor.SameIssueArea.5+
nodeofactor.SameIssueArea.6+nodeofactor.SameIssueArea.7+
nodeofactor.SameIssueArea.8+nodeofactor.SameIssueArea.9+nodeofactor.SameIssueArea.10+ nodeofactor.SameIssueArea.11+
nodeofactor.SameIssueArea.12+ nodeofactor.SameIssueArea.13+nodeofactor.SameIssueArea.14+
mq_sendertime +sameissuearea_sendertime+
yeardiff_sendertime + yeardiffsquare_sendertime+ AbsDiffMQscores_sendertime + NumberJusticesPro_sendertime,family=binomial(logit),train.data)
summary(mple.est.independent)
save(list="mple.est.independent",file="training_independent.RData")
test.data <- scdata[-train.obs,]
predict.full <- c(predict(mple.est,newdata=test.data,type="response"))
predict.independent <- c(predict(mple.est.independent,newdata=test.data,type="response"))
library(MLmetrics)
true.y <- test.data$edgeij
pred.full <- as.numeric(predict.full>0.5)
pred.ind <- as.numeric(predict.independent>0.5)
# Precision: share of predicted positives that are actually positive,
# i.e. TP / (TP + FP). NaN (with a warning) when nothing is predicted positive.
precision <- function(true.y, pred.y) {
  flagged <- which(pred.y == 1)
  mean(true.y[flagged])
}
# Recall: share of actual positives that were predicted positive,
# i.e. TP / (TP + FN). NaN (with a warning) when there are no true positives.
recall <- function(true.y, pred.y) {
  positives <- which(true.y == 1)
  mean(pred.y[positives])
}
prec.full <- precision(true.y,pred.full)
prec.ind <- precision(true.y,pred.ind)
rec.full <- recall(true.y,pred.full)
rec.ind <- recall(true.y,pred.ind)
f1.full <- F1_Score(true.y,pred.full,pos=1)
f1.ind <- F1_Score(true.y,pred.ind,pos=1)
predict.results[i,] <- c(prec.full,prec.ind,rec.full,rec.ind,f1.full,f1.ind)
print(i)
}
save(list="predict.results",file="prediction.performance.RData")
# Summarise a numeric vector as a length-2 character vector:
# its mean and its observed range "(min, max)", each rounded to 4 digits.
# (The numeric mean is coerced to character by c() with the range string.)
mean.range <- function(x) {
  # Spell out `digits =` instead of relying on `dig=` partial matching,
  # and use paste0() rather than paste(..., sep = "").
  c(round(mean(x), digits = 4),
    paste0("(", round(min(x), digits = 4), ", ", round(max(x), digits = 4), ")"))
}
results.full <- t(apply(predict.results[,c(1,3,5)],2,mean.range))
results.ind <- t(apply(predict.results[,c(2,4,6)],2,mean.range))
library(xtable)
results.table <- cbind(results.ind,results.full)
rownames(results.table) <- c("precision","recall","F1 score")
colnames(results.table) <- c("mean","range","mean","range")
tex.table <- xtable(results.table)
print(tex.table,file="prediction_table.tex")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{tfd_log_normal}
\alias{tfd_log_normal}
\title{Log-normal distribution}
\usage{
tfd_log_normal(
  loc,
  scale,
  validate_args = FALSE,
  allow_nan_stats = TRUE,
  name = "LogNormal"
)
}
\arguments{
\item{loc}{Floating-point \code{Tensor}; the means of the underlying
Normal distribution(s).}
\item{scale}{Floating-point \code{Tensor}; the stddevs of the underlying
Normal distribution(s).}
\item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
silently render incorrect outputs. Default value: FALSE.}
\item{allow_nan_stats}{Logical, default TRUE. When TRUE, statistics (e.g., mean, mode, variance)
use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
one or more of the statistic's batch members are undefined.}
\item{name}{name prefixed to Ops created by this class.}
}
\value{
a distribution instance.
}
\description{
The LogNormal distribution models positive-valued random variables
whose logarithm is normally distributed with mean \code{loc} and
standard deviation \code{scale}. It is constructed as the exponential
transformation of a Normal distribution.
}
\seealso{
For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
Other distributions:
\code{\link{tfd_autoregressive}()},
\code{\link{tfd_batch_reshape}()},
\code{\link{tfd_bates}()},
\code{\link{tfd_bernoulli}()},
\code{\link{tfd_beta_binomial}()},
\code{\link{tfd_beta}()},
\code{\link{tfd_binomial}()},
\code{\link{tfd_categorical}()},
\code{\link{tfd_cauchy}()},
\code{\link{tfd_chi2}()},
\code{\link{tfd_chi}()},
\code{\link{tfd_cholesky_lkj}()},
\code{\link{tfd_continuous_bernoulli}()},
\code{\link{tfd_deterministic}()},
\code{\link{tfd_dirichlet_multinomial}()},
\code{\link{tfd_dirichlet}()},
\code{\link{tfd_empirical}()},
\code{\link{tfd_exp_gamma}()},
\code{\link{tfd_exp_inverse_gamma}()},
\code{\link{tfd_exponential}()},
\code{\link{tfd_gamma_gamma}()},
\code{\link{tfd_gamma}()},
\code{\link{tfd_gaussian_process_regression_model}()},
\code{\link{tfd_gaussian_process}()},
\code{\link{tfd_generalized_normal}()},
\code{\link{tfd_geometric}()},
\code{\link{tfd_gumbel}()},
\code{\link{tfd_half_cauchy}()},
\code{\link{tfd_half_normal}()},
\code{\link{tfd_hidden_markov_model}()},
\code{\link{tfd_horseshoe}()},
\code{\link{tfd_independent}()},
\code{\link{tfd_inverse_gamma}()},
\code{\link{tfd_inverse_gaussian}()},
\code{\link{tfd_johnson_s_u}()},
\code{\link{tfd_joint_distribution_named_auto_batched}()},
\code{\link{tfd_joint_distribution_named}()},
\code{\link{tfd_joint_distribution_sequential_auto_batched}()},
\code{\link{tfd_joint_distribution_sequential}()},
\code{\link{tfd_kumaraswamy}()},
\code{\link{tfd_laplace}()},
\code{\link{tfd_linear_gaussian_state_space_model}()},
\code{\link{tfd_lkj}()},
\code{\link{tfd_log_logistic}()},
\code{\link{tfd_logistic}()},
\code{\link{tfd_mixture_same_family}()},
\code{\link{tfd_mixture}()},
\code{\link{tfd_multinomial}()},
\code{\link{tfd_multivariate_normal_diag_plus_low_rank}()},
\code{\link{tfd_multivariate_normal_diag}()},
\code{\link{tfd_multivariate_normal_full_covariance}()},
\code{\link{tfd_multivariate_normal_linear_operator}()},
\code{\link{tfd_multivariate_normal_tri_l}()},
\code{\link{tfd_multivariate_student_t_linear_operator}()},
\code{\link{tfd_negative_binomial}()},
\code{\link{tfd_normal}()},
\code{\link{tfd_one_hot_categorical}()},
\code{\link{tfd_pareto}()},
\code{\link{tfd_pixel_cnn}()},
\code{\link{tfd_poisson_log_normal_quadrature_compound}()},
\code{\link{tfd_poisson}()},
\code{\link{tfd_power_spherical}()},
\code{\link{tfd_probit_bernoulli}()},
\code{\link{tfd_quantized}()},
\code{\link{tfd_relaxed_bernoulli}()},
\code{\link{tfd_relaxed_one_hot_categorical}()},
\code{\link{tfd_sample_distribution}()},
\code{\link{tfd_sinh_arcsinh}()},
\code{\link{tfd_skellam}()},
\code{\link{tfd_spherical_uniform}()},
\code{\link{tfd_student_t_process}()},
\code{\link{tfd_student_t}()},
\code{\link{tfd_transformed_distribution}()},
\code{\link{tfd_triangular}()},
\code{\link{tfd_truncated_cauchy}()},
\code{\link{tfd_truncated_normal}()},
\code{\link{tfd_uniform}()},
\code{\link{tfd_variational_gaussian_process}()},
\code{\link{tfd_vector_diffeomixture}()},
\code{\link{tfd_vector_exponential_diag}()},
\code{\link{tfd_vector_exponential_linear_operator}()},
\code{\link{tfd_vector_laplace_diag}()},
\code{\link{tfd_vector_laplace_linear_operator}()},
\code{\link{tfd_vector_sinh_arcsinh_diag}()},
\code{\link{tfd_von_mises_fisher}()},
\code{\link{tfd_von_mises}()},
\code{\link{tfd_weibull}()},
\code{\link{tfd_wishart_linear_operator}()},
\code{\link{tfd_wishart_tri_l}()},
\code{\link{tfd_wishart}()},
\code{\link{tfd_zipf}()}
}
\concept{distributions}
| /man/tfd_log_normal.Rd | no_license | cran/tfprobability | R | false | true | 5,498 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{tfd_log_normal}
\alias{tfd_log_normal}
\title{Log-normal distribution}
\usage{
tfd_log_normal(
  loc,
  scale,
  validate_args = FALSE,
  allow_nan_stats = TRUE,
  name = "LogNormal"
)
}
\arguments{
\item{loc}{Floating-point \code{Tensor}; the means of the underlying
Normal distribution(s).}
\item{scale}{Floating-point \code{Tensor}; the stddevs of the underlying
Normal distribution(s).}
\item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
silently render incorrect outputs. Default value: FALSE.}
\item{allow_nan_stats}{Logical, default TRUE. When TRUE, statistics (e.g., mean, mode, variance)
use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
one or more of the statistic's batch members are undefined.}
\item{name}{name prefixed to Ops created by this class.}
}
\value{
a distribution instance.
}
\description{
The LogNormal distribution models positive-valued random variables
whose logarithm is normally distributed with mean \code{loc} and
standard deviation \code{scale}. It is constructed as the exponential
transformation of a Normal distribution.
}
\seealso{
For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
Other distributions:
\code{\link{tfd_autoregressive}()},
\code{\link{tfd_batch_reshape}()},
\code{\link{tfd_bates}()},
\code{\link{tfd_bernoulli}()},
\code{\link{tfd_beta_binomial}()},
\code{\link{tfd_beta}()},
\code{\link{tfd_binomial}()},
\code{\link{tfd_categorical}()},
\code{\link{tfd_cauchy}()},
\code{\link{tfd_chi2}()},
\code{\link{tfd_chi}()},
\code{\link{tfd_cholesky_lkj}()},
\code{\link{tfd_continuous_bernoulli}()},
\code{\link{tfd_deterministic}()},
\code{\link{tfd_dirichlet_multinomial}()},
\code{\link{tfd_dirichlet}()},
\code{\link{tfd_empirical}()},
\code{\link{tfd_exp_gamma}()},
\code{\link{tfd_exp_inverse_gamma}()},
\code{\link{tfd_exponential}()},
\code{\link{tfd_gamma_gamma}()},
\code{\link{tfd_gamma}()},
\code{\link{tfd_gaussian_process_regression_model}()},
\code{\link{tfd_gaussian_process}()},
\code{\link{tfd_generalized_normal}()},
\code{\link{tfd_geometric}()},
\code{\link{tfd_gumbel}()},
\code{\link{tfd_half_cauchy}()},
\code{\link{tfd_half_normal}()},
\code{\link{tfd_hidden_markov_model}()},
\code{\link{tfd_horseshoe}()},
\code{\link{tfd_independent}()},
\code{\link{tfd_inverse_gamma}()},
\code{\link{tfd_inverse_gaussian}()},
\code{\link{tfd_johnson_s_u}()},
\code{\link{tfd_joint_distribution_named_auto_batched}()},
\code{\link{tfd_joint_distribution_named}()},
\code{\link{tfd_joint_distribution_sequential_auto_batched}()},
\code{\link{tfd_joint_distribution_sequential}()},
\code{\link{tfd_kumaraswamy}()},
\code{\link{tfd_laplace}()},
\code{\link{tfd_linear_gaussian_state_space_model}()},
\code{\link{tfd_lkj}()},
\code{\link{tfd_log_logistic}()},
\code{\link{tfd_logistic}()},
\code{\link{tfd_mixture_same_family}()},
\code{\link{tfd_mixture}()},
\code{\link{tfd_multinomial}()},
\code{\link{tfd_multivariate_normal_diag_plus_low_rank}()},
\code{\link{tfd_multivariate_normal_diag}()},
\code{\link{tfd_multivariate_normal_full_covariance}()},
\code{\link{tfd_multivariate_normal_linear_operator}()},
\code{\link{tfd_multivariate_normal_tri_l}()},
\code{\link{tfd_multivariate_student_t_linear_operator}()},
\code{\link{tfd_negative_binomial}()},
\code{\link{tfd_normal}()},
\code{\link{tfd_one_hot_categorical}()},
\code{\link{tfd_pareto}()},
\code{\link{tfd_pixel_cnn}()},
\code{\link{tfd_poisson_log_normal_quadrature_compound}()},
\code{\link{tfd_poisson}()},
\code{\link{tfd_power_spherical}()},
\code{\link{tfd_probit_bernoulli}()},
\code{\link{tfd_quantized}()},
\code{\link{tfd_relaxed_bernoulli}()},
\code{\link{tfd_relaxed_one_hot_categorical}()},
\code{\link{tfd_sample_distribution}()},
\code{\link{tfd_sinh_arcsinh}()},
\code{\link{tfd_skellam}()},
\code{\link{tfd_spherical_uniform}()},
\code{\link{tfd_student_t_process}()},
\code{\link{tfd_student_t}()},
\code{\link{tfd_transformed_distribution}()},
\code{\link{tfd_triangular}()},
\code{\link{tfd_truncated_cauchy}()},
\code{\link{tfd_truncated_normal}()},
\code{\link{tfd_uniform}()},
\code{\link{tfd_variational_gaussian_process}()},
\code{\link{tfd_vector_diffeomixture}()},
\code{\link{tfd_vector_exponential_diag}()},
\code{\link{tfd_vector_exponential_linear_operator}()},
\code{\link{tfd_vector_laplace_diag}()},
\code{\link{tfd_vector_laplace_linear_operator}()},
\code{\link{tfd_vector_sinh_arcsinh_diag}()},
\code{\link{tfd_von_mises_fisher}()},
\code{\link{tfd_von_mises}()},
\code{\link{tfd_weibull}()},
\code{\link{tfd_wishart_linear_operator}()},
\code{\link{tfd_wishart_tri_l}()},
\code{\link{tfd_wishart}()},
\code{\link{tfd_zipf}()}
}
\concept{distributions}
|
# R script to handle a precision map of the QBFBP around EML1,2
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Init
#-------------------------------------------------------------------------------
source("source/init.R")
#-------------------------------------------------------------------------------
# Select Models & libration point
#-------------------------------------------------------------------------------
Li = "L2"
MODEL = "QBCP"
FWRK = "EM"
Type = "rand" #selection or global
Energy = 0
vorders = c(5, 10, 15, 20, 25, 30);
#vorders = c(seq(3,30,2), 30)
currentfolder = paste0(printfolder(MODEL, FWRK, Li), "Serv/")
#-------------------------------------------------------------------------------
#Normalized units (gamma, c1)
#-------------------------------------------------------------------------------
muR = muR(FWRK);
gamma = gamma(Li, FWRK);
c1 = c1(Li, FWRK);
L = Ldist(FWRK);
if(FWRK == "EM")
{
primaryR = 1737.10 #m2
}else{
primaryR = 6378.10 #m2
}
#-------------------------------------------------------------------------------
#Additionnal parameters
#-------------------------------------------------------------------------------
isLegendOn = 1;
legendOnly = 0;
#-------------------------------------------------------------------------------
# Building the data.frame of results
#-------------------------------------------------------------------------------
imap = data.frame()
for (i in vorders) #loop on the orders
{
if(Energy == 0)
{
fileprefix = paste0(currentfolder, "eIm_", Type, "_ofs_30_order_",toString(i), "_PhD");
}else
{
fileprefix = paste0(currentfolder, "eIm_", Type, "_ofs_30_order_",toString(i), "_energy_", toString(Energy));
}
filename = paste0(fileprefix, ".bin")
# Load csv source
#-------------------------------------------------------------------------------
if (file.exists(filename))
{
names = c("label", "x", "y", "z", "px", "py", "pz",
"xEM", "yEM", "zEM", "pxEM", "pyEM", "pzEM",
"s1", "s2", "s3", "s4", "t", "dHz", "eIm");
imapc = dffbinary(filename, 20, names);
imapc$order = i;
}else
{
imapc = data.frame()
}
#rbind in ttm_all
imap = rbind(imap, imapc);
}
#-------------------------------------------------------------------------------
# Postprocessing: derive the columns used for filtering and plotting below
#-------------------------------------------------------------------------------
# Convert the precision eIm (NC units) to EIm (EM units)
imap$EIm = gamma*imap$eIm
# To centered units
imap = NCtoC(imap, gamma)
# Centered coordinates to physical units
imap = CtoPH(imap, L)
# EM coordinates to physical units
imap = EMtoPH(imap, L)
# log10 of the precision (note: this is log10(EIm), not -log10)
imap$log10eOm = log10(imap$EIm)
# Same quantity, floored to the nearest integer exponent
imap$flog10eOm = floor(log10(imap$EIm))
# Distance to the center, in centered and physical units
imap$rC = sqrt(imap$xC^2 + imap$yC^2 + imap$zC^2)
imap$rCPH = sqrt(imap$xCPH^2 + imap$yCPH^2 + imap$zCPH^2)
imap$frCPH = floor(imap$rCPH*1e-4)
# Norm of the reduced coordinates s = (s1, s2, s3, s4)
# (removed a stray "++" typo; the unary plus was harmless but confusing)
imap$sC = sqrt(imap$s1^2 + imap$s2^2 + imap$s3^2 + imap$s4^2)
# Absolute value of zC
imap$zCn = abs(imap$zC)
#-------------------------------------------------------------------------------
# Get rid of bad precision
#-------------------------------------------------------------------------------
imap = imap[which(imap$log10eOm < -1),]
#-------------------------------------------------------------------------------
# Select only some given value in the results
#-------------------------------------------------------------------------------
imapsys = data.frame();
# veps = c(seq(1,8,2)*1e-5, seq(1,8,2)*1e-6, seq(1,8,2)*1e-7, seq(1,8,2)*1e-8, seq(1,8,2)*1e-9);
# Select only positive some given value in the results
if(FWRK == "EM")
{
if(Li == "L2")
{
veps = 10^(-seq(5,9));
}else{
veps = 10^(-seq(5,9));
}
deps = 0.8
}else
{
if(Li == "L2")
{
veps = 10^(-seq(5,12));
}else{
veps = 10^(-seq(5,12));
}
deps = 0.5
}
for (eps in veps) #loop on the orders
{
isCloseToEPS = abs(imap$EIm - eps) < deps*eps;
imapr = imap[which(isCloseToEPS),]
#Ordered version
imapr = imapr[order(imapr$order),]
# Compute the mean position
imapm = ddply(imapr, .(order), summarize, mdHz = mean(dHz), msC = mean(sC), mrCPH = mean(rCPH), mrC = mean(rC), mxC = mean(abs(xC)), myC = mean(abs(yC)), mzC = mean(abs(zC)))
imapm$eps = log10(eps)
#rbind in ttm_all
imapsys = rbind(imapsys, imapm);
}
#-------------------------------------------------------------------------------
# Scale x
#-------------------------------------------------------------------------------
if(FWRK == "EM")
{
scale_x_dH = scale_x_continuous(breaks = seq(0,0.03,0.01))
scale_y_dH = scale_y_continuous(breaks = seq(0,0.03,0.01), limits = c(0, 0.033))
}else
{
scale_x_dH = scale_x_continuous(labels = scientific_format(), breaks = seq(0,12,3)*1e-5)
scale_y_dH = scale_y_continuous(labels = scientific_format(), breaks = seq(0,12,3)*1e-5, limits = c(0, 1.35e-4))
}
#-------------------------------------------------------------------------------
# colorLab
#-------------------------------------------------------------------------------
colorLab = "logEI"
# Or
#colorLab = "$\\raisebox{0.3ex}{\\scriptsize{$\\log_{10}(E_I)$}}$"
#-------------------------------------------------------------------------------
# Main plot: N vs dHz
#-------------------------------------------------------------------------------
ppme = plotdf_path(imapsys, "order", "mdHz", "Order $N$", "$\\delta \\bar{H}_0$", colorCol = "eps", colorLabel = colorLab, isColorFac = TRUE)
ppme = ppme + scale_x_continuous(breaks = seq(5,30,5))
ppme = ppme + scale_y_dH
ppme = ppme + custom_theme + legend_pos(c(0,1))
ppme
#-------------------------------------------------------------------------------
# Main plot: N vs sC
#-------------------------------------------------------------------------------
ppms = plotdf_path(imapsys, "order", "msC", "Order $N$", "$\\bar{\\|\\mathbf{s}\\||}_0$", colorCol = "eps", colorLabel = colorLab, isColorFac = TRUE)
ppms = ppms + scale_x_continuous(breaks = seq(5,30,5))
ppms = ppms + custom_theme + legend_pos(c(0,1))
ppms
#-------------------------------------------------------------------------------
# Main plot: dHz vs N
#
# ppme2 = geom_point_pretty(ppme2, imapsys, aes(mdHz, order, color = factor(eps)))
#-------------------------------------------------------------------------------
ppme2 = plotdf_path(imapsys, "mdHz", "order", "$\\delta \\bar{H}_0$", "Order $N$", colorCol = "eps", colorLabel = colorLab, isColorFac = TRUE)
ppme2 = ppme2 + scale_x_dH
ppme2 = ppme2 + scale_y_continuous(breaks = seq(5,30,5))
ppme2 = ppme2 + legend_inside_theme
# Needed to account for the cutting of the right part of the x labels
ppme2 = ppme2 + theme(plot.margin = margin(10,40,10,10))
ppme2
#-------------------------------------------------------------------------------
# Save in tikz
# NOTE(review): the stop() below deliberately halts a sourced run before the
# tikz export; comment it out to actually produce the .tex files.
# NOTE(review): toString(i) reuses the loop index from the data-loading loop
# above, i.e. the LAST order in vorders -- confirm this suffix is intended.
# NOTE(review): xSize and ySize are not defined in this script -- presumably
# they come from source("source/init.R"); verify before running.
#-------------------------------------------------------------------------------
stop()
ggplot2tikz_phd(ppme, xSize, ySize, file = paste0(currentfolder, "EIm_", Type, "_ofs_30_order_",toString(i), "_3D.tex"))
ggplot2tikz_phd(ppms, xSize, ySize, file = paste0(currentfolder, "EIm_", Type, "_ofs_30_order_",toString(i), "_sC.tex"))
stop()
#-------------------------------------------------------------------------------
# Other plots from old implementation
#-------------------------------------------------------------------------------
Lit = "$L_2$"
ppmd = plotdf_line(imapsys, "mrCPH", "order", paste0("mean distance to ", Lit), "Order", colorCol = "eps", colorLabel = "Precision", isColorFac = TRUE)
ppmd = ppmd + scale_x_continuous(breaks = seq(5,30,5))
ppmd
ppmer = plotdf_line(imapsys, "mdHz", "eps", "mean($\\delta H_0$)", "$log_{10}(e_I)$", colorCol = "order", colorLabel = "Order", isColorFac = TRUE)
ppmer = ppmer + scale_x_continuous(breaks = seq(-10,-1,1))
ppmer
#-------------------------------------------------------------------------------
# 3D plot
#-------------------------------------------------------------------------------
# scatter3D(imapr$xEM, imapr$yEM, imapr$zEM, colvar = imapr$order, pch = 16, cex = 1.5)
# scatter3D(imap$xEM, imap$yEM, imap$zEM, colvar = imap$flog10eOm, pch = 16, cex = 1.5)
| /Imap/Imap_Order_Random_3D_PhD.R | no_license | lebihanbastien/RFTDA | R | false | false | 8,309 | r | # R script to handle a precision map of the QBFBP around EML1,2
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Init
#-------------------------------------------------------------------------------
source("source/init.R")
#-------------------------------------------------------------------------------
# Select Models & libration point
#-------------------------------------------------------------------------------
Li = "L2"
MODEL = "QBCP"
FWRK = "EM"
Type = "rand" #selection or global
Energy = 0
vorders = c(5, 10, 15, 20, 25, 30);
#vorders = c(seq(3,30,2), 30)
currentfolder = paste0(printfolder(MODEL, FWRK, Li), "Serv/")
#-------------------------------------------------------------------------------
#Normalized units (gamma, c1)
#-------------------------------------------------------------------------------
muR = muR(FWRK);
gamma = gamma(Li, FWRK);
c1 = c1(Li, FWRK);
L = Ldist(FWRK);
if(FWRK == "EM")
{
primaryR = 1737.10 #m2
}else{
primaryR = 6378.10 #m2
}
#-------------------------------------------------------------------------------
#Additionnal parameters
#-------------------------------------------------------------------------------
isLegendOn = 1;
legendOnly = 0;
#-------------------------------------------------------------------------------
# Building the data.frame of results
#-------------------------------------------------------------------------------
imap = data.frame()
for (i in vorders) #loop on the orders
{
if(Energy == 0)
{
fileprefix = paste0(currentfolder, "eIm_", Type, "_ofs_30_order_",toString(i), "_PhD");
}else
{
fileprefix = paste0(currentfolder, "eIm_", Type, "_ofs_30_order_",toString(i), "_energy_", toString(Energy));
}
filename = paste0(fileprefix, ".bin")
# Load csv source
#-------------------------------------------------------------------------------
if (file.exists(filename))
{
names = c("label", "x", "y", "z", "px", "py", "pz",
"xEM", "yEM", "zEM", "pxEM", "pyEM", "pzEM",
"s1", "s2", "s3", "s4", "t", "dHz", "eIm");
imapc = dffbinary(filename, 20, names);
imapc$order = i;
}else
{
imapc = data.frame()
}
#rbind in ttm_all
imap = rbind(imap, imapc);
}
#-------------------------------------------------------------------------------
# Postprocessing
#-------------------------------------------------------------------------------
# Using EIm (EM units) instead of eIm (in NC units)
imap$EIm = gamma*imap$eIm
# To centered units
imap = NCtoC(imap, gamma)
# To physical units
imap = CtoPH(imap, L)
# To physical units
imap = EMtoPH(imap, L)
# Compute -log10(precision)
imap$log10eOm = log10(imap$EIm)
# Compute -log10(precision)
imap$flog10eOm = floor(log10(imap$EIm))
#Distance to the center
imap$rC = sqrt(imap$xC^2+imap$yC^2+imap$zC^2)
imap$rCPH = sqrt(imap$xCPH^2+imap$yCPH^2+imap$zCPH^2)
imap$frCPH = floor(imap$rCPH*1e-4)
imap$sC = sqrt(imap$s1^2+imap$s2^2+imap$s3^2++imap$s4^2)
#Abs of zC
imap$zCn = abs(imap$zC)
#-------------------------------------------------------------------------------
# Get rid of bad precision
#-------------------------------------------------------------------------------
imap = imap[which(imap$log10eOm < -1),]
#-------------------------------------------------------------------------------
# Select only some given value in the results
#-------------------------------------------------------------------------------
imapsys = data.frame();
# veps = c(seq(1,8,2)*1e-5, seq(1,8,2)*1e-6, seq(1,8,2)*1e-7, seq(1,8,2)*1e-8, seq(1,8,2)*1e-9);
# Select only positive some given value in the results
if(FWRK == "EM")
{
if(Li == "L2")
{
veps = 10^(-seq(5,9));
}else{
veps = 10^(-seq(5,9));
}
deps = 0.8
}else
{
if(Li == "L2")
{
veps = 10^(-seq(5,12));
}else{
veps = 10^(-seq(5,12));
}
deps = 0.5
}
for (eps in veps) #loop on the orders
{
isCloseToEPS = abs(imap$EIm - eps) < deps*eps;
imapr = imap[which(isCloseToEPS),]
#Ordered version
imapr = imapr[order(imapr$order),]
# Compute the mean position
imapm = ddply(imapr, .(order), summarize, mdHz = mean(dHz), msC = mean(sC), mrCPH = mean(rCPH), mrC = mean(rC), mxC = mean(abs(xC)), myC = mean(abs(yC)), mzC = mean(abs(zC)))
imapm$eps = log10(eps)
#rbind in ttm_all
imapsys = rbind(imapsys, imapm);
}
#-------------------------------------------------------------------------------
# Scale x
#-------------------------------------------------------------------------------
if(FWRK == "EM")
{
scale_x_dH = scale_x_continuous(breaks = seq(0,0.03,0.01))
scale_y_dH = scale_y_continuous(breaks = seq(0,0.03,0.01), limits = c(0, 0.033))
}else
{
scale_x_dH = scale_x_continuous(labels = scientific_format(), breaks = seq(0,12,3)*1e-5)
scale_y_dH = scale_y_continuous(labels = scientific_format(), breaks = seq(0,12,3)*1e-5, limits = c(0, 1.35e-4))
}
#-------------------------------------------------------------------------------
# colorLab
#-------------------------------------------------------------------------------
colorLab = "logEI"
# Or
#colorLab = "$\\raisebox{0.3ex}{\\scriptsize{$\\log_{10}(E_I)$}}$"
#-------------------------------------------------------------------------------
# Main plot: N vs dHz
#-------------------------------------------------------------------------------
ppme = plotdf_path(imapsys, "order", "mdHz", "Order $N$", "$\\delta \\bar{H}_0$", colorCol = "eps", colorLabel = colorLab, isColorFac = TRUE)
ppme = ppme + scale_x_continuous(breaks = seq(5,30,5))
ppme = ppme + scale_y_dH
ppme = ppme + custom_theme + legend_pos(c(0,1))
ppme
#-------------------------------------------------------------------------------
# Main plot: N vs sC
#-------------------------------------------------------------------------------
ppms = plotdf_path(imapsys, "order", "msC", "Order $N$", "$\\bar{\\|\\mathbf{s}\\||}_0$", colorCol = "eps", colorLabel = colorLab, isColorFac = TRUE)
ppms = ppms + scale_x_continuous(breaks = seq(5,30,5))
ppms = ppms + custom_theme + legend_pos(c(0,1))
ppms
#-------------------------------------------------------------------------------
# Main plot: dHz vs N
#
# ppme2 = geom_point_pretty(ppme2, imapsys, aes(mdHz, order, color = factor(eps)))
#-------------------------------------------------------------------------------
ppme2 = plotdf_path(imapsys, "mdHz", "order", "$\\delta \\bar{H}_0$", "Order $N$", colorCol = "eps", colorLabel = colorLab, isColorFac = TRUE)
ppme2 = ppme2 + scale_x_dH
ppme2 = ppme2 + scale_y_continuous(breaks = seq(5,30,5))
ppme2 = ppme2 + legend_inside_theme
# Needed to account for the cutting of the right part of the x labels
ppme2 = ppme2 + theme(plot.margin = margin(10,40,10,10))
ppme2
#-------------------------------------------------------------------------------
#Save in tikz
#-------------------------------------------------------------------------------
stop()
ggplot2tikz_phd(ppme, xSize, ySize, file = paste0(currentfolder, "EIm_", Type, "_ofs_30_order_",toString(i), "_3D.tex"))
ggplot2tikz_phd(ppms, xSize, ySize, file = paste0(currentfolder, "EIm_", Type, "_ofs_30_order_",toString(i), "_sC.tex"))
stop()
#-------------------------------------------------------------------------------
# Other plots from old implementation
#-------------------------------------------------------------------------------
Lit = "$L_2$"
ppmd = plotdf_line(imapsys, "mrCPH", "order", paste0("mean distance to ", Lit), "Order", colorCol = "eps", colorLabel = "Precision", isColorFac = TRUE)
ppmd = ppmd + scale_x_continuous(breaks = seq(5,30,5))
ppmd
ppmer = plotdf_line(imapsys, "mdHz", "eps", "mean($\\delta H_0$)", "$log_{10}(e_I)$", colorCol = "order", colorLabel = "Order", isColorFac = TRUE)
ppmer = ppmer + scale_x_continuous(breaks = seq(-10,-1,1))
ppmer
#-------------------------------------------------------------------------------
# 3D plot
#-------------------------------------------------------------------------------
# scatter3D(imapr$xEM, imapr$yEM, imapr$zEM, colvar = imapr$order, pch = 16, cex = 1.5)
# scatter3D(imap$xEM, imap$yEM, imap$zEM, colvar = imap$flog10eOm, pch = 16, cex = 1.5)
|
/script.covid.world.evolution.r | no_license | MaelaKloareg/Covid-19-Data | R | false | false | 8,941 | r | ||
#' Import and prepare a batch for analysis, this function replaces batchPrep()
#'
#' @param folder A path to a folder containing multiple exports from ABI7500 or Quantstudio 5
#' @param progress logical, if TRUE a progress bar is shown while files are read
#' @param equipment Specify from what equipment the export originates, "ABI" for ABI7500, "quant" for Quantstudio 5.
#' @param ... Arguments passed to read_ABI() or read_quant5()
#'
#' @return A data.frame combining the contents of every export file in \code{folder}.
#'
#' @import "dplyr"
#' @export
prepare_batch <- function(folder, ..., progress=TRUE, equipment="ABI"){
  # Pick the reader matching the export format. Unknown equipment fails fast
  # with the same error message as before, but now before any file is touched.
  reader <- switch(equipment,
                   ABI = read_ABI,
                   quant = read_quant5,
                   stop("No method for specified equipment"))
  # List all files to be prepared
  files <- list.files(folder)
  # Preallocate the result list
  dat <- vector("list", length(files))
  # Only set up a progress bar when there is something to iterate over;
  # txtProgressBar() errors when max == min (i.e. an empty folder).
  show_pb <- progress && length(files) > 0
  if (show_pb) pb <- txtProgressBar(min = 0, max = length(files), style = 3)
  # seq_along() (not 1:length(files)) so an empty folder yields zero
  # iterations instead of a spurious read of "folder/NA".
  for (i in seq_along(files)) {
    dat[[i]] <- reader(file.path(folder, files[i]), ...)
    if (show_pb) setTxtProgressBar(pb, i)
  }
  if (show_pb) close(pb)
  # Compile all per-file data.frames into one and return it
  data.frame(dplyr::bind_rows(dat))
}
| /R/prepare_batch.R | no_license | dhammarstrom/qpcrpal | R | false | false | 2,436 | r | #' Import and prepare a batch for analysis, this function replaces batchPrep()
#'
#' @param folder A path to a folder containing multiple exports from ABI7500 or Quantstudio 5
#' @param progress logical, if TRUE a progressbar is shown works only on windows
#' @param equipment Specify from what equipment the export originates, "ABI" for ABI7500, "quant" for quantstudio 5.
#' @param ... Argumnts passed to read_[EQUIPMENT]
#'
#' @import "dplyr"
#' @export
prepare_batch <- function(folder, ..., progress=TRUE, equipment="ABI"){
if(equipment=="ABI"){
# list all files to be prepared
files <- list.files(folder)
# empty list to store data
dat <- list()
## Initialize a Progress Bar
if(progress==TRUE){
pb <- txtProgressBar(min=0, max=length(files), style=3)
# loop trough file list and extractRawData
for(i in 1:length(files)){
dat[[i]] <- read_ABI(paste(folder,"/", files[i], sep=""), ...)
setTxtProgressBar(pb, i)
}
# compile to one data.frame
close(pb)
data <- data.frame(dplyr::bind_rows(dat))
}
if(progress==FALSE){
# loop trough file list and extractRawData
for(i in 1:length(files)){
dat[[i]] <- read_ABI(paste(folder,"/", files[i], sep=""), ...)
}
# compile to one data.frame
data <- data.frame(dplyr::bind_rows(dat))
}
# return dataframe
data
}else{
if(equipment=="quant"){
# list all files to be prepared
files <- list.files(folder)
# empty list to store data
dat <- list()
## Initialize a Progress Bar
if(progress==TRUE){
pb <- txtProgressBar(min=0, max=length(files), style=3)
# loop trough file list and extractRawData
for(i in 1:length(files)){
dat[[i]] <- read_quant5(paste(folder,"/", files[i], sep=""), ...)
setTxtProgressBar(pb, i)
}
# compile to one data.frame
close(pb)
data <- data.frame(dplyr::bind_rows(dat))
}
if(progress==FALSE){
# loop trough file list and extractRawData
for(i in 1:length(files)){
dat[[i]] <- read_quant5(paste(folder,"/", files[i], sep=""), ...)
}
# compile to one data.frame
data <- data.frame(dplyr::bind_rows(dat))
}
# return dataframe
data
}
else{
stop("No method for specified equipment")
}
}
}
|
#' @include NCRNWater_NCRNWaterObj_Class_def.R
#' @include getWData.R
#' @importFrom magrittr %>%
#' @importFrom lubridate year month
#' @importFrom ggplot2 ggplot aes geom_boxplot scale_x_discrete labs theme_bw theme element_blank geom_hline
#' @importFrom plotly ggplotly config
#'
#' @title waterbox
#'
#' @description Produces box plots from water data.
#'
#' @inheritParams getChars
#'
#' @param object Either a \code{data.frame} that is the output of \code{getWData}, a \code{Characteristic} object a \code{Site} object, a \code{Park} object or a \code{list} of such objects.
#' @param charname Required if \code{object} is not a \code{data.frame}. Name, in quotes, of a single \code{Characteristic} whose data should be graphed.
#' @param by Indicates how the data for the boxplot should be grouped. A text variable in quotes. Choices are:
#' \describe{
#' \item{"year"}{The default. Will produce a boxplot for each year in the data.}
#' \item{"month"}{Will produce a boxplot for each month, combining data across years. That is, all January data will be in one box, all February data in another, etc.}
#' \item{"site"}{If more than one site is included in the input object, this will produce a different box for each site.}
#' \item{"park"}{If more than one park is included in the input object, this will produce a different box for each park.}
#' }
#' @param assessment Vector indicating if assessment points will be marked on the graph. See details below.
#' @param yname Text, defaults to \code{NA}. A label for the y-axis. If a \code{Characteristic}, \code{Site}, or \code{Park} object is passed to \code{object}, then the y-label will default to the Display Name and Units for the Characteristic, unless overwritten by the \code{yname} argument. If a \code{data.frame} is passed then the y-label will either be the text from \code{yname} or blank if \code{yname} is left as \code{NA}.
#' @param xname Text, defaults to \code{NA}. A label for the x-axis. If a \code{Characteristic}, \code{Site}, or \code{Park} object is passed to \code{object}, then the x-label will default to whatever is indicated in \code{by}, unless overwritten by the \code{xname} argument. If a \code{data.frame} is passed then the x-label will either be the text from \code{xname} or blank if \code{xname} is left as \code{NA}.
#'
#' @param labels A character vector indicating the labels for the bars, defaults to NA. If labels are provided (one for each bar) they will be printed. If \code{object} is a \code{data.frame} and \code{labels} is \code{NA} then no labels will be printed. If \code{object} is a \code{Characteristic}, \code{Site}, or \code{Park} object, and \code{labels} is \code{NA} then the default will depend on the \code{by} argument: "year" will be labeled with 4-digit years, "month" with 3-letter months (Jan, Feb, etc.), "site" with the site name from the \code{Site}'s \code{Display Name} slot and "park" with the \code{Park}'s short name from the \code{ShortName} slot.
#' @param title A title in the graph in quotes. Defaults to \code{NULL}, which indicates no title should be used.
#' @param assesscolor a length one character vector with the color for the assessment lines.
#' @param outliercolor a length one character vector with the color for the outlier points.
#' @param sizes a length 3 numeric vector with the sizes for the outlier points, lines of the boxplot and assessment lines.
#' @param webplot If TRUE, the plot is produced using ggplotly from the plotly package. Will produce an html plot with interactive features.
#'
#' @param ... Additional arguments used to select and filter data passed to \code{\link{getWData}}
#'
#' @return Creates a boxplot
#'
#' @details The \code{assessment} argument determines if lines representing the assessment points should be drawn on the graph. If \code{FALSE} then no lines will be drawn. If \code{TRUE}, the default, then the upper and lower assessment points indicated in \code{object}'s \code{Characteristic} objects will be used to draw the lines. Note that if there are multiple assessment points, for example if different sites have different points, or if there is both an upper and lower point, they will all be drawn. If a \code{vector} of numbers is passed to \code{assessment} instead then those will serve as the assessment points and lines will be drawn accordingly. Note that if \code{object} is a \code{data.frame} then the only way to draw assessment points is by passing a \code{numeric vector} to \code{assessment}.
#'
#' @export
setGeneric(name="waterbox",function(object,parkcode=NA, sitecode=NA, charname, by="year",assessment=TRUE,yname=NA,xname=NA,labels=NA,title=NULL,assesscolor="red", outliercolor="blue",sizes=c(2,.5,1), webplot=FALSE,...){standardGeneric("waterbox")},signature=c("object") )
# waterbox method for NCRNWaterObj (Park/Site/Characteristic objects or lists
# of them): pulls the data with getWData(), fills in default axis labels and
# assessment points from the object's metadata, then delegates to the
# data.frame method below.
setMethod(f="waterbox", signature=c(object="NCRNWaterObj"),
          function(object,parkcode, sitecode, charname,by,assessment,yname,xname,labels,title,assesscolor,outliercolor,sizes,webplot,...){
            # Extract the data to plot; extra filters in ... are forwarded
            PlotData<-getWData(object=object,parkcode=parkcode, sitecode=sitecode, charname=charname,...)
            # Default y-label: "DisplayName (Units)" from the characteristic metadata
            if(is.na(yname)) yname<-paste0(getCharInfo(object=object, charname=charname, info="DisplayName")," (",
                                           getCharInfo(object=object, charname=charname, info="Units"),")") %>% unique
            # Default x-label follows the grouping choice
            if(is.na(xname)) xname<-switch(by,
                                           year="Year",
                                           month="Month",
                                           site="Site",
                                           park="Park"
            )
            # assessment == TRUE: replace the flag with the numeric lower/upper
            # assessment points stored in the Characteristic objects
            if(assessment) assessment<-c(getCharInfo(object=object,parkcode=parkcode, sitecode=sitecode, charname=charname, info="LowerPoint"),
                                         getCharInfo(object=object,parkcode=parkcode, sitecode=sitecode, charname=charname, info="UpperPoint")) %>%
                unlist %>% unique
            assessment<-assessment[!is.na(assessment)] # needed if there is no upper or lower assessment.
            # Hand off to the data.frame method with everything resolved
            waterbox(object=PlotData,by=by,assessment=assessment,yname=yname,xname=xname,labels=labels,title=title,assesscolor=assesscolor,
                     outliercolor=outliercolor, sizes=sizes, webplot=webplot)
          })
# waterbox method for data.frame input: expects the output of getWData()
# (columns Date and Value, plus Site and/or Park depending on 'by').
# Builds the grouping factor, draws the boxplot with optional assessment
# lines, and returns either a ggplot or (webplot=TRUE) a plotly object.
setMethod(f="waterbox", signature=c(object="data.frame"),
          function(object,by,assessment,yname,xname,labels,title,assesscolor,outliercolor, sizes, webplot){
            # Grouping variable for the x-axis, chosen by the 'by' argument
            Grouper<-switch(by,
                  year=object$Date %>% year %>% factor,
                  month=object$Date %>% month(label=T) %>% factor,
                  site=object$Site,
                  park=object$Park)
            # Blank axis labels when none were supplied
            if(is.na(yname)) yname<-""
            if(all(is.na(xname))) xname<-""
            # Default bar labels mirror the grouping choice
            if(all(is.na(labels))) labels<-switch(by,
                   year=object$Date %>% year %>% unique,
                   month=object$Date %>% month(label=T) %>% unique %>% sort %>% as.character,
                   site=object$Site %>% unique,
                   park=object$Park %>% unique)
            # Assessment lines are only added when numeric points were supplied
            # (assessment=FALSE or an empty vector skips the geom_hline layer)
            OutPlot<-ggplot(object,aes(Grouper,Value)) +
              geom_boxplot(outlier.size=sizes[1], outlier.color=outliercolor, lwd=sizes[2]) +
              {if (is.numeric(assessment)) geom_hline(yintercept=assessment,color=assesscolor,linetype="dashed",size=sizes[3])}+
              labs(title=title,y=yname)+
              scale_x_discrete(name=xname,labels=labels)+
              theme_bw()+
              theme(panel.grid = element_blank())
            # NOTE(review): ifelse() used for scalar control flow; this works
            # because return() fires when the chosen branch is evaluated, but a
            # plain if/else would be clearer.
            ifelse(webplot, return(ggplotly(OutPlot) %>% plotly::config(displaylogo=F)),return(OutPlot))
          })
| /R/waterbox.R | no_license | NTD-MIDN/NCRNWater | R | false | false | 7,566 | r | #' @include NCRNWater_NCRNWaterObj_Class_def.R
#' @include getWData.R
#' @importFrom magrittr %>%
#' @importFrom lubridate year month
#' @importFrom ggplot2 ggplot aes geom_boxplot scale_x_discrete labs theme_bw theme element_blank geom_hline
#' @importFrom plotly ggplotly config
#'
#' @title waterbox
#'
#' @description Produces box plots from water data.
#'
#' @inheritParams getChars
#'
#' @param object Either a \code{data.frame} that is the output of \code{getWData}, a \code{Characteristic} object a \code{Site} object, a \code{Park} object or a \code{list} of such objects.
#' @param charname Required if \code{object} is not a \code{data.frame}. Name, in quotes, of a single \code{Characteristic} whose data should be graphed.
#' @param by Indicates how the data for the boxplot should be grouped. A text variable in quotes. Choices are:
#' \describe{
#'  \item{"year"}{The default. Will produce a boxplot for each year in the data}
#'  \item{"month"}{Will produce a boxplot for each month, combining data across years. That is, all January data will be in one box, all February data in another, etc.}
#'  \item{"site"}{If more than one site is included in the input object, this will produce a different box for each site.}
#'  \item{"park"}{If more than one park is included in the input object, this will produce a different box for each park}
#' }
#' @param assessment Vector indicating if assessment points will be marked on the graph. See details below.
#' @param yname Text, defaults to \code{NA}. A label for the y-axis. If a \code{Characteristic}, \code{Site}, or \code{Park} object is passed to \code{object}, then the y-label will default to the Display Name and Units for the Characteristic, unless overwritten by the \code{yname} argument. If a \code{data.frame} is passed then the y-label will either be the text from \code{yname} or blank if \code{yname} is left as \code{NA}.
#' @param xname Text, defaults to \code{NA}. A label for the x-axis. If a \code{Characteristic}, \code{Site}, or \code{Park} object is passed to \code{object}, then the x-label will default to whatever is indicated in \code{by}, unless overwritten by the \code{xname} argument. If a \code{data.frame} is passed then the x-label will either be the text from \code{xname} or blank if \code{xname} is left as \code{NA}.
#'
#' @param labels A character vector indicating the labels for the bars, defaults to NA. If labels are provided (one for each bar) they will be printed. If \code{object} is a \code{data.frame} and \code{labels} is \code{NA} then no labels will be printed. If \code{object} is a \code{Characteristic}, \code{Site}, or \code{Park} object, and \code{labels} is \code{NA} then the default will depend on the \code{by} argument. "year" will be labeled with 4 digit years, "month" with 3 letter months (Jan, Feb, etc.), "site" with the site name from the \code{Site}'s \code{Display Name} slot and "park" with the \code{Park}'s short name from the \code{ShortName} slot.
#' @param title A title for the graph in quotes. Defaults to \code{NULL}, which indicates no title should be used.
#' @param assesscolor a length one character vector with the color for the assessment lines.
#' @param outliercolor a length one character vector with the color for the outlier points.
#' @param sizes a length 3 numeric vector with the sizes for the outlier points, lines of the boxplot and assessment lines.
#' @param webplot If TRUE, the plot is produced using ggploty from the ploty package. Will produce a html plot with interactive features.
#'
#' @param ... Additional arguments used to select and filter data passed to \code{\link{getWData}}
#'
#' @return Creates a boxplot
#'
#' @details The \code{assessment} argument determines if lines representing the assessment points should be drawn on the graph. If \code{FALSE} then no lines will be drawn. If \code{TRUE}, the default, then the upper and lower assessment points indicated in \code{object}'s \code{Character} objects will be used to draw the lines. Note that if there are multiple assessemnt points, for example if diffrerent sites have different points, or if there is both an upper and lower point, they will all be drawn. If a \code{vector} of numbers is passed to \code{assessment} instead then those will serve as the assessment points and lines will be drawn accordingly. Note that if \code{obejct} is a \code{data.frame} then the only way to draw assessment points is by passing a \code{numeric vector} to \code{assessment}.
#'
#' @export
setGeneric(name="waterbox",function(object,parkcode=NA, sitecode=NA, charname, by="year",assessment=TRUE,yname=NA,xname=NA,labels=NA,title=NULL,assesscolor="red", outliercolor="blue",sizes=c(2,.5,1), webplot=FALSE,...){standardGeneric("waterbox")},signature=c("object") )
# waterbox method for NCRNWater objects (Park/Site/Characteristic or lists).
# Pulls the requested data with getWData(), fills in default axis labels and
# assessment points from the Characteristic metadata, then delegates the
# actual plotting to the data.frame method.
setMethod(f="waterbox", signature=c(object="NCRNWaterObj"),
function(object,parkcode, sitecode, charname,by,assessment,yname,xname,labels,title,assesscolor,outliercolor,sizes,webplot,...){
# Extra arguments (...) are passed through to getWData() for filtering.
PlotData<-getWData(object=object,parkcode=parkcode, sitecode=sitecode, charname=charname,...)
# Default y-axis label: "<Display Name> (<Units>)" from the Characteristic.
if(is.na(yname)) yname<-paste0(getCharInfo(object=object, charname=charname, info="DisplayName")," (",
getCharInfo(object=object, charname=charname, info="Units"),")") %>% unique
# Default x-axis label follows the grouping choice.
if(is.na(xname)) xname<-switch(by,
year="Year",
month="Month",
site="Site",
park="Park"
)
# assessment==TRUE: replace the logical flag with the numeric lower/upper
# assessment points stored in the Characteristic metadata (may be several).
if(assessment) assessment<-c(getCharInfo(object=object,parkcode=parkcode, sitecode=sitecode, charname=charname, info="LowerPoint"),
getCharInfo(object=object,parkcode=parkcode, sitecode=sitecode, charname=charname, info="UpperPoint")) %>%
unlist %>% unique
assessment<-assessment[!is.na(assessment)] # needed if there is no upper or lower assessment.
# Delegate to the data.frame method for the actual plotting.
waterbox(object=PlotData,by=by,assessment=assessment,yname=yname,xname=xname,labels=labels,title=title,assesscolor=assesscolor,
outliercolor=outliercolor, sizes=sizes, webplot=webplot)
})
# waterbox method for data.frame input: draws the boxplot itself.
# Expects columns Date, Value and (depending on `by`) Site or Park, as
# produced by getWData(). Returns a ggplot object, or a plotly htmlwidget
# when webplot = TRUE.
setMethod(f = "waterbox", signature = c(object = "data.frame"),
          function(object, by, assessment, yname, xname, labels, title,
                   assesscolor, outliercolor, sizes, webplot) {
  # Grouping factor for the x-axis, chosen by the `by` argument.
  Grouper <- switch(by,
    year  = object$Date %>% year %>% factor,
    month = object$Date %>% month(label = TRUE) %>% factor,  # TRUE, not T
    site  = object$Site,
    park  = object$Park)

  # Blank out axis labels that were left as NA.
  if (is.na(yname)) yname <- ""
  if (all(is.na(xname))) xname <- ""

  # Default bar labels mirror the grouping variable.
  if (all(is.na(labels))) labels <- switch(by,
    year  = object$Date %>% year %>% unique,
    month = object$Date %>% month(label = TRUE) %>% unique %>% sort %>% as.character,
    site  = object$Site %>% unique,
    park  = object$Park %>% unique)

  OutPlot <- ggplot(object, aes(Grouper, Value)) +
    geom_boxplot(outlier.size = sizes[1], outlier.color = outliercolor, lwd = sizes[2]) +
    # Dashed assessment line(s) are drawn only when numeric points were supplied.
    {if (is.numeric(assessment)) geom_hline(yintercept = assessment, color = assesscolor,
                                            linetype = "dashed", size = sizes[3])} +
    labs(title = title, y = yname) +
    scale_x_discrete(name = xname, labels = labels) +
    theme_bw() +
    theme(panel.grid = element_blank())

  # Plain if/else instead of ifelse(): ifelse() is vectorized, and putting
  # return() inside its arguments only worked by lazy-evaluation accident.
  if (webplot) {
    ggplotly(OutPlot) %>% plotly::config(displaylogo = FALSE)
  } else {
    OutPlot
  }
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc-frequency.R
\name{calc_frequency}
\alias{calc_frequency}
\title{Calculate frequency of cell groups present in object}
\usage{
calc_frequency(
input,
data_col,
cluster_col = NULL,
prefix = paste0(data_col, "_"),
return_df = FALSE,
per_chain = FALSE,
chain = NULL,
chain_col = global$chain_col,
sep = global$sep
)
}
\arguments{
\item{input}{Single cell object or data.frame containing V(D)J data. If a
data.frame is provided, the cell barcodes should be stored as row names.}
\item{data_col}{meta.data column containing cell labels to use for
calculating frequency. To calculate clonotype frequencies, provide the column
containing clonotype IDs, to calculate isotype frequencies provide the column
containing cell isotypes. By default the clonotype_id is used for
calculations.}
\item{cluster_col}{meta.data column containing cluster IDs to use for
grouping cells when calculating clonotype abundance}
\item{prefix}{Prefix to add to new columns}
\item{return_df}{Return results as a data.frame. If set to \code{FALSE}, results
will be added to the input object.}
\item{per_chain}{If \code{TRUE} the frequency of each per-chain value will be
calculated. If \code{FALSE} per-chain data will not be parsed and the values
present in \code{data_col} will be used as is.}
\item{chain}{Chain(s) to use for calculating frequency. Set to \code{NULL} to
include all chains.}
\item{chain_col}{meta.data column(s) containing chains for each cell}
\item{sep}{Separator used for storing per-chain V(D)J data for each cell}
}
\value{
Single cell object or data.frame with clonotype frequencies
}
\description{
Calculate the frequency of each cell label present in the provided meta.data
column. This is useful for comparing the proportion of cells belonging to
different samples, cell types, clonotypes, isotypes, etc.
}
\examples{
# Calculate clonotype abundance using all cells
res <- calc_frequency(
vdj_so,
data_col = "clonotype_id"
)
head(slot(res, "meta.data"), 1)
# Group cells based on meta.data column before calculating abundance
res <- calc_frequency(
vdj_sce,
data_col = "clonotype_id",
cluster_col = "orig.ident"
)
head(slot(res, "colData"), 1)
# Add a prefix to the new columns
# this is useful if multiple abundance calculations are stored in the
# meta.data
res <- calc_frequency(
vdj_so,
data_col = "clonotype_id",
prefix = "bcr_"
)
head(slot(res, "meta.data"), 1)
# Return a data.frame instead of adding the results to the input object
res <- calc_frequency(
vdj_sce,
data_col = "clonotype_id",
return_df = TRUE
)
head(res, 1)
}
\seealso{
\code{\link[=plot_frequency]{plot_frequency()}}, \code{\link[=plot_clone_frequency]{plot_clone_frequency()}}
}
| /man/calc_frequency.Rd | permissive | rnabioco/djvdj | R | false | true | 2,791 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc-frequency.R
\name{calc_frequency}
\alias{calc_frequency}
\title{Calculate frequency of cell groups present in object}
\usage{
calc_frequency(
input,
data_col,
cluster_col = NULL,
prefix = paste0(data_col, "_"),
return_df = FALSE,
per_chain = FALSE,
chain = NULL,
chain_col = global$chain_col,
sep = global$sep
)
}
\arguments{
\item{input}{Single cell object or data.frame containing V(D)J data. If a
data.frame is provided, the cell barcodes should be stored as row names.}
\item{data_col}{meta.data column containing cell labels to use for
calculating frequency. To calculate clonotype frequencies, provide the column
containing clonotype IDs, to calculate isotype frequencies provide the column
containing cell isotypes. By default the clonotype_id is used for
calculations.}
\item{cluster_col}{meta.data column containing cluster IDs to use for
grouping cells when calculating clonotype abundance}
\item{prefix}{Prefix to add to new columns}
\item{return_df}{Return results as a data.frame. If set to \code{FALSE}, results
will be added to the input object.}
\item{per_chain}{If \code{TRUE} the frequency of each per-chain value will be
calculated. If \code{FALSE} per-chain data will not be parsed and the values
present in \code{data_col} will be used as is.}
\item{chain}{Chain(s) to use for calculating frequency. Set to \code{NULL} to
include all chains.}
\item{chain_col}{meta.data column(s) containing chains for each cell}
\item{sep}{Separator used for storing per-chain V(D)J data for each cell}
}
\value{
Single cell object or data.frame with clonotype frequencies
}
\description{
Calculate the frequency of each cell label present in the provided meta.data
column. This is useful for comparing the proportion of cells belonging to
different samples, cell types, clonotypes, isotypes, etc.
}
\examples{
# Calculate clonotype abundance using all cells
res <- calc_frequency(
vdj_so,
data_col = "clonotype_id"
)
head(slot(res, "meta.data"), 1)
# Group cells based on meta.data column before calculating abundance
res <- calc_frequency(
vdj_sce,
data_col = "clonotype_id",
cluster_col = "orig.ident"
)
head(slot(res, "colData"), 1)
# Add a prefix to the new columns
# this is useful if multiple abundance calculations are stored in the
# meta.data
res <- calc_frequency(
vdj_so,
data_col = "clonotype_id",
prefix = "bcr_"
)
head(slot(res, "meta.data"), 1)
# Return a data.frame instead of adding the results to the input object
res <- calc_frequency(
vdj_sce,
data_col = "clonotype_id",
return_df = TRUE
)
head(res, 1)
}
\seealso{
\code{\link[=plot_frequency]{plot_frequency()}}, \code{\link[=plot_clone_frequency]{plot_clone_frequency()}}
}
|
# Fit a cross-validated elastic net (alpha = 0.15) to the ReliefF-selected
# bone training set and append the fitted glmnet path to a log file.
library(glmnet)

# header = TRUE spelled out: the original `head=T` only worked via partial
# argument matching of read.table's `header` argument.
mydata <- read.table("./TrainingSet/ReliefF/bone.csv", header = TRUE, sep = ",")

x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictors start in column 4
y <- as.matrix(mydata[, 1])               # response is the first column

set.seed(123)  # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.15,
                 family = "gaussian", standardize = TRUE)

# Redirect output to the log file, print the fit, then restore the console.
sink("./Model/EN/ReliefF/bone/bone_031.txt", append = TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/ReliefF/bone/bone_031.R | no_license | leon1003/QSMART | R | false | false | 346 | r | library(glmnet)
mydata = read.table("./TrainingSet/ReliefF/bone.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.15,family="gaussian",standardize=TRUE)
sink('./Model/EN/ReliefF/bone/bone_031.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_examplesfile.R
\name{make_examplesfile}
\alias{make_examplesfile}
\title{Create an rmarkdown file with examples on how to use BEQR functions.}
\usage{
make_examplesfile(dir_path = "notebook", file_name = "newfile.rmd")
}
\arguments{
\item{dir_path}{relative or absolute path to an existing file directory}
\item{file_name}{a name to be given to the template rmarkdown file (include
*.rmd extension)}
}
\value{
creates an rmarkdown file in an existing file directory
}
\description{
This function has a template rmarkdown file. The file contains BEQR functions
for plotting, NCA and ABE analysis.
}
| /man/make_examplesfile.Rd | no_license | Eliford/BEQR | R | false | true | 681 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_examplesfile.R
\name{make_examplesfile}
\alias{make_examplesfile}
\title{Create an rmarkdown file with examples on how to use BEQR functions.}
\usage{
make_examplesfile(dir_path = "notebook", file_name = "newfile.rmd")
}
\arguments{
\item{dir_path}{relative or absolute path to an existing file directory}
\item{file_name}{a name to be given to the template rmarkdown file (include
*.rmd extension)}
}
\value{
creates an rmarkdown file in an existing file directory
}
\description{
This function has a template rmarkdown file. The file contains BEQR functions
for plotting, NCA and ABE analysis.
}
|
#
# OpenMx Ordinal Data Example
# Revision history:
# Michael Neale 14 Aug 2010
#
# Step 1: load libraries
require(OpenMx)
#
# Step 2: set up simulation parameters
# Note: nVariables>=3, nThresholds>=1, nSubjects>=nVariables*nThresholds (maybe more)
# and model should be identified
#
nVariables<-5
nFactors<-1
nThresholds<-3
nSubjects<-500
isIdentified<-function(nVariables,nFactors) as.logical(1+sign((nVariables*(nVariables-1)/2) - nVariables*nFactors + nFactors*(nFactors-1)/2))
# if this function returns FALSE then model is not identified, otherwise it is.
isIdentified(nVariables,nFactors)
loadings <- matrix(.7,nrow=nVariables,ncol=nFactors)
residuals <- 1 - (loadings * loadings)
sigma <- loadings %*% t(loadings) + vec2diag(residuals)
mu <- matrix(0,nrow=nVariables,ncol=1)
# Step 3: simulate multivariate normal data
set.seed(1234)
continuousData <- mvtnorm::rmvnorm(n=nSubjects,mu,sigma)
# Step 4: chop continuous variables into ordinal data
# with nThresholds+1 approximately equal categories, based on 1st variable
quants<-quantile(continuousData[,1], probs = c((1:nThresholds)/(nThresholds+1)))
ordinalData<-matrix(0,nrow=nSubjects,ncol=nVariables)
for(i in 1:nVariables)
{
ordinalData[,i] <- cut(as.vector(continuousData[,i]),c(-Inf,quants,Inf))
}
# Step 5: make the ordinal variables into R factors
ordinalData <- mxFactor(as.data.frame(ordinalData),levels=c(1:(nThresholds+1)))
# Step 6: name the variables
fruitynames<-paste("banana",1:nVariables,sep="")
names(ordinalData)<-fruitynames
# One-factor threshold (ordinal) factor model, estimated by ML from raw
# ordinal data.
thresholdModel <- mxModel("thresholdModel",
# Factor loadings, bounded so implied correlations stay in (-1, 1).
mxMatrix("Full", nVariables, nFactors, values=0.2, free=TRUE, lbound=-.99, ubound=.99, name="L"),
mxMatrix("Unit", nVariables, 1, name="vectorofOnes"),
# Means fixed at zero for identification of the ordinal model.
mxMatrix("Zero", 1, nVariables, name="M"),
# Residual variances chosen so total variances equal 1.
mxAlgebra(vectorofOnes - (diag2vec(L %*% t(L))) , name="E"),
mxAlgebra(L %*% t(L) + vec2diag(E), name="impliedCovs"),
# Thresholds parameterized as deviations; rows 2+ are bounded positive so
# the cumulated thresholds are strictly increasing within each variable.
mxMatrix("Full",
name="thresholdDeviations", nrow=nThresholds, ncol=nVariables,
values=.2,
free = TRUE,
lbound = rep( c(-Inf,rep(.01,(nThresholds-1))) , nVariables),
dimnames = list(c(), fruitynames)),
mxMatrix("Lower",nThresholds,nThresholds,values=1,free=F,name="unitLower"),
# Cumulative sum of the deviations gives the actual threshold matrix.
mxAlgebra(unitLower %*% thresholdDeviations, name="thresholdMatrix"),
mxFitFunctionML(),mxExpectationNormal(covariance="impliedCovs", means="M", dimnames = fruitynames, thresholds="thresholdMatrix"),
mxData(observed=ordinalData, type='raw')
)
thresholdModelrun <- mxRun(thresholdModel)
thresholdSaturated <- mxRefModels(thresholdModelrun, run=TRUE)
summary(thresholdModelrun, refModels=thresholdSaturated)
a <- proc.time()
thresholdModelWLS <- mxModel(thresholdModel, name="WLSThresholdModel", mxDataWLS(ordinalData, type="ULS"), #Change type here!!!
mxExpectationNormal(covariance="impliedCovs", dimnames = fruitynames, thresholds="thresholdMatrix"),
mxFitFunctionWLS())
thresholdModelWLSrun <- mxRun(thresholdModelWLS)
b <- proc.time()
b-a
summary(thresholdModelrun)$wallTime
summary(thresholdModelWLSrun)
wls.L <- mxEval(L, thresholdModelWLSrun) #should be all 0.7
wls.T <- mxEval(thresholdMatrix, thresholdModelWLSrun) #should be all quants
ml.L <- mxEval(L, thresholdModelrun) #should be all 0.7
ml.T <- mxEval(thresholdMatrix, thresholdModelrun) #should be all quants
rms <- function(x, y){sqrt(mean((x-y)^2))}
omxCheckTrue(rms(wls.L, .7) < 0.05)
rms(ml.L, .7)
omxCheckTrue(rms(wls.T, quants) < 0.08)
rms(ml.T, quants)
ml.sum <- summary(thresholdModelrun, refModels=thresholdSaturated)
wls.sum <- summary(thresholdModelWLSrun)
omxCheckWithinPercentError(wls.sum$Chi, 0.653, percent=10)
omxCheckWithinPercentError(ml.sum$Chi, wls.sum$Chi, percent=15)
omxCheckEquals(ml.sum$ChiDoF, wls.sum$ChiDoF)
ciModel <- mxModel(thresholdModelWLSrun, mxCI("L"))
omxCheckError(mxRun(ciModel, intervals=TRUE), "Confidence intervals are not supported for units 3")
#------------------------------------------------------------------------------
tmod2 <- mxModel("thresholdModel2",
mxMatrix("Full", nVariables, nFactors, values=0.2, free=TRUE, lbound=-.99, ubound=1.5, name="L"),
mxMatrix("Diag", nVariables, nVariables, values=.1, free=TRUE, lbound=1e-8, name="R"),
mxMatrix("Unit", nVariables, 1, name="vectorofOnes"),
mxMatrix("Full", 1, nVariables, values=0, free=TRUE, name="M"),
mxAlgebra(L %*% t(L) + R, name="impliedCovs"),
mxMatrix("Full", nThresholds, nVariables, values=c(0, 1, 2), name="Thresh"),
mxFitFunctionML(),
mxExpectationNormal(covariance="impliedCovs", means="M", dimnames = fruitynames, thresholds="Thresh"),
mxData(observed=ordinalData, type='raw')
)
trun2 <- mxRun(tmod2)
a <- proc.time()
wmod2 <- mxModel(tmod2, mxDataWLS(ordinalData), mxFitFunctionWLS(),
mxAlgebra(cov2cor(impliedCovs), name='newCov'),
mxMatrix("Unit", nrow=nThresholds, ncol=1, name="UnitVector"),
mxAlgebra(UnitVector %x% t(sqrt(diag2vec(impliedCovs))), name='theStandardDeviations'),
mxAlgebra(UnitVector %x% M, name='theM'),
mxAlgebra( (Thresh-theM)/theStandardDeviations, name='newThresh'),
mxExpectationNormal(covariance='newCov', thresholds='newThresh', dimnames = fruitynames) #N.B. means left out on purpose
)
#mxEval(theM, wmod2, compute=TRUE)
#mxEval(Thresh, wmod2, compute=TRUE)
#mxEval(theStandardDeviations, wmod2, compute=TRUE)
#mxEval(newCov, wmod2, compute=TRUE)
#mxEval(newThresh, wmod2, compute=TRUE)
wrun2 <- mxRun(wmod2)
b <- proc.time()
b-a
summary(trun2)$wallTime
cbind(omxGetParameters(trun2), omxGetParameters(wrun2))
plot(omxGetParameters(trun2), omxGetParameters(wrun2))
abline(a=0, b=1)
omxCheckCloseEnough(rms(omxGetParameters(trun2), omxGetParameters(wrun2)), 0, .03)
omxCheckCloseEnough(cor(omxGetParameters(trun2), omxGetParameters(wrun2)), 1, .05)
| /inst/models/nightly/thresholdModel1Factor5VariateWLS.R | no_license | Ewan-Keith/OpenMx | R | false | false | 5,763 | r | #
# OpenMx Ordinal Data Example
# Revision history:
# Michael Neale 14 Aug 2010
#
# Step 1: load libraries
require(OpenMx)
#
# Step 2: set up simulation parameters
# Note: nVariables>=3, nThresholds>=1, nSubjects>=nVariables*nThresholds (maybe more)
# and model should be identified
#
nVariables<-5
nFactors<-1
nThresholds<-3
nSubjects<-500
isIdentified<-function(nVariables,nFactors) as.logical(1+sign((nVariables*(nVariables-1)/2) - nVariables*nFactors + nFactors*(nFactors-1)/2))
# if this function returns FALSE then model is not identified, otherwise it is.
isIdentified(nVariables,nFactors)
loadings <- matrix(.7,nrow=nVariables,ncol=nFactors)
residuals <- 1 - (loadings * loadings)
sigma <- loadings %*% t(loadings) + vec2diag(residuals)
mu <- matrix(0,nrow=nVariables,ncol=1)
# Step 3: simulate multivariate normal data
set.seed(1234)
continuousData <- mvtnorm::rmvnorm(n=nSubjects,mu,sigma)
# Step 4: chop continuous variables into ordinal data
# with nThresholds+1 approximately equal categories, based on 1st variable
quants<-quantile(continuousData[,1], probs = c((1:nThresholds)/(nThresholds+1)))
ordinalData<-matrix(0,nrow=nSubjects,ncol=nVariables)
for(i in 1:nVariables)
{
ordinalData[,i] <- cut(as.vector(continuousData[,i]),c(-Inf,quants,Inf))
}
# Step 5: make the ordinal variables into R factors
ordinalData <- mxFactor(as.data.frame(ordinalData),levels=c(1:(nThresholds+1)))
# Step 6: name the variables
fruitynames<-paste("banana",1:nVariables,sep="")
names(ordinalData)<-fruitynames
thresholdModel <- mxModel("thresholdModel",
mxMatrix("Full", nVariables, nFactors, values=0.2, free=TRUE, lbound=-.99, ubound=.99, name="L"),
mxMatrix("Unit", nVariables, 1, name="vectorofOnes"),
mxMatrix("Zero", 1, nVariables, name="M"),
mxAlgebra(vectorofOnes - (diag2vec(L %*% t(L))) , name="E"),
mxAlgebra(L %*% t(L) + vec2diag(E), name="impliedCovs"),
mxMatrix("Full",
name="thresholdDeviations", nrow=nThresholds, ncol=nVariables,
values=.2,
free = TRUE,
lbound = rep( c(-Inf,rep(.01,(nThresholds-1))) , nVariables),
dimnames = list(c(), fruitynames)),
mxMatrix("Lower",nThresholds,nThresholds,values=1,free=F,name="unitLower"),
mxAlgebra(unitLower %*% thresholdDeviations, name="thresholdMatrix"),
mxFitFunctionML(),mxExpectationNormal(covariance="impliedCovs", means="M", dimnames = fruitynames, thresholds="thresholdMatrix"),
mxData(observed=ordinalData, type='raw')
)
thresholdModelrun <- mxRun(thresholdModel)
thresholdSaturated <- mxRefModels(thresholdModelrun, run=TRUE)
summary(thresholdModelrun, refModels=thresholdSaturated)
a <- proc.time()
thresholdModelWLS <- mxModel(thresholdModel, name="WLSThresholdModel", mxDataWLS(ordinalData, type="ULS"), #Change type here!!!
mxExpectationNormal(covariance="impliedCovs", dimnames = fruitynames, thresholds="thresholdMatrix"),
mxFitFunctionWLS())
thresholdModelWLSrun <- mxRun(thresholdModelWLS)
b <- proc.time()
b-a
summary(thresholdModelrun)$wallTime
summary(thresholdModelWLSrun)
wls.L <- mxEval(L, thresholdModelWLSrun) #should be all 0.7
wls.T <- mxEval(thresholdMatrix, thresholdModelWLSrun) #should be all quants
ml.L <- mxEval(L, thresholdModelrun) #should be all 0.7
ml.T <- mxEval(thresholdMatrix, thresholdModelrun) #should be all quants
rms <- function(x, y){sqrt(mean((x-y)^2))}
omxCheckTrue(rms(wls.L, .7) < 0.05)
rms(ml.L, .7)
omxCheckTrue(rms(wls.T, quants) < 0.08)
rms(ml.T, quants)
ml.sum <- summary(thresholdModelrun, refModels=thresholdSaturated)
wls.sum <- summary(thresholdModelWLSrun)
omxCheckWithinPercentError(wls.sum$Chi, 0.653, percent=10)
omxCheckWithinPercentError(ml.sum$Chi, wls.sum$Chi, percent=15)
omxCheckEquals(ml.sum$ChiDoF, wls.sum$ChiDoF)
ciModel <- mxModel(thresholdModelWLSrun, mxCI("L"))
omxCheckError(mxRun(ciModel, intervals=TRUE), "Confidence intervals are not supported for units 3")
#------------------------------------------------------------------------------
tmod2 <- mxModel("thresholdModel2",
mxMatrix("Full", nVariables, nFactors, values=0.2, free=TRUE, lbound=-.99, ubound=1.5, name="L"),
mxMatrix("Diag", nVariables, nVariables, values=.1, free=TRUE, lbound=1e-8, name="R"),
mxMatrix("Unit", nVariables, 1, name="vectorofOnes"),
mxMatrix("Full", 1, nVariables, values=0, free=TRUE, name="M"),
mxAlgebra(L %*% t(L) + R, name="impliedCovs"),
mxMatrix("Full", nThresholds, nVariables, values=c(0, 1, 2), name="Thresh"),
mxFitFunctionML(),
mxExpectationNormal(covariance="impliedCovs", means="M", dimnames = fruitynames, thresholds="Thresh"),
mxData(observed=ordinalData, type='raw')
)
trun2 <- mxRun(tmod2)
a <- proc.time()
wmod2 <- mxModel(tmod2, mxDataWLS(ordinalData), mxFitFunctionWLS(),
mxAlgebra(cov2cor(impliedCovs), name='newCov'),
mxMatrix("Unit", nrow=nThresholds, ncol=1, name="UnitVector"),
mxAlgebra(UnitVector %x% t(sqrt(diag2vec(impliedCovs))), name='theStandardDeviations'),
mxAlgebra(UnitVector %x% M, name='theM'),
mxAlgebra( (Thresh-theM)/theStandardDeviations, name='newThresh'),
mxExpectationNormal(covariance='newCov', thresholds='newThresh', dimnames = fruitynames) #N.B. means left out on purpose
)
#mxEval(theM, wmod2, compute=TRUE)
#mxEval(Thresh, wmod2, compute=TRUE)
#mxEval(theStandardDeviations, wmod2, compute=TRUE)
#mxEval(newCov, wmod2, compute=TRUE)
#mxEval(newThresh, wmod2, compute=TRUE)
wrun2 <- mxRun(wmod2)
b <- proc.time()
b-a
summary(trun2)$wallTime
cbind(omxGetParameters(trun2), omxGetParameters(wrun2))
plot(omxGetParameters(trun2), omxGetParameters(wrun2))
abline(a=0, b=1)
omxCheckCloseEnough(rms(omxGetParameters(trun2), omxGetParameters(wrun2)), 0, .03)
omxCheckCloseEnough(cor(omxGetParameters(trun2), omxGetParameters(wrun2)), 1, .05)
|
#' NAEP IRT parameters.
#'
#' This data table contains NCES NAEP Assessment IRT parameters from 1990 to 2015.
#' To find all item parameters for an assessment, filter the data table by level, subject,
#' and year. Assessments from 1990 to 2000 also require filtering by assessmentCode and
#' accommodations.
#'
#'
#' @format A data frame with columns
#' \describe{
#' \item{source}{source of data, character}
#' \item{level}{grade or age level of test, integer}
#' \item{levelType}{"grade" or "age", character}
#' \item{NAEPid}{test item ID, character}
#' \item{assessmentCode}{"State" or "National", character}
#' \item{accommodations}{"accom" or "no-accom", character}
#' \item{subtest}{subtest within subject, character}
#' \item{subject}{subject of test, character}
#' \item{year}{year of test, integer}
#' \item{a}{slope or a parameter, numeric}
#' \item{b}{difficulty or d parameter, numeric}
#' \item{c}{guessing or g parameter, numeric}
#' \item{d1}{location of cut point 1 for polytomous item, numeric}
#' \item{d2}{location of cut point 2 for polytomous item, numeric}
#' \item{d3}{location of cut point 3 for polytomous item, numeric}
#' \item{d4}{location of cut point 4 for polytomous item, numeric}
#' \item{d5}{location of cut point 5 for polytomous item, numeric}
#' }
#' @references Department of Education, Institute of Education Sciences, National Center for Education Statistics, National Assessment of Educational Progress (NAEP), 1990-2015, various subjects. Retrieved from <https://nces.ed.gov/nationsreportcard/tdw/analysis/scaling_irt.aspx>
#'
#' @example man/examples/parameters.R
"parameters"
| /R/parameters.R | no_license | cran/NAEPirtparams | R | false | false | 1,672 | r | #' NAEP IRT parameters.
#'
#' This data table contains NCES NAEP Assessment IRT parameters from 1990 to 2015.
#' To find all item parameters for an assessment, filter the data table by level, subject,
#' and year. Assessments from 1990 to 2000 also require filtering by assessmentCode and
#' accommodations.
#'
#'
#' @format A data frame with columns
#' \describe{
#' \item{source}{source of data, character}
#' \item{level}{grade or age level of test, integer}
#' \item{levelType}{"grade" or "age", character}
#' \item{NAEPid}{test item ID, character}
#' \item{assessmentCode}{"State" or "National", character}
#' \item{accommodations}{"accom" or "no-accom", character}
#' \item{subtest}{subtest within subject, character}
#' \item{subject}{subject of test, character}
#' \item{year}{year of test, integer}
#' \item{a}{slope or a parameter, numeric}
#' \item{b}{difficulty or d parameter, numeric}
#' \item{c}{guessing or g parameter, numeric}
#' \item{d1}{location of cut point 1 for polytomous item, numeric}
#' \item{d2}{location of cut point 2 for polytomous item, numeric}
#' \item{d3}{location of cut point 3 for polytomous item, numeric}
#' \item{d4}{location of cut point 4 for polytomous item, numeric}
#' \item{d5}{location of cut point 5 for polytomous item, numeric}
#' }
#' @references Department of Education, Institute of Education Sciences, National Center for Education Statistics, National Assessment of Educational Progress (NAEP), 1990-2015, various subjects. Retrieved from <https://nces.ed.gov/nationsreportcard/tdw/analysis/scaling_irt.aspx>
#'
#' @example man/examples/parameters.R
"parameters"
|
# Note, this is very fragile and experimental
### TODO include more than just misses...
# Benchmark cache misses for one or more R expressions.
# Each expression in ... is evaluated `nreps` times and the three counters
# returned by system.cache() are recorded per run.  Returns a list of
# per-expression nreps x 3 integer matrices plus a `summarystats` data frame
# of column means, with S3 class "cachebench".
# NOTE(review): assumes system.cache() returns exactly 3 counters
# (ncol=3 is hard-coded below) -- confirm against system.cache().
cachebench <- function(..., nreps=10)
{
l <- list(...)
len <- length(l)
type <- "Cache Misses"
if (len == 0)
stop("No expressions")
# Capture the unevaluated argument expressions so each can be re-run
# nreps times below; [-1] drops the function name from the call.
args <- match.call()[-1]
names <- names(args)
if (is.null(names))
argnames <- as.character(args)
else
{
# Drop the nreps argument, then label each test by its supplied name,
# falling back to the deparsed expression text when unnamed.
keep <- names(args) != "nreps"
args <- args[keep]
argnames <- names[keep]
charargs <- as.character(args)
argnames <- sapply(1:len, function(i) if (argnames[i] == "") charargs[i] else argnames[i])
}
# A NULL run supplies the counter names used as matrix column names.
template <- system.cache(NULL)
colnames <- names(template)
# Preallocate one nreps x 3 integer result matrix per expression.
ret <- lapply(1:len, function(.) {tmp <- matrix(0L, nrow=nreps, ncol=3); colnames(tmp) <- colnames; tmp})
names(ret) <- argnames
class(ret) <- "cachebench"
for (i in 1:len)
{
for (j in 1:nreps)
{
tmp <- system.cache(expr=eval(args[[i]]))
ret[[i]][j, ] <- as.integer(tmp)
}
}
# Per-expression column means, with shortened labels
# (e.g. "L1 cache misses" -> "Avg.L1").
means <- do.call(rbind, lapply(ret, colMeans))
colnames(means) <- gsub(colnames(means), pattern=" cache misses", replacement="")
colnames(means) <- gsub(colnames(means), pattern="^L", replacement="Avg.L")
summarystats <- data.frame(nreps=nreps)
summarystats <- cbind(summarystats, means)
ret$summarystats <- summarystats
ret$type <- type
return(ret)
}
# Print method for "cachebench" objects: shows the benchmark type and the
# per-expression summary statistics.
#
# x   : a "cachebench" object as returned by cachebench()
# ... : ignored; required so the signature matches the print() generic
# Returns x invisibly, per the standard print-method contract.
print.cachebench <- function(x, ...)
{
  cat(x$type, ":\n")
  print(x$summarystats)
  invisible(x)
}
# Reshape a wide cachebench data frame (value columns followed by a trailing
# Test column) into long format with columns Test, variable and value.
cachemelt <- function(df)
{
  value_cols <- seq_len(ncol(df) - 1)
  col_names <- names(df)
  data.frame(
    Test = rep(df$Test, length(value_cols)),
    variable = rep(col_names[value_cols], each = nrow(df)),
    value = unlist(df[value_cols], use.names = FALSE)
  )
}
# Boxplot of per-run cache misses for each benchmarked expression, faceted
# by counter column.  `levels` selects which of the 3 counter columns to
# show; `axis.x.angle` rotates the x-axis labels.  Returns a ggplot object.
plot.cachebench <- function(x, levels=1:3, axis.x.angle=0)
{
### To fool R CMD check
# (Test/value are used via ggplot2 non-standard evaluation; declaring then
# removing them silences "no visible binding" NOTEs.)
Test <- value <- NULL
rm(list=c("Test", "value"))
# Keep only the per-expression result matrices.
tmp <- x
tmp$summarystats <- NULL
tmp$type <- NULL
nm <- names(tmp)
# Stack all runs into one data frame with the expression label appended;
# column 4 is that label (3 counter columns are assumed).
df <- do.call(rbind, lapply(1:length(tmp), function(i) data.frame(tmp[[i]], nm[i])))
df <- df[, c(levels, 4)]
colnames(df)[ncol(df)] <- "Test"
colnames(df) <- gsub(colnames(df), pattern=".cache.misses", replacement="", fixed=TRUE)
# Long format: one row per (Test, counter, value).
df <- cachemelt(df)
g1 <-
ggplot(df, aes(Test, value)) +
stat_boxplot(geom ='errorbar')+
geom_boxplot() +
theme(axis.text.x=element_text(angle=axis.x.angle, hjust=1)) +
xlab("Test") +
ylab("") +
ggtitle(x$type) +
facet_wrap(~ variable)
# g2 <- g3 <- g1
#
# plots <- list(g1=g1, g2=g2, g3=g3)
# label <- x$type
# row_plotter(plots, levels, label, show.title=TRUE)
g1
}
### Example
#x <- cachebench(A=rnorm(1e4), B=rnorm(1e5))
#plot(x)
| /R/cachebench.r | permissive | vsskanand/scribe | R | false | false | 2,712 | r | # Note, this is very fragile and experimental
### TODO include more than just misses...
cachebench <- function(..., nreps=10)
{
l <- list(...)
len <- length(l)
type <- "Cache Misses"
if (len == 0)
stop("No expressions")
args <- match.call()[-1]
names <- names(args)
if (is.null(names))
argnames <- as.character(args)
else
{
keep <- names(args) != "nreps"
args <- args[keep]
argnames <- names[keep]
charargs <- as.character(args)
argnames <- sapply(1:len, function(i) if (argnames[i] == "") charargs[i] else argnames[i])
}
template <- system.cache(NULL)
colnames <- names(template)
ret <- lapply(1:len, function(.) {tmp <- matrix(0L, nrow=nreps, ncol=3); colnames(tmp) <- colnames; tmp})
names(ret) <- argnames
class(ret) <- "cachebench"
for (i in 1:len)
{
for (j in 1:nreps)
{
tmp <- system.cache(expr=eval(args[[i]]))
ret[[i]][j, ] <- as.integer(tmp)
}
}
means <- do.call(rbind, lapply(ret, colMeans))
colnames(means) <- gsub(colnames(means), pattern=" cache misses", replacement="")
colnames(means) <- gsub(colnames(means), pattern="^L", replacement="Avg.L")
summarystats <- data.frame(nreps=nreps)
summarystats <- cbind(summarystats, means)
ret$summarystats <- summarystats
ret$type <- type
return(ret)
}
print.cachebench <- function(x)
{
cat(x$type, ":\n")
print(x$summarystats)
}
cachemelt <- function(df)
{
len <- ncol(df) - 1
value <- sapply(sapply(1:len, function(i) df[, i]), c)
nm <- names(df)
variable <- as.character(sapply(sapply(1:len, function(i) rep(nm[i], nrow(df))), c))
Test <- rep(df$Test, len)
data.frame(Test=Test, variable=variable, value=value)
}
plot.cachebench <- function(x, levels=1:3, axis.x.angle=0)
{
### To fool R CMD check
Test <- value <- NULL
rm(list=c("Test", "value"))
tmp <- x
tmp$summarystats <- NULL
tmp$type <- NULL
nm <- names(tmp)
df <- do.call(rbind, lapply(1:length(tmp), function(i) data.frame(tmp[[i]], nm[i])))
df <- df[, c(levels, 4)]
colnames(df)[ncol(df)] <- "Test"
colnames(df) <- gsub(colnames(df), pattern=".cache.misses", replacement="", fixed=TRUE)
df <- cachemelt(df)
g1 <-
ggplot(df, aes(Test, value)) +
stat_boxplot(geom ='errorbar')+
geom_boxplot() +
theme(axis.text.x=element_text(angle=axis.x.angle, hjust=1)) +
xlab("Test") +
ylab("") +
ggtitle(x$type) +
facet_wrap(~ variable)
# g2 <- g3 <- g1
#
# plots <- list(g1=g1, g2=g2, g3=g3)
# label <- x$type
# row_plotter(plots, levels, label, show.title=TRUE)
g1
}
### Example
#x <- cachebench(A=rnorm(1e4), B=rnorm(1e5))
#plot(x)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/poba1.R
\name{pure}
\alias{pure}
\title{prediction of unobserved responses with fast methods for big data with SSN}
\usage{
pure(ecp, efe, predsID, nNN, poba_prep = FALSE)
}
\arguments{
\item{ecp}{object from cope function.}
\item{efe}{object from fefe function}
\item{predsID}{name of prediction data set in ssn object (passed with ecp)}
\item{nNN}{number of nearest neighbors for predictions}
}
\value{
a data.frame with predictions in first column, and prediction standard errors in the second column.
}
\description{
Prediction of unobserved responses with fast methods for big data with SSN
}
\author{
Jay Ver Hoef
}
| /man/pure.Rd | no_license | jayverhoef/SSNbd | R | false | true | 703 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/poba1.R
\name{pure}
\alias{pure}
\title{prediction of unobserved responses with fast methods for big data with SSN}
\usage{
pure(ecp, efe, predsID, nNN, poba_prep = FALSE)
}
\arguments{
\item{ecp}{object from cope function.}
\item{efe}{object from fefe function}
\item{predsID}{name of prediction data set in ssn object (passed with ecp)}
\item{nNN}{number of nearest neighbors for predictions}
}
\value{
a data.frame with predictions in first column, and prediction standard errors in the second column.
}
\description{
Prediction of unobserved responses with fast methods for big data with SSN
}
\author{
Jay Ver Hoef
}
|
#' Insert a table into the connected document
#'
#' Inserts a table at the current position of the view cursor.
#'
#' @importFrom PythonInR pyExec pySet
#' @param x A matrix of character vectors to be inserted as a table. If not a matrix,
#' an attempt is made to turn it into a matrix by \code{\link{as.matrix}}.
#' @param captiontext The text of the caption
#' @param header The names to be used for the columns of the matrix
#' @param group_header If not NULL, the names of column groups
#' @param common_header If not NULL, the common header of all matrix columns
#' @param group_sizes If group_header is not NULL, a vector holding the sizes of
#' column groups
#' @param footer An optional text to be included as a table footer
#' @param factors An optional named list of character vectors that must describe the
#' rows of the matrix object
#' @param merge_index An optional character vector with the names of the factors for
#' which adjacent cells with identical values should be merged
#' @param numbered Should the caption of the table be numbered?
#' @param NA_string The string used for NA values
#' @param break_before_caption Should a page break be insersted before the caption
#' @param split Should it be allowed to split the table across pages
#' @param repeat_headlines Should the headline(s) be repeated?
#' @param charheight An optional way to specify the character height in table cells
#' @param widths An optional way to specify relative columns widths
#' @param warn Should missing paragraph styles give a warning?
#' @export
rlo_table <- function(x, captiontext,
header = "colnames",
group_header = NULL,
common_header = NULL,
group_sizes = NULL,
footer = NULL,
factors = NULL, merge_index = NULL,
numbered = TRUE,
NA_string = "",
break_before_caption = FALSE,
split = FALSE,
repeat_headlines = TRUE,
charheight = NULL,
widths = NULL,
warn = FALSE)
{
rlo_scursor()
if (!inherits(x, "matrix")) x <- as.matrix(x)
matrix_cols = ncol(x)
pyExec("scursor.setPropertyValue('ParaStyleName', 'Table')")
pyExec("scursor.setPropertyValue('ParaKeepTogether', True)")
if (break_before_caption) {
pyExec("scursor.setPropertyValue('BreakType', 4)") # PAGE_BEFORE
} else {
pyExec("scursor.setPropertyValue('BreakType', 0)") # NONE
}
if (numbered) {
pyExec("text.insertString(scursor, 'Table ', False)")
rlo_dispatch(".uno:InsertField",
list(Type = 23, SubType = 127, Name = "Tabelle", Content = "", Format = 4, Separator = " "))
pyExec("text.insertString(scursor, ': ', False)")
}
pySet("captiontext", captiontext)
pyExec("text.insertString(scursor, captiontext, False)")
pyExec("tbl = doc.createInstance('com.sun.star.text.TextTable')")
if (split) pyExec("tbl.Split = True")
else pyExec("tbl.Split = False")
if (header[1] == "colnames") {
header <- colnames(x)
}
n_headrows = 0
if (!is.null(header)) {
x <- rbind(header, x)
n_headrows = 1
if (!is.null(group_header)) {
group_header_expanded = NULL
for (group in seq_along(group_header)) {
group_header_expanded = c(group_header_expanded, group_header[group])
group_size = group_sizes[group]
if (group_size > 1) {
group_header_expanded = c(group_header_expanded, rep("", group_size - 1))
}
}
x <- rbind(group_header_expanded, x)
n_headrows = 2
}
if (!is.null(common_header)) {
x <- rbind(c(common_header, rep("", matrix_cols - 1)), x)
n_headrows = n_headrows + 1
}
}
mergelist = list()
for (fi in seq_along(factors)) {
factor_col = LETTERS[fi]
if (factor_col %in% merge_index) {
f <- factors[[fi]]
mergelist[[factor_col]] <- list()
merge_start = 1
merge_end = 1
merge_factor = f[merge_start]
merge_count = 0
for (i in 1:length(f)) {
if (f[i] == merge_factor) {
if (i != merge_start) {
factors[[fi]][i] = ""
if (is.na(f[i + 1])) {
merge_count = merge_count + 1
merge_end = i
entry <- c(start = merge_start, end = merge_end)
mergelist[[factor_col]][[merge_count]] = entry
}
else if (f[i + 1] != merge_factor) {
merge_count = merge_count + 1
merge_end = i
entry <- c(start = merge_start, end = merge_end)
mergelist[[factor_col]][[merge_count]] = entry
}
}
} else {
merge_start = i
merge_end = i
merge_factor = f[merge_start]
}
}
}
}
if (!is.null(factors)) {
for (i in length(factors):1) {
if (n_headrows == 0) {
x <- cbind(factors[[i]], x)
}
if (n_headrows == 1) {
x <- cbind(c(names(factors)[i], factors[[i]]), x)
}
if (n_headrows == 2) {
x <- cbind(c(names(factors)[i], "", factors[[i]]), x)
}
if (n_headrows == 3) {
x <- cbind(c(names(factors)[i], "", "", factors[[i]]), x)
}
}
}
dimnames(x) <- NULL
x[is.na(x)] <- NA_string
pyExec(paste0("tbl.initialize(", nrow(x), ", ", ncol(x), ")"))
pyExec("text.insertTextContent(scursor, tbl, False)")
pySet("x", x)
pyExec("x = tuple(tuple(i) for i in x)")
pyExec("tbl.setDataArray(x)")
# Set cell widths
if (!is.null(widths)) {
if (length(widths) > ncol(x)) stop("You specified more cell widths than the number of columns")
if (length(widths) < ncol(x)) widths = c(widths, rep(1, ncol(x) - length(widths)))
separators = round(cumsum(widths) / sum(widths) * 10000)
pyExec("tcs = tbl.TableColumnSeparators")
for (i in 0:(length(separators) - 2)) {
pyExec(paste0("tcs[", i, "].Position = ", separators[i + 1]))
}
pyExec("tbl.TableColumnSeparators = tcs")
}
cellrange = paste0("A1:", LETTERS[ncol(x)], nrow(x))
if (!is.null(charheight)) {
pyExec(paste0("tbl.getCellRangeByName('", cellrange, "').CharHeight = ", charheight))
}
# Merge factor columns if requested
for (factor_col in names(mergelist)) {
if (factor_col %in% merge_index) {
for (ei in seq_along(mergelist[[factor_col]])) {
entry = mergelist[[factor_col]][[ei]]
pySet("cellname", paste0(factor_col, entry["start"] + n_headrows))
pyExec("tcursor = tbl.createCursorByCellName(cellname)")
stepsize = entry["end"] - entry["start"]
pyExec(paste0("tcursor.goDown(", stepsize, ", True)"))
pyExec("tcursor.mergeRange()")
}
}
}
# Merge headers of factor columns (vertically)
if (n_headrows > 1) {
factor_merge_step = n_headrows - 1
for (factor_index in seq_along(factors)) {
factor_col = LETTERS[factor_index]
pySet("cellname", paste0(factor_col, 1))
pyExec("tcursor = tbl.createCursorByCellName(cellname)")
pyExec(paste0("tcursor.goDown(", factor_merge_step, ", True)"))
pyExec("tcursor.mergeRange()")
}
}
# Merge group header fields if group header is present
if (!is.null(group_header)) {
col_index = length(factors)
for (group_size in group_sizes) {
col_index = col_index + 1
group_header_row_index = if (!is.null(common_header)) 2 else 1
pySet("cellname", paste0(LETTERS[col_index], group_header_row_index))
pyExec("tcursor = tbl.createCursorByCellName(cellname)")
pyExec(paste0("tcursor.goRight(", group_size - 1, ", True)"))
pyExec("tcursor.mergeRange()")
}
}
# Merge common header fields if common header is present
if (!is.null(common_header)) {
col_index = length(factors) + 1
pySet("cellname", paste0(LETTERS[col_index], 1))
pyExec("tcursor = tbl.createCursorByCellName(cellname)")
pyExec(paste0("tcursor.goRight(", matrix_cols - 1, ", True)"))
pyExec("tcursor.mergeRange()")
}
# Repeat headlines if requested
if (repeat_headlines) {
pyExec(paste0("tbl.setPropertyValue('HeaderRowCount', ", n_headrows, ")"))
}
# Set footer
if (is.null(footer)) {
rlo_parstyle('Textk\u00f6rper mit Abstand', warn = warn)
} else {
rlo_parstyle('Tabellenunterschrift', warn = warn)
if (!is.null(charheight)) {
pyExec(paste0("scursor.setPropertyValue('CharHeight', ", charheight, ")"))
}
pySet("footer", footer)
pyExec("text.insertString(scursor, footer, False)")
pyExec("text.insertControlCharacter(scursor, 0, False)")
rlo_parstyle('Textk\u00f6rper mit Abstand', warn = warn)
}
}
| /R/rlo_table.R | no_license | cran/rlo | R | false | false | 8,779 | r | #' Insert a table into the connected document
#'
#' Inserts a table at the current position of the view cursor.
#'
#' @importFrom PythonInR pyExec pySet
#' @param x A matrix of character vectors to be inserted as a table. If not a matrix,
#' an attempt is made to turn it into a matrix by \code{\link{as.matrix}}.
#' @param captiontext The text of the caption
#' @param header The names to be used for the columns of the matrix
#' @param group_header If not NULL, the names of column groups
#' @param common_header If not NULL, the common header of all matrix columns
#' @param group_sizes If group_header is not NULL, a vector holding the sizes of
#' column groups
#' @param footer An optional text to be included as a table footer
#' @param factors An optional named list of character vectors that must describe the
#' rows of the matrix object
#' @param merge_index An optional character vector with the names of the factors for
#' which adjacent cells with identical values should be merged
#' @param numbered Should the caption of the table be numbered?
#' @param NA_string The string used for NA values
#' @param break_before_caption Should a page break be insersted before the caption
#' @param split Should it be allowed to split the table across pages
#' @param repeat_headlines Should the headline(s) be repeated?
#' @param charheight An optional way to specify the character height in table cells
#' @param widths An optional way to specify relative columns widths
#' @param warn Should missing paragraph styles give a warning?
#' @export
rlo_table <- function(x, captiontext,
header = "colnames",
group_header = NULL,
common_header = NULL,
group_sizes = NULL,
footer = NULL,
factors = NULL, merge_index = NULL,
numbered = TRUE,
NA_string = "",
break_before_caption = FALSE,
split = FALSE,
repeat_headlines = TRUE,
charheight = NULL,
widths = NULL,
warn = FALSE)
{
rlo_scursor()
if (!inherits(x, "matrix")) x <- as.matrix(x)
matrix_cols = ncol(x)
pyExec("scursor.setPropertyValue('ParaStyleName', 'Table')")
pyExec("scursor.setPropertyValue('ParaKeepTogether', True)")
if (break_before_caption) {
pyExec("scursor.setPropertyValue('BreakType', 4)") # PAGE_BEFORE
} else {
pyExec("scursor.setPropertyValue('BreakType', 0)") # NONE
}
if (numbered) {
pyExec("text.insertString(scursor, 'Table ', False)")
rlo_dispatch(".uno:InsertField",
list(Type = 23, SubType = 127, Name = "Tabelle", Content = "", Format = 4, Separator = " "))
pyExec("text.insertString(scursor, ': ', False)")
}
pySet("captiontext", captiontext)
pyExec("text.insertString(scursor, captiontext, False)")
pyExec("tbl = doc.createInstance('com.sun.star.text.TextTable')")
if (split) pyExec("tbl.Split = True")
else pyExec("tbl.Split = False")
if (header[1] == "colnames") {
header <- colnames(x)
}
n_headrows = 0
if (!is.null(header)) {
x <- rbind(header, x)
n_headrows = 1
if (!is.null(group_header)) {
group_header_expanded = NULL
for (group in seq_along(group_header)) {
group_header_expanded = c(group_header_expanded, group_header[group])
group_size = group_sizes[group]
if (group_size > 1) {
group_header_expanded = c(group_header_expanded, rep("", group_size - 1))
}
}
x <- rbind(group_header_expanded, x)
n_headrows = 2
}
if (!is.null(common_header)) {
x <- rbind(c(common_header, rep("", matrix_cols - 1)), x)
n_headrows = n_headrows + 1
}
}
mergelist = list()
for (fi in seq_along(factors)) {
factor_col = LETTERS[fi]
if (factor_col %in% merge_index) {
f <- factors[[fi]]
mergelist[[factor_col]] <- list()
merge_start = 1
merge_end = 1
merge_factor = f[merge_start]
merge_count = 0
for (i in 1:length(f)) {
if (f[i] == merge_factor) {
if (i != merge_start) {
factors[[fi]][i] = ""
if (is.na(f[i + 1])) {
merge_count = merge_count + 1
merge_end = i
entry <- c(start = merge_start, end = merge_end)
mergelist[[factor_col]][[merge_count]] = entry
}
else if (f[i + 1] != merge_factor) {
merge_count = merge_count + 1
merge_end = i
entry <- c(start = merge_start, end = merge_end)
mergelist[[factor_col]][[merge_count]] = entry
}
}
} else {
merge_start = i
merge_end = i
merge_factor = f[merge_start]
}
}
}
}
if (!is.null(factors)) {
for (i in length(factors):1) {
if (n_headrows == 0) {
x <- cbind(factors[[i]], x)
}
if (n_headrows == 1) {
x <- cbind(c(names(factors)[i], factors[[i]]), x)
}
if (n_headrows == 2) {
x <- cbind(c(names(factors)[i], "", factors[[i]]), x)
}
if (n_headrows == 3) {
x <- cbind(c(names(factors)[i], "", "", factors[[i]]), x)
}
}
}
dimnames(x) <- NULL
x[is.na(x)] <- NA_string
pyExec(paste0("tbl.initialize(", nrow(x), ", ", ncol(x), ")"))
pyExec("text.insertTextContent(scursor, tbl, False)")
pySet("x", x)
pyExec("x = tuple(tuple(i) for i in x)")
pyExec("tbl.setDataArray(x)")
# Set cell widths
if (!is.null(widths)) {
if (length(widths) > ncol(x)) stop("You specified more cell widths than the number of columns")
if (length(widths) < ncol(x)) widths = c(widths, rep(1, ncol(x) - length(widths)))
separators = round(cumsum(widths) / sum(widths) * 10000)
pyExec("tcs = tbl.TableColumnSeparators")
for (i in 0:(length(separators) - 2)) {
pyExec(paste0("tcs[", i, "].Position = ", separators[i + 1]))
}
pyExec("tbl.TableColumnSeparators = tcs")
}
cellrange = paste0("A1:", LETTERS[ncol(x)], nrow(x))
if (!is.null(charheight)) {
pyExec(paste0("tbl.getCellRangeByName('", cellrange, "').CharHeight = ", charheight))
}
# Merge factor columns if requested
for (factor_col in names(mergelist)) {
if (factor_col %in% merge_index) {
for (ei in seq_along(mergelist[[factor_col]])) {
entry = mergelist[[factor_col]][[ei]]
pySet("cellname", paste0(factor_col, entry["start"] + n_headrows))
pyExec("tcursor = tbl.createCursorByCellName(cellname)")
stepsize = entry["end"] - entry["start"]
pyExec(paste0("tcursor.goDown(", stepsize, ", True)"))
pyExec("tcursor.mergeRange()")
}
}
}
# Merge headers of factor columns (vertically)
if (n_headrows > 1) {
factor_merge_step = n_headrows - 1
for (factor_index in seq_along(factors)) {
factor_col = LETTERS[factor_index]
pySet("cellname", paste0(factor_col, 1))
pyExec("tcursor = tbl.createCursorByCellName(cellname)")
pyExec(paste0("tcursor.goDown(", factor_merge_step, ", True)"))
pyExec("tcursor.mergeRange()")
}
}
# Merge group header fields if group header is present
if (!is.null(group_header)) {
col_index = length(factors)
for (group_size in group_sizes) {
col_index = col_index + 1
group_header_row_index = if (!is.null(common_header)) 2 else 1
pySet("cellname", paste0(LETTERS[col_index], group_header_row_index))
pyExec("tcursor = tbl.createCursorByCellName(cellname)")
pyExec(paste0("tcursor.goRight(", group_size - 1, ", True)"))
pyExec("tcursor.mergeRange()")
}
}
# Merge common header fields if common header is present
if (!is.null(common_header)) {
col_index = length(factors) + 1
pySet("cellname", paste0(LETTERS[col_index], 1))
pyExec("tcursor = tbl.createCursorByCellName(cellname)")
pyExec(paste0("tcursor.goRight(", matrix_cols - 1, ", True)"))
pyExec("tcursor.mergeRange()")
}
# Repeat headlines if requested
if (repeat_headlines) {
pyExec(paste0("tbl.setPropertyValue('HeaderRowCount', ", n_headrows, ")"))
}
# Set footer
if (is.null(footer)) {
rlo_parstyle('Textk\u00f6rper mit Abstand', warn = warn)
} else {
rlo_parstyle('Tabellenunterschrift', warn = warn)
if (!is.null(charheight)) {
pyExec(paste0("scursor.setPropertyValue('CharHeight', ", charheight, ")"))
}
pySet("footer", footer)
pyExec("text.insertString(scursor, footer, False)")
pyExec("text.insertControlCharacter(scursor, 0, False)")
rlo_parstyle('Textk\u00f6rper mit Abstand', warn = warn)
}
}
|
plot2 = function(){
## Get the raw data file and unzip it
download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "household_power_consumption.zip")
unzip("household_power_consumption.zip")
## Load the data corresponding to Thursday 2007-02-01 and Friday 2007-02-02.
## This means we are considering lines 66637 - 69516
data = read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings=c("?"), colClasses = c("Global_active_power"="numeric"))
data = data[66637:69516, 1:3] ##To make this graph, we need the first 3 columns
data$DateAndTime = strptime(paste(data[,1], data[,2]), format="%d/%m/%Y %H:%M:%S")
## Create the line plot and save it to as plot2.png
png(file="plot2.png") ##opens PNG device. 480px x 480px is the default width and height
with(data, plot(DateAndTime, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")) ## type="l" to display a line graph
dev.off() ##closes PNG device
} | /03 - EDA/CourseProject1/plot2.R | no_license | arobert1976/Data-Science-Foundations-using-R | R | false | false | 1,053 | r | plot2 = function(){
## Get the raw data file and unzip it
download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "household_power_consumption.zip")
unzip("household_power_consumption.zip")
## Load the data corresponding to Thursday 2007-02-01 and Friday 2007-02-02.
## This means we are considering lines 66637 - 69516
data = read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings=c("?"), colClasses = c("Global_active_power"="numeric"))
data = data[66637:69516, 1:3] ##To make this graph, we need the first 3 columns
data$DateAndTime = strptime(paste(data[,1], data[,2]), format="%d/%m/%Y %H:%M:%S")
## Create the line plot and save it to as plot2.png
png(file="plot2.png") ##opens PNG device. 480px x 480px is the default width and height
with(data, plot(DateAndTime, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")) ## type="l" to display a line graph
dev.off() ##closes PNG device
} |
##' Information Content Based Methods for semantic similarity measuring
##'
##' implemented for methods proposed by Resnik, Jiang, Lin and Schlicker.
##' @title information content based methods
##' @param ID1 Ontology Term
##' @param ID2 Ontology Term
##' @param method one of "Resnik", "Jiang", "Lin" and "Rel".
##' @param godata GOSemSimDATA object
##' @return semantic similarity score
##' @useDynLib GOSemSim
##' @author Guangchuang Yu \url{http://ygc.name}
infoContentMethod <- function(ID1,
ID2,
method,
godata) {
## IC is biased
## because the IC of a term is dependent of its children but not on its parents.
ont <- godata@ont
IC <- godata@IC
if (length(IC) == 0) {
stop("IC data not found, please re-generate your `semData` with `computeIC=TRUE`...")
}
if (ont %in% c("MF", "BP", "CC", "DO")) {
.anc <- AnnotationDbi::as.list(getAncestors(ont)[union(ID1,ID2)])
} else {
mesh_getAnc <- eval(parse(text="meshes:::getAncestors"))
.anc <- lapply(union(ID1, ID2), mesh_getAnc)
names(.anc) <- union(ID1, ID2)
}
return ( infoContentMethod_cpp( ID1, ID2,
.anc, IC,
method, ont ) )
}
## infoContentMethod <- function(ID1,
## ID2,
## ont="DO",
## method,
## organism="human") {
## IC <- getIC(organism, ont)
## ## more specific term, larger IC value.
## ## Normalized, all divide the most informative IC.
## ## all IC values range from 0(root node) to 1(most specific node)
## mic <- max(IC[IC!=Inf])
## if (ont == "DO") {
## topNode <- "DOID:4"
## } else {
## topNode <- "all"
## }
## IC[topNode] = 0
## ic1 <- IC[ID1]/mic
## ic2 <- IC[ID2]/mic
## if (ic1 == 0 || ic2 == 0)
## return (NA)
## ancestor1 <- getAncestors(ont)[[ID1]]
## ancestor2 <- getAncestors(ont)[[ID2]]
## if (ID1 == ID2) {
## commonAncestor <- ID1
## } else if (ID1 %in% ancestor2) {
## commonAncestor <- ID1
## } else if (ID2 %in% ancestor1) {
## commonAncestor <- ID2
## } else {
## commonAncestor <- intersect(ancestor1, ancestor2)
## }
## if (length(commonAncestor) == 0) return (NA)
## ##Information Content of the most informative common ancestor (MICA)
## mica <- max(IC[commonAncestor])/mic
## ## IC is biased
## ## because the IC of a term is dependent of its children but not on its parents.
## sim <- switch(method,
## Resnik = mica, ## Resnik does not consider how distant the terms are from their common ancestor.
## ## Lin and Jiang take that distance into account.
## Lin = 2*mica/(ic1+ic2),
## Jiang = 1 - min(1, -2*mica + ic1 + ic2),
## Rel = 2*mica/(ic1+ic2)*(1-exp(-mica*mic)) ## mica*mic equals to the original IC value. and exp(-mica*mic) equals to the probability of the term's occurence.
## )
## return (sim)
## }
| /B_analysts_sources_github/GuangchuangYu/bioc-release/ICMethods.R | no_license | Irbis3/crantasticScrapper | R | false | false | 3,212 | r | ##' Information Content Based Methods for semantic similarity measuring
##'
##' implemented for methods proposed by Resnik, Jiang, Lin and Schlicker.
##' @title information content based methods
##' @param ID1 Ontology Term
##' @param ID2 Ontology Term
##' @param method one of "Resnik", "Jiang", "Lin" and "Rel".
##' @param godata GOSemSimDATA object
##' @return semantic similarity score
##' @useDynLib GOSemSim
##' @author Guangchuang Yu \url{http://ygc.name}
infoContentMethod <- function(ID1,
ID2,
method,
godata) {
## IC is biased
## because the IC of a term is dependent of its children but not on its parents.
ont <- godata@ont
IC <- godata@IC
if (length(IC) == 0) {
stop("IC data not found, please re-generate your `semData` with `computeIC=TRUE`...")
}
if (ont %in% c("MF", "BP", "CC", "DO")) {
.anc <- AnnotationDbi::as.list(getAncestors(ont)[union(ID1,ID2)])
} else {
mesh_getAnc <- eval(parse(text="meshes:::getAncestors"))
.anc <- lapply(union(ID1, ID2), mesh_getAnc)
names(.anc) <- union(ID1, ID2)
}
return ( infoContentMethod_cpp( ID1, ID2,
.anc, IC,
method, ont ) )
}
## infoContentMethod <- function(ID1,
## ID2,
## ont="DO",
## method,
## organism="human") {
## IC <- getIC(organism, ont)
## ## more specific term, larger IC value.
## ## Normalized, all divide the most informative IC.
## ## all IC values range from 0(root node) to 1(most specific node)
## mic <- max(IC[IC!=Inf])
## if (ont == "DO") {
## topNode <- "DOID:4"
## } else {
## topNode <- "all"
## }
## IC[topNode] = 0
## ic1 <- IC[ID1]/mic
## ic2 <- IC[ID2]/mic
## if (ic1 == 0 || ic2 == 0)
## return (NA)
## ancestor1 <- getAncestors(ont)[[ID1]]
## ancestor2 <- getAncestors(ont)[[ID2]]
## if (ID1 == ID2) {
## commonAncestor <- ID1
## } else if (ID1 %in% ancestor2) {
## commonAncestor <- ID1
## } else if (ID2 %in% ancestor1) {
## commonAncestor <- ID2
## } else {
## commonAncestor <- intersect(ancestor1, ancestor2)
## }
## if (length(commonAncestor) == 0) return (NA)
## ##Information Content of the most informative common ancestor (MICA)
## mica <- max(IC[commonAncestor])/mic
## ## IC is biased
## ## because the IC of a term is dependent of its children but not on its parents.
## sim <- switch(method,
## Resnik = mica, ## Resnik does not consider how distant the terms are from their common ancestor.
## ## Lin and Jiang take that distance into account.
## Lin = 2*mica/(ic1+ic2),
## Jiang = 1 - min(1, -2*mica + ic1 + ic2),
## Rel = 2*mica/(ic1+ic2)*(1-exp(-mica*mic)) ## mica*mic equals to the original IC value. and exp(-mica*mic) equals to the probability of the term's occurence.
## )
## return (sim)
## }
|
animal_sounds <- function(animal, sound) {
assertthat::assert_that(
assertthat::is.string(animal),
assertthat::is.string(sound)
)
paste0("The ", animal, " says ", sound, sound ,"!")
}
#this function using the assertthat package to check for strings
| /R/animal_sounds.R | permissive | amysw13/mypackage | R | false | false | 265 | r | animal_sounds <- function(animal, sound) {
assertthat::assert_that(
assertthat::is.string(animal),
assertthat::is.string(sound)
)
paste0("The ", animal, " says ", sound, sound ,"!")
}
#this function using the assertthat package to check for strings
|
library(dplyr)
#downloading and unzipping the file
if(!file.exists("Data.zip")){
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", destfile = "Data.zip")
}
if(!file.exists("UCI HAR Dataset")){
unzip("Data.zip")
}
#Importing data
labels <- read.table("UCI HAR Dataset/activity_labels.txt")
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2]) #this is so that grep works
features_sub <- grep(".*mean.*|.*std.*", features[,2])
features_req <- features[features_sub, 2]
#Importing the training and testing datasets
train_x <- read.table("UCI HAR Dataset/train/X_train.txt")
train_x <- train_x[features_sub]
train_y <- read.table("UCI HAR Dataset/train/y_train.txt")
train_subj <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind( train_y, train_subj, train_x)
test_x <- read.table("UCI HAR Dataset/test/X_test.txt")
test_x <- test_x[features_sub]
test_y <- read.table("UCI HAR Dataset/test/y_test.txt")
test_subj <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(test_y, test_subj, test_x)
fulldata <- rbind(train, test)
#Cleaning up features_req to add to the column names
features_req <- gsub("-mean", "Mean", features_req)
features_req <- gsub("-std", "Std", features_req)
features_req <- gsub("[-()]", "", features_req)
#Names look half decent now
colnames(fulldata) <- c("activity", "subject", features_req)
#Labelling Activity
fulldata$activity <- factor(fulldata$activity, levels = labels[,1], labels = labels[,2])
fulldata_means <- summarise_each(group_by(fulldata, activity, subject), funs(mean))
#saving the data
write.table(fulldata_means, "tidy_averages.txt", row.names = FALSE, quote = FALSE)
| /run_analysis.R | no_license | HariharanJayashankar/Getting-and-Cleaning-Data---Programming-Assignment | R | false | false | 1,793 | r | library(dplyr)
#downloading and unzipping the file
if(!file.exists("Data.zip")){
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", destfile = "Data.zip")
}
if(!file.exists("UCI HAR Dataset")){
unzip("Data.zip")
}
#Importing data
labels <- read.table("UCI HAR Dataset/activity_labels.txt")
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2]) #this is so that grep works
features_sub <- grep(".*mean.*|.*std.*", features[,2])
features_req <- features[features_sub, 2]
#Importing the training and testing datasets
train_x <- read.table("UCI HAR Dataset/train/X_train.txt")
train_x <- train_x[features_sub]
train_y <- read.table("UCI HAR Dataset/train/y_train.txt")
train_subj <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind( train_y, train_subj, train_x)
test_x <- read.table("UCI HAR Dataset/test/X_test.txt")
test_x <- test_x[features_sub]
test_y <- read.table("UCI HAR Dataset/test/y_test.txt")
test_subj <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(test_y, test_subj, test_x)
fulldata <- rbind(train, test)
#Cleaning up features_req to add to the column names
features_req <- gsub("-mean", "Mean", features_req)
features_req <- gsub("-std", "Std", features_req)
features_req <- gsub("[-()]", "", features_req)
#Names look half decent now
colnames(fulldata) <- c("activity", "subject", features_req)
#Labelling Activity
fulldata$activity <- factor(fulldata$activity, levels = labels[,1], labels = labels[,2])
fulldata_means <- summarise_each(group_by(fulldata, activity, subject), funs(mean))
#saving the data
write.table(fulldata_means, "tidy_averages.txt", row.names = FALSE, quote = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VarID_functions.R
\name{pruneKnn}
\alias{pruneKnn}
\title{Function inferring a pruned knn matrix}
\usage{
pruneKnn(
expData,
distM = NULL,
large = TRUE,
regNB = TRUE,
bmethod = NULL,
batch = NULL,
regVar = NULL,
offsetModel = TRUE,
thetaML = FALSE,
theta = 10,
ngenes = 2000,
span = 0.75,
pcaComp = NULL,
tol = 1e-05,
algorithm = "kd_tree",
metric = "pearson",
genes = NULL,
knn = 25,
do.prune = TRUE,
alpha = 1,
nb = 3,
no_cores = NULL,
FSelect = FALSE,
pca.scale = FALSE,
ps = 1,
seed = 12345,
...
)
}
\arguments{
\item{expData}{Matrix of gene expression values with genes as rows and cells as columns. These values have to correspond to unique molecular identifier counts. Alternatively, a Seurat object could be used as input, after normalization, PCA-dimensional reduction, and shared-nearest neighbour inference.}
\item{distM}{Optional distance matrix used for determining k nearest neighbours. Default is \code{NULL} and the distance matrix is computed using a metric given by the parameter \code{metric}.}
\item{large}{logical. If \code{TRUE} then no distance matrix is required and nearest neighbours are inferred by the \pkg{FNN} package based on a reduced
feature matrix computed by a principle component analysis. Only the first \code{pcaComp} principle components are considered. Prior to principal component
analysis a negative binomial regression is performed to eliminate the dependence on the total number of transcripts per cell. The pearson residuals of
this regression serve as input for the principal component analysis after smoothing the parameter dependence on the mean by a \code{loess} regression.
Deafult is \code{TRUE}. Recommended mode for very large datasets, where storing a distance matrix requires too much memory. \code{distM}
will be ignored if \code{large} is \code{TRUE}.}
\item{regNB}{logical. If \code{TRUE} then gene a negative binomial regression is performed to prior to the principle component analysis if \code{large = TRUE}. See \code{large}. Otherwise, transcript counts in each cell are normalized to one, multipled by the minimal total transcript count across all cells, followed by adding a pseudocount of 0.1 and taking the logarithm. Default is \code{TRUE}.}
\item{bmethod}{Character string indicating the batch correction method. If "harmony", then batch correction is performed by the \pkg{harmony} package. Default is \code{NULL} and batch correction will be done by negative binomial regression.}
\item{batch}{vector of batch variables. Component names need to correspond to valid cell IDs, i.e. column names of \code{expData}. If \code{regNB} is \code{TRUE}, than the batch variable will be regressed out simultaneously with the log UMI count per cell. An interaction term is included for the log UMI count with the batch variable. Default value is \code{NULL}.}
\item{regVar}{data.frame with additional variables to be regressed out simultaneously with the log UMI count and the batch variable (if \code{batch} is \code{TRUE}). Column names indicate variable names (name \code{beta} is reserved for the coefficient of the log UMI count), and rownames need to correspond to valid cell IDs, i.e. column names of \code{expData}. Interaction terms are included for each variable in \code{regVar} with the batch variable (if \code{batch} is \code{TRUE}). Default value is \code{NULL}.}
\item{offsetModel}{Logical parameter. Only considered if \code{regNB} is \code{TRUE}. If \code{TRUE} then the \code{beta} (log UMI count) coefficient is set to 1 and the intercept is computed analytically as the log ration of UMI counts for a gene and the total UMI count across all cells. Batch variables and additional variables in \code{regVar} are regressed out with an offset term given by the sum of the intercept and the log UMI count. Default is \code{TRUE}.}
\item{thetaML}{Logical parameter. Only considered if \code{offsetModel} equals \code{TRUE}. If \code{TRUE} then the dispersion parameter is estimated by a maximum likelihood fit. Otherwise, it is set to \code{theta}. Default is \code{FALSE}.}
\item{theta}{Positive real number. Fixed value of the dispersion parameter. Only considered if \code{theaML} equals \code{FALSE}.}
\item{ngenes}{Positive integer number. Randomly sampled number of genes (from rownames of \code{expData}) used for predicting regression coefficients (if \code{regNB=TRUE}). Smoothed coefficients are derived for all genes. Default is 2000.}
\item{span}{Positive real number. Parameter for loess-regression (see \code{large}) controlling the degree of smoothing. Default is 0.75.}
\item{pcaComp}{Positive integer number. Number of princple components to be included if \code{large} is \code{TRUE}. Default is \code{NULL} and the number of principal components used for dimensionality reduction of the feature matrix is derived by an elbow criterion. However, the minimum number of components will be set to 15 if the elbow criterion results in a smaller number. The derived number can be be plotted using the \code{plotPC} function.}
\item{tol}{Numerical value greater than zero. Tolerance for numerical PCA using \pkg{irlba}. Default value is 1e-6.}
\item{algorithm}{Algorithm for fast k nearest neighbour inference, using the \code{get.knn} function from the \pkg{FNN} package.
See \code{help(get.knn)}. Deafult is "kd_tree".}
\item{metric}{Distances are computed from the expression matrix \code{x} after optionally including only genes given as argument \code{genes} or after optional feature selection (see \code{FSelect}).
Possible values for \code{metric} are \code{"pearson", "spearman", "logpearson", "euclidean"}. Default is \code{"pearson"}. In case of the correlation based methods,
the distance is computed as 1 – correlation. This parameter is only used if \code{large} is FALSE and \code{distM} is NULL.}
\item{genes}{Vector of gene names corresponding to a subset of rownames of \code{x}. Only these genes are used for the computation of a distance matrix and for the computation of joint probabilities of nearest neighbours. Default is \code{NULL} and all genes are used.}
\item{knn}{Positive integer number. Number of nearest neighbours considered for each cell. Default is 25.}
\item{do.prune}{Logical parameter. If \code{TRUE}, then pruning of k-nearest neighbourhoods is performed. If \code{FALSE}, then no pruning is done. Default is \code{TRUE}.}
\item{alpha}{Positive real number. Relative weight of a cell versus its k nearest neigbour applied for the derivation of joint probabilities. A cell receives a weight of \code{alpha} while the weights of its k nearest neighbours as determined by quadratic programming sum up to one. The sum across all weights and alpha is normalized to one, and the weighted mean expression is used for computing the link porbabilities for each of the k nearest neighbours. Larger values give more weight to the gene expression observed in a cell versus its neighbourhood. Typical values should be in the range of 0 to 10. Default is value is 1. If \code{alpha} is set to NULL it is inferred by an optimization, i.e., \code{alpha} is minimized under the constraint that the gene expression in a cell does not deviate more then one standard deviation from the predicted weigthed mean, where the standard deviation is calculated from the predicted mean using the background model (the average dependence of the variance on the mean expression). This procedure is coputationally more intense and inceases the run time of the function significantly.}
\item{nb}{Positive integer number. Number of genes with the lowest outlier probability included for calculating the link probabilities for the knn pruning. The link probability is computed as the geometric mean across these genes. Default is 3.}
\item{no_cores}{Positive integer number. Number of cores for multithreading. If set to \code{NULL} then the number of available cores minus two is used. Default is \code{NULL}.}
\item{FSelect}{Logical parameter. If \code{TRUE}, then feature selection is performed prior to distance matrix calculation and VarID analysis. Default is \code{FALSE}.}
\item{pca.scale}{Logical parameter. If \code{TRUE}, then input features are scaled prior to PCA transformation. Default is \code{FALSE}.}
\item{ps}{Real number greater or equal to zero. Pseudocount to be added to counts within local neighbourhoods for outlier identification and pruning. Default is 1.}
\item{seed}{Integer number. Random number to initialize stochastic routines. Default is 12345.}
\item{...}{Additional parameters for \code{HarmonyMatrix} function of the \pkg{harmony} package, if \code{batch} is not \code{NULL} and \code{bmethod="harmony"}.}
}
\value{
List object of six components:
\item{distM}{Distance matrix.}
\item{dimRed}{PCA transformation of \code{expData} including the first \code{pcaComp} principle components, computed on including \code{genes} or variable genes only if \code{Fselect} equals \code{TRUE}. Is is set to \code{NULL} if \code{large} equals \code{FALSE}.}
\item{pvM}{Matrix of link probabilities between a cell and each of its k nearest neighbours (Bonferroni-corrected p-values). Column \code{i} shows the k nearest neighbour link probabilities for cell \code{i} in matrix \code{x}. }
\item{pvM.raw}{Matrix of uncorrected link probabilities between a cell and each of its k nearest neighbours (without multiple-testing correction). Column \code{i} shows the k nearest neighbour link probabilities for cell \code{i} in matrix \code{x}. }
\item{NN}{Matrix of column indices of k nearest neighbours for each cell according to input matrix \code{x}. First entry corresponds to index of the cell itself. Columns contain the k nearest neighbour indices for cell \code{i} in matrix \code{x}.}
\item{B}{List object with background model of gene expression as obtained by \code{fitBackVar} function.}
\item{regData}{If \code{regNB=TRUE} this argument contains a list of four components: component \code{pearsonRes} contains a matrix of the Pearson Residual computed from the negative binomial regression, component \code{nbRegr} contains a matrix with the regression coefficients, component \code{nbRegrSmooth} contains a matrix with the smoothed regression coefficients, and \code{log_umi} is a vector with the total log UMI count for each cell. The regression coefficients comprise the dispersion parameter theta, the intercept, the regression coefficient beta for the log UMI count, and the regression coefficients of the batches (if \code{batch} is not \code{NULL}).}
\item{alpha}{Vector of inferred values for the \code{alpha} parameter for each neighbourhood (if input parameter \code{alpha} is NULL; otherwise all values are equal to the input parameter).}
\item{pars}{List object storing the run parameters.}
\item{pca}{Principal component analysis of the of the input data, if \code{large} is TRUE. Output or the function \code{irlba} from the \pkg{irlba} package with \code{pcaComp} principal components, or 100 principal components if \code{pcaComp} is NULL.}
}
\description{
This function determines k nearest neighbours for each cell in gene expression space, and tests if the links are supported by a negative binomial joint distribution of gene expression. A probability is assigned to each link which is given by the minimum joint probability across all genes.
}
\examples{
res <- pruneKnn(intestinalDataSmall,knn=10,alpha=1,no_cores=1,FSelect=FALSE)
}
| /man/pruneKnn.Rd | no_license | dgrun/RaceID3_StemID2_package | R | false | true | 11,544 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VarID_functions.R
\name{pruneKnn}
\alias{pruneKnn}
\title{Function inferring a pruned knn matrix}
\usage{
pruneKnn(
expData,
distM = NULL,
large = TRUE,
regNB = TRUE,
bmethod = NULL,
batch = NULL,
regVar = NULL,
offsetModel = TRUE,
thetaML = FALSE,
theta = 10,
ngenes = 2000,
span = 0.75,
pcaComp = NULL,
tol = 1e-05,
algorithm = "kd_tree",
metric = "pearson",
genes = NULL,
knn = 25,
do.prune = TRUE,
alpha = 1,
nb = 3,
no_cores = NULL,
FSelect = FALSE,
pca.scale = FALSE,
ps = 1,
seed = 12345,
...
)
}
\arguments{
\item{expData}{Matrix of gene expression values with genes as rows and cells as columns. These values have to correspond to unique molecular identifier counts. Alternatively, a Seurat object could be used as input, after normalization, PCA-dimensional reduction, and shared-nearest neighbour inference.}
\item{distM}{Optional distance matrix used for determining k nearest neighbours. Default is \code{NULL} and the distance matrix is computed using a metric given by the parameter \code{metric}.}
\item{large}{logical. If \code{TRUE} then no distance matrix is required and nearest neighbours are inferred by the \pkg{FNN} package based on a reduced
feature matrix computed by a principle component analysis. Only the first \code{pcaComp} principle components are considered. Prior to principal component
analysis a negative binomial regression is performed to eliminate the dependence on the total number of transcripts per cell. The pearson residuals of
this regression serve as input for the principal component analysis after smoothing the parameter dependence on the mean by a \code{loess} regression.
Deafult is \code{TRUE}. Recommended mode for very large datasets, where storing a distance matrix requires too much memory. \code{distM}
will be ignored if \code{large} is \code{TRUE}.}
\item{regNB}{logical. If \code{TRUE} then gene a negative binomial regression is performed to prior to the principle component analysis if \code{large = TRUE}. See \code{large}. Otherwise, transcript counts in each cell are normalized to one, multipled by the minimal total transcript count across all cells, followed by adding a pseudocount of 0.1 and taking the logarithm. Default is \code{TRUE}.}
\item{bmethod}{Character string indicating the batch correction method. If "harmony", then batch correction is performed by the \pkg{harmony} package. Default is \code{NULL} and batch correction will be done by negative binomial regression.}
\item{batch}{vector of batch variables. Component names need to correspond to valid cell IDs, i.e. column names of \code{expData}. If \code{regNB} is \code{TRUE}, than the batch variable will be regressed out simultaneously with the log UMI count per cell. An interaction term is included for the log UMI count with the batch variable. Default value is \code{NULL}.}
\item{regVar}{data.frame with additional variables to be regressed out simultaneously with the log UMI count and the batch variable (if \code{batch} is \code{TRUE}). Column names indicate variable names (name \code{beta} is reserved for the coefficient of the log UMI count), and rownames need to correspond to valid cell IDs, i.e. column names of \code{expData}. Interaction terms are included for each variable in \code{regVar} with the batch variable (if \code{batch} is \code{TRUE}). Default value is \code{NULL}.}
\item{offsetModel}{Logical parameter. Only considered if \code{regNB} is \code{TRUE}. If \code{TRUE} then the \code{beta} (log UMI count) coefficient is set to 1 and the intercept is computed analytically as the log ration of UMI counts for a gene and the total UMI count across all cells. Batch variables and additional variables in \code{regVar} are regressed out with an offset term given by the sum of the intercept and the log UMI count. Default is \code{TRUE}.}
\item{thetaML}{Logical parameter. Only considered if \code{offsetModel} equals \code{TRUE}. If \code{TRUE} then the dispersion parameter is estimated by a maximum likelihood fit. Otherwise, it is set to \code{theta}. Default is \code{FALSE}.}
\item{theta}{Positive real number. Fixed value of the dispersion parameter. Only considered if \code{theaML} equals \code{FALSE}.}
\item{ngenes}{Positive integer number. Randomly sampled number of genes (from rownames of \code{expData}) used for predicting regression coefficients (if \code{regNB=TRUE}). Smoothed coefficients are derived for all genes. Default is 2000.}
\item{span}{Positive real number. Parameter for loess-regression (see \code{large}) controlling the degree of smoothing. Default is 0.75.}
\item{pcaComp}{Positive integer number. Number of princple components to be included if \code{large} is \code{TRUE}. Default is \code{NULL} and the number of principal components used for dimensionality reduction of the feature matrix is derived by an elbow criterion. However, the minimum number of components will be set to 15 if the elbow criterion results in a smaller number. The derived number can be be plotted using the \code{plotPC} function.}
\item{tol}{Numerical value greater than zero. Tolerance for numerical PCA using \pkg{irlba}. Default value is 1e-6.}
\item{algorithm}{Algorithm for fast k nearest neighbour inference, using the \code{get.knn} function from the \pkg{FNN} package.
See \code{help(get.knn)}. Deafult is "kd_tree".}
\item{metric}{Distances are computed from the expression matrix \code{x} after optionally including only genes given as argument \code{genes} or after optional feature selection (see \code{FSelect}).
Possible values for \code{metric} are \code{"pearson", "spearman", "logpearson", "euclidean"}. Default is \code{"pearson"}. In case of the correlation based methods,
the distance is computed as 1 – correlation. This parameter is only used if \code{large} is FALSE and \code{distM} is NULL.}
\item{genes}{Vector of gene names corresponding to a subset of rownames of \code{x}. Only these genes are used for the computation of a distance matrix and for the computation of joint probabilities of nearest neighbours. Default is \code{NULL} and all genes are used.}
\item{knn}{Positive integer number. Number of nearest neighbours considered for each cell. Default is 25.}
\item{do.prune}{Logical parameter. If \code{TRUE}, then pruning of k-nearest neighbourhoods is performed. If \code{FALSE}, then no pruning is done. Default is \code{TRUE}.}
\item{alpha}{Positive real number. Relative weight of a cell versus its k nearest neigbour applied for the derivation of joint probabilities. A cell receives a weight of \code{alpha} while the weights of its k nearest neighbours as determined by quadratic programming sum up to one. The sum across all weights and alpha is normalized to one, and the weighted mean expression is used for computing the link porbabilities for each of the k nearest neighbours. Larger values give more weight to the gene expression observed in a cell versus its neighbourhood. Typical values should be in the range of 0 to 10. Default is value is 1. If \code{alpha} is set to NULL it is inferred by an optimization, i.e., \code{alpha} is minimized under the constraint that the gene expression in a cell does not deviate more then one standard deviation from the predicted weigthed mean, where the standard deviation is calculated from the predicted mean using the background model (the average dependence of the variance on the mean expression). This procedure is coputationally more intense and inceases the run time of the function significantly.}
\item{nb}{Positive integer number. Number of genes with the lowest outlier probability included for calculating the link probabilities for the knn pruning. The link probability is computed as the geometric mean across these genes. Default is 3.}
\item{no_cores}{Positive integer number. Number of cores for multithreading. If set to \code{NULL} then the number of available cores minus two is used. Default is \code{NULL}.}
\item{FSelect}{Logical parameter. If \code{TRUE}, then feature selection is performed prior to distance matrix calculation and VarID analysis. Default is \code{FALSE}.}
\item{pca.scale}{Logical parameter. If \code{TRUE}, then input features are scaled prior to PCA transformation. Default is \code{FALSE}.}
\item{ps}{Real number greater or equal to zero. Pseudocount to be added to counts within local neighbourhoods for outlier identification and pruning. Default is 1.}
\item{seed}{Integer number. Random number to initialize stochastic routines. Default is 12345.}
\item{...}{Additional parameters for \code{HarmonyMatrix} function of the \pkg{harmony} package, if \code{batch} is not \code{NULL} and \code{bmethod="harmony"}.}
}
\value{
List object of six components:
\item{distM}{Distance matrix.}
\item{dimRed}{PCA transformation of \code{expData} including the first \code{pcaComp} principle components, computed on including \code{genes} or variable genes only if \code{Fselect} equals \code{TRUE}. Is is set to \code{NULL} if \code{large} equals \code{FALSE}.}
\item{pvM}{Matrix of link probabilities between a cell and each of its k nearest neighbours (Bonferroni-corrected p-values). Column \code{i} shows the k nearest neighbour link probabilities for cell \code{i} in matrix \code{x}. }
\item{pvM.raw}{Matrix of uncorrected link probabilities between a cell and each of its k nearest neighbours (without multiple-testing correction). Column \code{i} shows the k nearest neighbour link probabilities for cell \code{i} in matrix \code{x}. }
\item{NN}{Matrix of column indices of k nearest neighbours for each cell according to input matrix \code{x}. First entry corresponds to index of the cell itself. Columns contain the k nearest neighbour indices for cell \code{i} in matrix \code{x}.}
\item{B}{List object with background model of gene expression as obtained by \code{fitBackVar} function.}
\item{regData}{If \code{regNB=TRUE} this argument contains a list of four components: component \code{pearsonRes} contains a matrix of the Pearson Residual computed from the negative binomial regression, component \code{nbRegr} contains a matrix with the regression coefficients, component \code{nbRegrSmooth} contains a matrix with the smoothed regression coefficients, and \code{log_umi} is a vector with the total log UMI count for each cell. The regression coefficients comprise the dispersion parameter theta, the intercept, the regression coefficient beta for the log UMI count, and the regression coefficients of the batches (if \code{batch} is not \code{NULL}).}
\item{alpha}{Vector of inferred values for the \code{alpha} parameter for each neighbourhood (if input parameter \code{alpha} is NULL; otherwise all values are equal to the input parameter).}
\item{pars}{List object storing the run parameters.}
\item{pca}{Principal component analysis of the of the input data, if \code{large} is TRUE. Output or the function \code{irlba} from the \pkg{irlba} package with \code{pcaComp} principal components, or 100 principal components if \code{pcaComp} is NULL.}
}
\description{
This function determines k nearest neighbours for each cell in gene expression space, and tests if the links are supported by a negative binomial joint distribution of gene expression. A probability is assigned to each link which is given by the minimum joint probability across all genes.
}
\examples{
res <- pruneKnn(intestinalDataSmall,knn=10,alpha=1,no_cores=1,FSelect=FALSE)
}
|
test_that("ypr_tabulate_sr population", {
sr <- ypr_tabulate_sr(ypr_population())
expect_s3_class(sr, "tbl_df")
expect_snapshot_data(sr, "srpopulation")
})
test_that("ypr_tabulate_sr populations", {
sr <- ypr_tabulate_sr(ypr_populations(Rk = c(3, 5)))
expect_s3_class(sr, "tbl_df")
expect_snapshot_data(sr, "srpopulationRk")
})
test_that("ypr_tabulate_sr ecotypes", {
sr <- ypr_tabulate_sr(ypr_ecotypes(Linf = c(70, 80), RPR = c(0.8, 0.2)))
expect_s3_class(sr, "tbl_df")
expect_snapshot_data(sr, "srecotypes")
})
| /tests/testthat/test-tabulate-sr.R | permissive | poissonconsulting/ypr | R | false | false | 536 | r | test_that("ypr_tabulate_sr population", {
sr <- ypr_tabulate_sr(ypr_population())
expect_s3_class(sr, "tbl_df")
expect_snapshot_data(sr, "srpopulation")
})
test_that("ypr_tabulate_sr populations", {
sr <- ypr_tabulate_sr(ypr_populations(Rk = c(3, 5)))
expect_s3_class(sr, "tbl_df")
expect_snapshot_data(sr, "srpopulationRk")
})
test_that("ypr_tabulate_sr ecotypes", {
sr <- ypr_tabulate_sr(ypr_ecotypes(Linf = c(70, 80), RPR = c(0.8, 0.2)))
expect_s3_class(sr, "tbl_df")
expect_snapshot_data(sr, "srecotypes")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat.R
\name{rowOrColumnwisePercentage}
\alias{rowOrColumnwisePercentage}
\title{Rowwise or Columnwise Percentage}
\usage{
rowOrColumnwisePercentage(x, rowwise, default = 0, digits = 1)
}
\arguments{
\item{x}{two dimensional numeric data structure}
\item{rowwise}{if \code{TRUE} the percentage is calculated by row, else by
column}
\item{default}{default value to be used if the calculated percentage is
\code{NA}.}
\item{digits}{number of digits (default: 1) to which the resulting
percentages are to be rounded. Set to \code{NA} to suppress rounding}
}
\description{
Calculate the percentage (value divided by sum of values in the row/column)
for each row/column
}
\seealso{
\code{\link{rowwisePercentage}}, \code{\link{columnwisePercentage}}
}
| /man/rowOrColumnwisePercentage.Rd | permissive | KWB-R/kwb.utils | R | false | true | 831 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat.R
\name{rowOrColumnwisePercentage}
\alias{rowOrColumnwisePercentage}
\title{Rowwise or Columnwise Percentage}
\usage{
rowOrColumnwisePercentage(x, rowwise, default = 0, digits = 1)
}
\arguments{
\item{x}{two dimensional numeric data structure}
\item{rowwise}{if \code{TRUE} the percentage is calculated by row, else by
column}
\item{default}{default value to be used if the calculated percentage is
\code{NA}.}
\item{digits}{number of digits (default: 1) to which the resulting
percentages are to be rounded. Set to \code{NA} to suppress rounding}
}
\description{
Calculate the percentage (value divided by sum of values in the row/column)
for each row/column
}
\seealso{
\code{\link{rowwisePercentage}}, \code{\link{columnwisePercentage}}
}
|
#Simple Computation
require(geoR)
require(geoRglm)
require(fields)
require(akima)
require(splancs)
require(plyr)
require(ggplot2)
require(scales)
# setwd("D:/CodeProjects/R/R_srs/inventory")
# setwd("D:/work/Code/srs_work/inventory")
#Clear all, set na.exclude
rm(list=ls())
#options(na.action="na.omit")
#options("na.action")
###################
#0. Some needed functions
#
#Selected matrix functions from Henrik Bengtsson
# https://stat.ethz.ch/pipermail/r-help/2003-October/040484.html
# http://www1.maths.lth.se/help/R/image/image.R
# Mirror matrix (left-right)
mirror.matrix <- function(x) {
xx <- as.data.frame(x);
xx <- rev(xx);
xx <- as.matrix(xx);
xx;
}
# Rotate matrix 90 clockworks
rotate90.matrix <- function(x) {
t(mirror.matrix(x))
}
# Rotate matrix 180 clockworks
rotate180.matrix <- function(x) {
xx <- rev(x);
dim(xx) <- dim(x);
xx;
}
# Rotate matrix 270 clockworks
rotate270.matrix <- function(x) {
mirror.matrix(t(x))
}
#################################
#1.
#load data
tritium<-readRDS("../SRS_data/tritium.rdata")
tritiumavg<-readRDS("../SRS_data/tritiumavg.rdata")
wl<-readRDS("../SRS_data/wl.rdata")
TCCZe_all<-readRDS("../TCCZ_krig/TCCZ/TCCZ_o.rdata")
TCCZe<-TCCZe_all[!is.na(TCCZe_all$TCCZ_top),]
rm(TCCZe_all)
wlavg<-readRDS("../SRS_data/wlavg.rdata")
#basin coords for plotting if needed
#f3basin<-readRDS("../basin_coords/f3basin.rdata")
#f3basin27<-readRDS("../basin_coords/f3basin27.rdata")
#Add log transform
tritium$logmean<-log(tritium$mean)
tritium$log10mean<-log10(tritium$mean)
#
#Split per measurement year
wll<-split(wl,wl$MYEAR)
tritiuml<-split(tritium,tritium$MYEAR)
#Select 1988 and after
wll2<-wll[5:length(wll)]
tritiuml2<-tritiuml[10:length(tritiuml)]
#names(tritiuml2)
#########################################################
#2.
#Define interpolation domain and compute area, define other parameters
#Boundaries
no.min<-3680930
no.max<-3682110
ea.min<-436175
ea.max<-437155
#number of breaks ~ 20 m apart
ea.b<-1+(ea.max-ea.min)/20
no.b<-1+(no.max-no.min)/20
#Create the vectors
ea.v<-seq(ea.min, ea.max, length = ea.b)
no.v<-seq(no.min, no.max, length = no.b)
#Create the expandgrid df for predictions
testgrid1<-expand.grid(EASTING=ea.v, NORTHING=no.v)
#Create polygon to compute area.
d.ea<-c(ea.min,ea.min,ea.max,ea.max,ea.min)
d.no<-c(no.min,no.max,no.max,no.min,no.min)
pp<-cbind(d.ea,d.no)
#plot(pp, type="b")
area.dom<-areapl(pp)
#Define porosity value
porosity.mean<-.3
porosity.sd<-.03
#########################################################
#3. Do simple average calculations on the well positions only
# \int_D C h p dx dy = \int_D dx dy * \hat{C} * \hat{h} * porosity p
#Thickness is computed using a linear prediction
#
#Local polynomial fit (1st order) and linear model
TCCZ.loess1<-loess(TCCZ_top~EASTING+NORTHING,data=TCCZe,degree= 1, span= 0.25)
TCCZ.loess1b<-loess(TCCZ_top~EASTING+NORTHING,data=TCCZe,degree= 1,span= 0.4)
TCCZ.lm<-lm(TCCZ_top~EASTING+NORTHING,data=TCCZe)
#Create the aquifer thickness data frame
thperyear<-wl
#Add the TCCZ values predicted by the linear models
# with standard error estimates
options(na.action="na.exclude")
pre2<-predict(TCCZ.loess1,newdata = wl[,c("EASTING","NORTHING")],se = TRUE)
pre2b<-predict(TCCZ.loess1b,newdata = wl[,c("EASTING","NORTHING")],se = TRUE)
pre2lm<-predict(TCCZ.lm,newdata = wl[,c("EASTING","NORTHING")],se = TRUE)
#
thperyear$TCCZ.fit<-pre2$fit
thperyear$TCCZ.se.fit<-pre2$se.fit
thperyear$TCCZ.fitb<-pre2b$fit
thperyear$TCCZ.se.fitb<-pre2b$se.fit
thperyear$TCCZ.fitlm<-pre2lm$fit
thperyear$TCCZ.se.fitlm<-pre2lm$se.fit
#Compute the thickness in feet
thperyear$h<-thperyear$mean-thperyear$TCCZ.fit
thperyear$hb<-thperyear$mean-thperyear$TCCZ.fitb
thperyear$hlm<-thperyear$mean-thperyear$TCCZ.fitlm
# Replace negative values with NA
thperyear$h[thperyear$h<=0]<-NA
thperyear$hb[thperyear$hb<=0]<-NA
thperyear$hlm[thperyear$hlm<=0]<-NA
#Remove NAs
thperyear.cleanh<-thperyear[!is.na(thperyear$h),]
thperyear.cleanhb<-thperyear[!is.na(thperyear$hb),]
thperyear.cleanhlm<-thperyear[!is.na(thperyear$hlm),]
#Compute the avg per year
th.avg.peryearh<-ddply(thperyear.cleanh, c('MYEAR'), function(x) c(counth=nrow(x),h.mean=mean(x$h),h.median=median(x$h),h.sd=sd(x$h),h.mad=mad(x$h),h.min=min(x$h),h.max=max(x$h)))
th.avg.peryearhb<-ddply(thperyear.cleanhb, c('MYEAR'), function(x) c(counthb=nrow(x),hb.mean=mean(x$hb),hb.median=median(x$hb),hb.sd=sd(x$hb),hb.mad=mad(x$hb),hb.min=min(x$hb),hb.max=max(x$hb)))
th.avg.peryearhlm<-ddply(thperyear.cleanhlm, c('MYEAR'), function(x) c(counthlm=nrow(x),hlm.mean=mean(x$hlm),hlm.median=median(x$hlm),hlm.sd=sd(x$hlm),hlm.mad=mad(x$hlm),hlm.min=min(x$hlm),hlm.max=max(x$hlm)))
#Create inventory df
inventoryja<-merge(tritiumavg[tritiumavg$MYEAR>=1984,],th.avg.peryearh,by="MYEAR")
inventoryja<-merge(inventoryja,th.avg.peryearhb,by="MYEAR")
inventoryja<-merge(inventoryja,th.avg.peryearhlm,by="MYEAR")
#Compute the inventory
inventoryja$inventory1<-area.dom*porosity.mean*inventoryja$h.mean*inventoryja$mean*.3048*1e-9
inventoryja$inventory1b<-area.dom*porosity.mean*inventoryja$hb.mean*inventoryja$mean*.3048*1e-9
inventoryja$inventory1lm<-area.dom*porosity.mean*inventoryja$hlm.mean*inventoryja$mean*.3048*1e-9
rm(thperyear.cleanh)
rm(thperyear.cleanhb)
rm(thperyear.cleanhlm)
rm(th.avg.peryearh)
rm(th.avg.peryearhb)
rm(th.avg.peryearhlm)
#Draft ggplot for the inventory
#qplot(MYEAR, inventory, data=inventoryja)
ja.plot<-ggplot(data=inventoryja, aes(x=MYEAR))
ja.plot<- ja.plot +geom_line(aes(y=inventory1), colour='blue')
ja.plot<- ja.plot +geom_line(aes(y=inventory1b), colour='red')
ja.plot<- ja.plot +geom_line(aes(y=inventory1lm), colour='green')
ja.plot<-ja.plot+labs(title="Tritium Inventory")+xlab("Year")+ylab("Tritium (Ci)")
ja.plot
# log2 scaling of the y axis (with visually-equal spacing)
ja.plotlog1<- ja.plot + scale_y_continuous(trans=log10_trans())
ja.plotlog1
# log2 coordinate transformation (with visually-diminishing spacing)
ja.plotlog2<- ja.plot + coord_trans(y="log2")
ja.plotlog2
saveRDS(inventoryja, file = "inventoryja.rdata")
inventoryja.csv<-inventoryja[,c("MYEAR","inventory1","inventory1b","inventory1lm")]
write.csv(inventoryja.csv, file="inventoryja.csv")
########################################
#4. Do the calculation again, this time with interpolation on a regular grid.
# We should compute both
# \int_D C * h * p dx dy = \int_D C dx dy/area.dom * \int_D h dx dy/area.dom * p
# = (\Sigma C)/N points on the regular grid * (\Sigma h)/N * p
# and
# \int_D C h dx dy = \int_D C*h dx dy * p
#The later will allow for the matching of concentration and aquifer thickness.
###########################################
#Define short hand for AKIMA interpolation functions without and with extrapolation
# Both interpolators evaluate onto the fixed ea.v x no.v grid defined in
# section 2; duplicate well coordinates are collapsed by averaging.
interp.peryear<- function(x) interp(x$EASTING, x$NORTHING, x$mean, xo=ea.v, yo=no.v, linear = TRUE, extrap=FALSE, duplicate = "mean");
interpext.peryear<- function(x) interp(x$EASTING, x$NORTHING, x$mean, xo=ea.v, yo=no.v, linear = FALSE, extrap=TRUE, duplicate = "mean");
#Interpolation for TCCZ
# duplicate = "error" here: duplicated TCCZ picks would indicate bad input.
TCCZ.interp<-interp(TCCZe$EASTING, TCCZe$NORTHING, TCCZe$TCCZ_top, xo=ea.v, yo=no.v, linear = TRUE, duplicate = "error")
#Interpolation
# One interpolated surface per measurement year (lists keyed by MYEAR).
wll.interp<-llply(wll, interp.peryear)
tritiuml.interp<-llply(tritiuml, interp.peryear)
#Extrapolation
wll.interpext<-llply(wll, interpext.peryear, .progress = "text")
tritiuml.interpext<-llply(tritiuml, interpext.peryear, .progress = "text")
#create aquifer thickness
# Thickness grid = (water level - TCCZ top) * 0.3048 (ft -> m factor, matching
# the .3048 used in the inventory formulas); relies on wll.interp and
# TCCZ.interp sharing the same ea.v/no.v grid.
thickness<-llply(wll.interp,function(ll){list(x=ll$x, y=ll$y, z=as.matrix(0.3048*(ll$z-TCCZ.interp$z)))}, .progress = "text")
# image.plot(TCCZ.interp)
# image.plot(wll.interp['1996'][[1]],asp = 1)
# image.plot(tritiuml.interp['1992'][[1]],asp = 1)
# image.plot(wll.interpext['1988'][[1]],asp = 1)
# image.plot(tritiuml.interpext['1992'][[1]],asp = 1)
# contour(wll.interpext['1988'][[1]][[1]],wll.interpext['1988'][[1]][[2]],wll.interpext['1988'][[1]][[3]])
# contour(wll.interp['1988'][[1]][[1]],wll.interp['1988'][[1]][[2]],wll.interp['1988'][[1]][[3]])
#image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=thickness['1996'][[1]][[3]])
#Make into a dataframe for ggplot
# thicknessdf<-testgrid1
# for (jj in 1:length(thickness)) {
# thicknessdf[2+jj]<-as.vector(thickness[jj][[1]][[3]])
# names(thicknessdf)[2+jj]<-paste0("th",names(thickness)[jj])
# }
# gg96<-ggplot(data=thicknessdf,aes(x=EASTING,y=NORTHING)) + geom_tile(aes(fill=th1996))
# gg96
#contour(testthickness['1988'][[1]][[1]],testthickness['1988'][[1]][[2]],testthickness['1988'][[1]][[3]])
######################################
#5. Using loess and prediction.
# Test Use of the loess model and linear models for TCCZ prediction
# Evaluate the three TCCZ-top models (loess span 0.25 / 0.4 and the linear
# model) on the regular grid, keeping standard errors.
pre3<-predict(TCCZ.loess1,newdata = testgrid1,se = TRUE ,na.action = na.omit)
pre3b<-predict(TCCZ.loess1b,newdata = testgrid1,se = TRUE ,na.action = na.omit)
pre3lm<-predict(TCCZ.lm,newdata = testgrid1,se = TRUE ,na.action = na.omit)
# NOTE(review): the span-0.75 fits/predictions below appear unused -- the
# per-year loop further down refits loess with span = 0.5 instead. Confirm
# before removing.
wll.loess<-llply(wll2, function(zzl) {loess(mean~EASTING+NORTHING, data=zzl,degree=1, span=0.75)})
wl.pred<-llply(wll.loess, function(m) {predict(m,newdata=testgrid1,se=TRUE)})
tritiuml.loess<-llply(tritiuml2, function(zzl) {loess(mean~EASTING+NORTHING, data=zzl,degree=1,span=0.75)})
tritium.pred<-llply(tritiuml.loess, function(m) {predict(m,newdata=testgrid1,se =TRUE)})
# inv5 accumulates gridded results: grid coordinates (columns 1-2), then the
# TCCZ fits and their standard errors (columns 3-8); the loop below appends
# five more columns per year starting at column 9.
inv5<-testgrid1
inv5$TCCZfit<-as.vector(pre3$fit)
inv5$TCCZfitb<-as.vector(pre3b$fit)
inv5$TCCZfitlm<-as.vector(pre3lm$fit)
inv5$TCCZsefit<-as.vector(pre3$se.fit)
inv5$TCCZsefitb<-as.vector(pre3b$se.fit)
inv5$TCCZsefitlm<-as.vector(pre3lm$se.fit)
# Five derived columns are appended to inv5 per year kk; the loop below and
# the inventory5 summary both rely on this fixed layout:
#   +9  T<year>    tritium loess fit (span 0.5)
#   +10 LogT<year> loess fit of log(mean) (computed here but not used later)
#   +11 w<year>    water-level loess fit
#   +12 h<year>    thickness = w - TCCZfitb, values < 1 masked to NA
#   +13 ch<year>   concentration * thickness = T * h
nbparam1<-5
# NOTE(review): assumes wll2 and tritiuml2 are index-aligned by year (both
# were subset to 1988+ earlier) -- confirm if either input changes.
for (kk in 1:length(tritiuml2)) {
t.loess<-loess(mean~EASTING+NORTHING, data=tritiuml2[[kk]],degree=1,span=0.5)
logt.loess<-loess(logmean~EASTING+NORTHING, data=tritiuml2[[kk]],degree=1,span=0.5)
predt<-predict(t.loess,newdata = testgrid1 ,se = TRUE)
predlogt<-predict(logt.loess,newdata = testgrid1 ,se = TRUE)
inv5[nbparam1*(kk-1)+9]<-as.vector(predt$fit)
names(inv5)[nbparam1*(kk-1)+9]<-paste0("T",names(tritiuml2)[kk])
inv5[nbparam1*(kk-1)+10]<-as.vector(predlogt$fit)
names(inv5)[nbparam1*(kk-1)+10]<-paste0("LogT",names(tritiuml2)[kk])
#inv5[nbparam1*(kk-1)+10]<-pdret$se.fit
#names(inv5)[nbparam1*(kk-1)+10]<-paste0("seT",names(tritiuml2)[kk])
w.loess<-loess(mean~EASTING+NORTHING, data=wll2[[kk]],degree=1, span=0.5)
predw<-predict(w.loess,newdata = testgrid1 ,se = TRUE)
inv5[nbparam1*(kk-1)+11]<-as.vector(predw$fit)
names(inv5)[nbparam1*(kk-1)+11]<-paste0("w",names(wll2)[kk])
# Thickness relative to the span-0.4 loess TCCZ surface (TCCZfitb).
inv5[nbparam1*(kk-1)+12]<-as.vector(predw$fit)-inv5$TCCZfitb
names(inv5)[nbparam1*(kk-1)+12]<-paste0("h",names(wll2)[kk])
# Mask implausibly thin (< 1) or inverted thickness values.
inv5[nbparam1*(kk-1)+12][inv5[nbparam1*(kk-1)+12]<1]<-NA
inv5[nbparam1*(kk-1)+13]<-inv5[nbparam1*(kk-1)+9]*inv5[nbparam1*(kk-1)+12]
names(inv5)[nbparam1*(kk-1)+13]<-paste0("ch",names(wll2)[kk])
}
# Quick visual sanity checks on the gridded results (1996/1994 snapshots).
# ggtest2<-ggplot(inv5, aes(x=EASTING,y=NORTHING)) + geom_tile(aes(fill=TCCZfitb))
# ggtest2<-ggtest2+scale_colour_gradient(limits=range(inv5$TCCZfitb, na.rm = TRUE), low="red", high="white")
# print(ggtest2)
#
# ggtest3<-ggplot(inv5, aes(x=EASTING,y=NORTHING)) + geom_tile(aes(fill=TCCZfitb), colour = "white",linetype = 0) + scale_fill_gradient(low = "white",high = "red")
# print(ggtest3)
# NOTE(review): ..level.. is the legacy computed-variable syntax; newer
# ggplot2 versions prefer after_stat(level) -- confirm the installed version.
ggtest4<-ggplot(inv5, aes(x=EASTING,y=NORTHING, z=T1996)) + stat_contour(aes(colour = ..level..))
#ggtest4<-ggtest4+scale_colour_gradient(limits=range(inv5$TCCZfitb, na.rm = TRUE), low="red", high="white")
print(ggtest4)
# Scatter of thickness vs tritium fit for 1994; column-name based, so it
# requires that year to be present in wll2/tritiuml2.
qplot(x=h1994,y=T1994,data=inv5)
# Grid-based inventory per year: summarise the gridded concentration*thickness
# product ("ch" columns of inv5) and scale to curies.
inventory5<-data.frame(MYEAR=seq(1988,2011,length=24))
for (jj2 in seq_along(inventory5$MYEAR)) {
  # BUG FIX: the per-year column layout of inv5 (built in the loop above) is
  # +9 T, +10 LogT, +11 w, +12 h, +13 ch. The original code summarised offset
  # +12 (thickness alone), so "meanch"/"medianch"/"sdch" silently lacked the
  # concentration factor; the commented leftover below (t1 <- ... mean(ch1996,
  # ...)) shows the C*h product at offset +13 was intended.
  ch <- inv5[[nbparam1*(jj2-1)+13]]
  inventory5$meanch[jj2] <- mean(ch, na.rm=TRUE)
  inventory5$medianch[jj2] <- median(ch, na.rm=TRUE)
  inventory5$sdch[jj2] <- sd(ch, na.rm=TRUE)
}
# Inventory (Ci): area * porosity * mean(C*h) * 1e-9; 0.3048 converts the
# ft-based thickness component to metres, matching the inventory1* formulas.
inventory5$t<-area.dom*porosity.mean*inventory5$meanch*1e-9*.3048
inventory5$tmed<-area.dom*porosity.mean*inventory5$medianch*1e-9*.3048
# Combine the well-average (inventory1*) and grid/loess (t, tmed) estimates
# by year.
inventory.final <- merge(inventoryja, inventory5, by = "MYEAR")
# Line plot of the four inventory estimates on a log10 y axis.
final.plot <- ggplot(data = inventory.final, aes(x = MYEAR)) +
  geom_line(aes(y = inventory1), colour = 'blue') +
  geom_line(aes(y = inventory1b), colour = 'red') +
  geom_line(aes(y = inventory1lm), colour = 'green') +
  geom_line(aes(y = t), colour = 'orange') +
  # geom_line(aes(y = tmed), colour = 'black') +
  scale_y_log10() +
  labs(title = "Tritium Inventory") + xlab("Year") + ylab("Tritium (Ci)")
print(final.plot)
# Same comparison rendered with points instead of lines.
final.plot2 <- ggplot(data = inventory.final, aes(x = MYEAR)) +
  geom_point(aes(y = inventory1), colour = 'blue') +
  geom_point(aes(y = inventory1b), colour = 'red') +
  geom_point(aes(y = inventory1lm), colour = 'green') +
  geom_point(aes(y = t), colour = 'orange') +
  scale_y_log10() +
  labs(title = "Tritium Inventory") + xlab("Year") + ylab("Tritium (Ci)")
print(final.plot2)
################################################
################################################
################################################
#Leftovers
#
#
#Call loess
#second order polynomial
#TCCZ.loess2 = loess(TCCZ_top~EASTING+NORTHING, data = TCCZe, degree = 2, span = 0.25)
#predict(TCCZ.loess1,newdata = wlavg[,c("EASTING","NORTHING")], na.action = na.omit)
############################################
#3. a)Computation with the average water level
# thavg<-wlavg
# pre1<-predict(TCCZ.loess1,newdata = wlavg[,c("EASTING","NORTHING")],se=TRUE)
# pre1b<-predict(TCCZ.loess1b,newdata = wlavg[,c("EASTING","NORTHING")],se = TRUE)
# pre1lm<-predict(TCCZ.lm,newdata = wlavg[,c("EASTING","NORTHING")],se = TRUE)
#
# thavg$TCCZ.fit<-pre1$fit
# thavg$TCCZ.se.fit<-pre1$se.fit
# thavg$TCCZ.fitb<-pre1b$fit
# thavg$TCCZ.se.fitb<-pre1b$se.fit
# thavg$TCCZ.fitlm<-pre1lm$fit
# thavg$TCCZ.se.fitlm<-pre1lm$se.fit
#Compute the thickness in feet
# thavg$h<-thavg$mean-thavg$TCCZ.fit
# Replace negative values with NA
# thavg$h[thavg$h<=0]<-NA
#Remove NAs
# thavg.clean<-thavg[!is.na(thavg$h),]
#####################################
#Early inventory calculations
#inventoryjahb<-merge(tritiumavg[tritiumavg$MYEAR>=1984,],th.avg.peryearhb,by="MYEAR")
#inventoryjahlm<-merge(tritiumavg[tritiumavg$MYEAR>=1984,],th.avg.peryearhlm,by="MYEAR")
#inventoryja[,c("count","h.mean","h.median","h.sd","h.mad","h.min","h.max")]<-th.avg.peryear[, c("count","h.mean","h.median","h.sd","h.mad","h.min","h.max")]
#inventoryjah$inventory<-area.dom*porosity.mean*inventoryjah$h.mean*inventoryjah$mean*.3048*1e-9
#inventoryjahb$inventory<-area.dom*porosity.mean*inventoryjahb$hb.mean*inventoryjahb$mean*.3048*1e-9
#inventoryjahlm$inventory<-area.dom*porosity.mean*inventoryjahlm$hlm.mean*inventoryjahlm$mean*.3048*1e-9
#
#selectyear<-function (x,y) {subset(x, x$MYEAR == y)}
#tritium.1984<-selectyear(tritium, 1984)
#TCCZ.lm<-lm(TCCZ_top~UTM_E+UTM_N,data=TCCZe,na.action=na.omit)
#tritium.interp<-interp(tritium$EASTING, tritium$NORTHING, tritium$mean, xo=seq(ea.min, ea.max, length = ea.b), yo=seq(no.min, no.max, length = no.b), linear = TRUE, extrap=FALSE, duplicate = "mean")
#wlavg.lm<-lm(mean~EASTING+NORTHING, data =wlavg)
#prewllm<-predict(wlavg.lm,newdata = testgrid1,se = TRUE ,na.action = na.omit)
# #Test interpolation debugging with smaller matrix
# #number of breaks
# ea.b2<-15
# no.b2<-20
#
# #ea
# ea.v2<-seq(ea.min, ea.max, length = ea.b2)
# no.v2<-seq(no.min, no.max, length = no.b2)
#
# wlavg.interp2<-interp(wlavg$EASTING, wlavg$NORTHING, wlavg$mean, xo=ea.v2, yo=no.v2, linear = TRUE, extrap=FALSE, duplicate = "mean")
# # Flip matrix (upside-down)
# flip.matrix <- function(x) {
# mirror.matrix(rotate180.matrix(x))
# }
# # Rotate matrix 180 degrees clockwise
# rotate180.matrix <- function(x) {
# xx <- rev(x);
# dim(xx) <- dim(x);
# xx;
# }
#
# # Rotate matrix 270 degrees clockwise
# rotate270.matrix <- function(x) {
# mirror.matrix(t(x))
# }
# # Debug Statements for interp output
# tx<-matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE)
# ty<-matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE)
# tz<-wlavg.interp2$z
# tzprime<-t(wlavg.interp2$z)
# tz90<-rotate90.matrix(tz)
#TCCZ.loess1.interp<-list(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=matrix(pre3$fit,nrow=60, ncol=50, byrow=TRUE))
#TCCZ.loess1b.interp<-list(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=matrix(pre3$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=matrix(pre3$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=matrix(pre3b$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=matrix(pre3lm$fit,nrow=60, ncol=50, byrow=TRUE))
#
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=rotate270.matrix(wlavg.interp$z)-matrix(pre3$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=rotate90.matrix(wlavg.interp$z)-matrix(pre3b$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=rotate90.matrix(wlavg.interp$z)-matrix(pre3lm$fit,nrow=60, ncol=50, byrow=TRUE))
#
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=wlavg.interp$z-matrix(pre3$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=wlavg.interp$z-matrix(pre3b$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=wlavg.interp$z-matrix(pre3lm$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(wlavg.interp)
# dim(wlavg.interp$z)
# image.plot(TCCZ.loess1.interp)
# points(TCCZe$EASTING, TCCZe$NORTHING, pch=19)
# text(TCCZe$EASTING, TCCZe$NORTHING, labels=TCCZe$STATION_ID)
# wlavg.interp<-interp(wlavg$EASTING, wlavg$NORTHING, wlavg$mean, xo=ea.v, yo=no.v, linear = TRUE, duplicate = "mean")
####################################
#Loop test
#
#
# t.loess<-loess(mean~EASTING+NORTHING, data=tritiuml2[['1996']],degree=1,span=0.5)
# logt.loess<-loess(logmean~EASTING+NORTHING, data=tritiuml2[['1996']],degree=1,span=0.5)
# predt<-predict(t.loess,newdata = testgrid1 ,se = TRUE)
# predlogt<-predict(logt.loess,newdata = testgrid1 ,se = TRUE)
# T1996<-as.vector(predt$fit)
# Tcl1996<-T1996
# Tcl1996[Tcl1996<0]<-NA
# LogT1996<-as.vector(predlogt$fit)
# Tprime1996<-exp(LogT1996)
# w.loess<-loess(mean~EASTING+NORTHING, data=wll2[['1996']],degree=1, span=0.5)
# predw<-predict(w.loess,newdata = testgrid1 ,se = TRUE)
# w1996<-as.vector(predw$fit)
# h1996<-as.vector(predw$fit)-inv5$TCCZfitb
# h1996[h1996<1]<-NA
# ch1996<-T1996*h1996
# chcl1996<-Tcl1996*h1996
# chprime1996<-Tprime1996*h1996
#
# t1<-area.dom*porosity.mean*mean(ch1996,na.rm=TRUE)*1e-9*.3048
# tcl1<-area.dom*porosity.mean*mean(chcl1996,na.rm=TRUE)*1e-9*.3048
# tlog1<-area.dom*porosity.mean*mean(chprime1996,na.rm=TRUE)*1e-9*.3048
# #image and contour plot for the TCCZ
# image.plot(TCCZ.interp)
# contour(TCCZ.interp$x,TCCZ.interp$y,TCCZ.interp$z)
# points(f3basin27$UTM_E,f3basin27$UTM_N,type="l")
# points(TCCZe$EASTING, TCCZe$NORTHING, pch=19)
# text(TCCZe$UTM_E, TCCZe$UTM_N, labels=TCCZe$UWI)
#
# #image and contour plot for the water level
# image.plot(wlavg.interp)
# contour(wlavg.interp$x,wlavg.interp$y,wlavg.interp$z)
#
# #
# #image and contour plot for the water level
# #image.plot(wlavg.interp)
# contour(wlavg.interp$x,wlavg.interp$y,wlavg.interp$z-TCCZ.interp$z, asp = 1, bty ="n")
# #xlim=c(ea.min,ea.max), ylim=c(no.min,no.max),
# #points(f3basin$UTM_E,f3basin$UTM_N,type="l")
# points(f3basin27$UTM_E,f3basin27$UTM_N,type="l")
# points(wlavg$EASTING, wlavg$NORTHING, pch=19)
# text(wlavg$EASTING, wlavg$NORTHING, labels=wlavg$STATION_ID)
#Define n zones
#Compute trend planes in these zones for waterlevels and TCCZ
#Do the difference to get the thickness | /analysis/raw_scripts/old_just_avg_past0.R | no_license | artwr/srs_work | R | false | false | 20,717 | r | #Simple Computation
# Attach required packages. library() (unlike require()) stops with an error
# when a package is missing, instead of silently returning FALSE and letting
# the script fail later with a cryptic "could not find function" message.
library(geoR)
library(geoRglm)
library(fields)
library(akima)
library(splancs)
library(plyr)
library(ggplot2)
library(scales)
# setwd("D:/CodeProjects/R/R_srs/inventory")
# setwd("D:/work/Code/srs_work/inventory")
#Clear all, set na.exclude
# NOTE(review): rm(list = ls()) wipes the caller's workspace as a side
# effect; acceptable for a standalone batch script, but avoid sourcing this
# file from an interactive session.
rm(list=ls())
#options(na.action="na.omit")
#options("na.action")
###################
#0. Some needed functions
#
# Selected matrix helper functions from Henrik Bengtsson:
# https://stat.ethz.ch/pipermail/r-help/2003-October/040484.html
# http://www1.maths.lth.se/help/R/image/image.R
# Mirror a matrix left-right: reverse the column order.
#
# The original implementation round-tripped through
# as.data.frame()/rev()/as.matrix(), which is slow and tags the result with
# spurious "V1","V2",... column names that the input never had. Direct column
# indexing preserves the input's storage mode and dimnames (or lack thereof)
# and, via seq_len(), also handles zero-column matrices.
mirror.matrix <- function(x) {
  x[, rev(seq_len(ncol(x))), drop = FALSE]
}
# Rotate a matrix 90 degrees clockwise: mirror left-right, then transpose.
rotate90.matrix <- function(x) {
  mirrored <- mirror.matrix(x)
  t(mirrored)
}
# Rotate a matrix 180 degrees: reverse the elements, then restore the
# original dimensions. rev() strips attributes, so dimnames are dropped --
# same behaviour as before.
rotate180.matrix <- function(x) {
  flipped <- rev(x)
  dim(flipped) <- dim(x)
  flipped
}
# Rotate a matrix 270 degrees clockwise: transpose first, then mirror
# left-right (the reverse composition of rotate90.matrix).
rotate270.matrix <- function(x) {
  transposed <- t(x)
  mirror.matrix(transposed)
}
#################################
#1.
#load data
# Inputs are pre-built RDS files from the sibling data directories:
# per-sample tritium, per-year tritium averages, water levels (wl/wlavg), and
# the TCCZ surface picks ("TCCZ_top").
tritium<-readRDS("../SRS_data/tritium.rdata")
tritiumavg<-readRDS("../SRS_data/tritiumavg.rdata")
wl<-readRDS("../SRS_data/wl.rdata")
TCCZe_all<-readRDS("../TCCZ_krig/TCCZ/TCCZ_o.rdata")
# Keep only rows with an observed TCCZ top.
TCCZe<-TCCZe_all[!is.na(TCCZe_all$TCCZ_top),]
rm(TCCZe_all)
wlavg<-readRDS("../SRS_data/wlavg.rdata")
#basin coords for plotting if needed
#f3basin<-readRDS("../basin_coords/f3basin.rdata")
#f3basin27<-readRDS("../basin_coords/f3basin27.rdata")
#Add log transform
tritium$logmean<-log(tritium$mean)
tritium$log10mean<-log10(tritium$mean)
#
#Split per measurement year
wll<-split(wl,wl$MYEAR)
tritiuml<-split(tritium,tritium$MYEAR)
#Select 1988 and after
# NOTE(review): the 5 and 10 offsets hard-code the position of 1988 relative
# to the first MYEAR present in each dataset; confirm they still land on 1988
# if either input changes.
wll2<-wll[5:length(wll)]
tritiuml2<-tritiuml[10:length(tritiuml)]
#names(tritiuml2)
#########################################################
#2.
#Define interpolation domain and compute area, define other parameters
#Boundaries
# Rectangular study domain (projected coordinates, same units as the
# EASTING/NORTHING columns -- presumably metres; verify against the source
# data's CRS).
no.min<-3680930
no.max<-3682110
ea.min<-436175
ea.max<-437155
#number of breaks ~ 20 m apart
ea.b<-1+(ea.max-ea.min)/20
no.b<-1+(no.max-no.min)/20
#Create the vectors
ea.v<-seq(ea.min, ea.max, length = ea.b)
no.v<-seq(no.min, no.max, length = no.b)
#Create the expandgrid df for predictions
# One row per grid node; used as newdata for interp()/loess predictions.
testgrid1<-expand.grid(EASTING=ea.v, NORTHING=no.v)
#Create polygon to compute area.
# Closed rectangle traversed counter-clockwise; last vertex repeats the first.
d.ea<-c(ea.min,ea.min,ea.max,ea.max,ea.min)
d.no<-c(no.min,no.max,no.max,no.min,no.min)
pp<-cbind(d.ea,d.no)
#plot(pp, type="b")
# areapl() (splancs) computes the polygon area; for this rectangle it equals
# (ea.max-ea.min)*(no.max-no.min).
area.dom<-areapl(pp)
#Define porosity value
porosity.mean<-.3
porosity.sd<-.03
#########################################################
#3. Do simple average calculations on the well positions only
# \int_D C h p dx dy = \int_D dx dy * \hat{C} * \hat{h} * porosity p
#Thickness is computed using a linear prediction
#
#Local polynomial fit (1st order) and linear model
# Three competing models of the TCCZ top surface: two loess fits with
# different smoothing spans, and a plane (linear model).
TCCZ.loess1<-loess(TCCZ_top~EASTING+NORTHING,data=TCCZe,degree= 1, span= 0.25)
TCCZ.loess1b<-loess(TCCZ_top~EASTING+NORTHING,data=TCCZe,degree= 1,span= 0.4)
TCCZ.lm<-lm(TCCZ_top~EASTING+NORTHING,data=TCCZe)
#Create the aquifer thickness data frame
thperyear<-wl
#Add the TCCZ values predicted by the linear models
# with standard error estimates
# NOTE(review): this sets a *global* option for the rest of the script, not
# just the three predict() calls below.
options(na.action="na.exclude")
pre2<-predict(TCCZ.loess1,newdata = wl[,c("EASTING","NORTHING")],se = TRUE)
pre2b<-predict(TCCZ.loess1b,newdata = wl[,c("EASTING","NORTHING")],se = TRUE)
pre2lm<-predict(TCCZ.lm,newdata = wl[,c("EASTING","NORTHING")],se = TRUE)
#
thperyear$TCCZ.fit<-pre2$fit
thperyear$TCCZ.se.fit<-pre2$se.fit
thperyear$TCCZ.fitb<-pre2b$fit
thperyear$TCCZ.se.fitb<-pre2b$se.fit
thperyear$TCCZ.fitlm<-pre2lm$fit
thperyear$TCCZ.se.fitlm<-pre2lm$se.fit
#Compute the thickness in feet
# h/hb/hlm: water level minus predicted TCCZ top, one column per model.
thperyear$h<-thperyear$mean-thperyear$TCCZ.fit
thperyear$hb<-thperyear$mean-thperyear$TCCZ.fitb
thperyear$hlm<-thperyear$mean-thperyear$TCCZ.fitlm
# Replace negative values with NA
thperyear$h[thperyear$h<=0]<-NA
thperyear$hb[thperyear$hb<=0]<-NA
thperyear$hlm[thperyear$hlm<=0]<-NA
#Remove NAs
# One cleaned frame per model, since each model yields a different NA set.
thperyear.cleanh<-thperyear[!is.na(thperyear$h),]
thperyear.cleanhb<-thperyear[!is.na(thperyear$hb),]
thperyear.cleanhlm<-thperyear[!is.na(thperyear$hlm),]
#Compute the avg per year
# Per-year summary stats (count/mean/median/sd/mad/min/max) of thickness for
# each of the three TCCZ models.
th.avg.peryearh<-ddply(thperyear.cleanh, c('MYEAR'), function(x) c(counth=nrow(x),h.mean=mean(x$h),h.median=median(x$h),h.sd=sd(x$h),h.mad=mad(x$h),h.min=min(x$h),h.max=max(x$h)))
th.avg.peryearhb<-ddply(thperyear.cleanhb, c('MYEAR'), function(x) c(counthb=nrow(x),hb.mean=mean(x$hb),hb.median=median(x$hb),hb.sd=sd(x$hb),hb.mad=mad(x$hb),hb.min=min(x$hb),hb.max=max(x$hb)))
th.avg.peryearhlm<-ddply(thperyear.cleanhlm, c('MYEAR'), function(x) c(counthlm=nrow(x),hlm.mean=mean(x$hlm),hlm.median=median(x$hlm),hlm.sd=sd(x$hlm),hlm.mad=mad(x$hlm),hlm.min=min(x$hlm),hlm.max=max(x$hlm)))
#Create inventory df
# Inner join of the 1984+ yearly tritium averages with the thickness stats.
inventoryja<-merge(tritiumavg[tritiumavg$MYEAR>=1984,],th.avg.peryearh,by="MYEAR")
inventoryja<-merge(inventoryja,th.avg.peryearhb,by="MYEAR")
inventoryja<-merge(inventoryja,th.avg.peryearhlm,by="MYEAR")
#Compute the inventory
# Inventory = area * porosity * mean thickness (ft, x 0.3048 -> m) * mean
# concentration * 1e-9; one estimate per TCCZ model. Plot labels say Ci.
inventoryja$inventory1<-area.dom*porosity.mean*inventoryja$h.mean*inventoryja$mean*.3048*1e-9
inventoryja$inventory1b<-area.dom*porosity.mean*inventoryja$hb.mean*inventoryja$mean*.3048*1e-9
inventoryja$inventory1lm<-area.dom*porosity.mean*inventoryja$hlm.mean*inventoryja$mean*.3048*1e-9
# Drop the per-well intermediates now that they are summarised.
rm(thperyear.cleanh)
rm(thperyear.cleanhb)
rm(thperyear.cleanhlm)
rm(th.avg.peryearh)
rm(th.avg.peryearhb)
rm(th.avg.peryearhlm)
#Draft ggplot for the inventory
#qplot(MYEAR, inventory, data=inventoryja)
ja.plot<-ggplot(data=inventoryja, aes(x=MYEAR))
ja.plot<- ja.plot +geom_line(aes(y=inventory1), colour='blue')
ja.plot<- ja.plot +geom_line(aes(y=inventory1b), colour='red')
ja.plot<- ja.plot +geom_line(aes(y=inventory1lm), colour='green')
ja.plot<-ja.plot+labs(title="Tritium Inventory")+xlab("Year")+ylab("Tritium (Ci)")
ja.plot
# log10 scaling of the y axis (with visually-equal spacing)
ja.plotlog1<- ja.plot + scale_y_continuous(trans=log10_trans())
ja.plotlog1
# log2 coordinate transformation (with visually-diminishing spacing)
ja.plotlog2<- ja.plot + coord_trans(y="log2")
ja.plotlog2
# Persist the full inventory table and a trimmed CSV of the three estimates.
saveRDS(inventoryja, file = "inventoryja.rdata")
inventoryja.csv<-inventoryja[,c("MYEAR","inventory1","inventory1b","inventory1lm")]
write.csv(inventoryja.csv, file="inventoryja.csv")
########################################
#4. Do the calculation again, this time with interpolation on a regular grid.
# We should compute both
# \int_D C * h * p dx dy = \int_D C dx dy/area.dom * \int_D h dx dy/area.dom * p
# = (\Sigma C)/N points on the regular grid * (\Sigma h)/N * p
# and
# \int_D C h dx dy = \int_D C*h dx dy * p
#The later will allow for the matching of concentration and aquifer thickness.
###########################################
#Define short hand for AKIMA interpolation functions without and with extrapolation
# Both interpolators evaluate onto the fixed ea.v x no.v grid defined in
# section 2; duplicate well coordinates are collapsed by averaging.
interp.peryear<- function(x) interp(x$EASTING, x$NORTHING, x$mean, xo=ea.v, yo=no.v, linear = TRUE, extrap=FALSE, duplicate = "mean");
interpext.peryear<- function(x) interp(x$EASTING, x$NORTHING, x$mean, xo=ea.v, yo=no.v, linear = FALSE, extrap=TRUE, duplicate = "mean");
#Interpolation for TCCZ
# duplicate = "error" here: duplicated TCCZ picks would indicate bad input.
TCCZ.interp<-interp(TCCZe$EASTING, TCCZe$NORTHING, TCCZe$TCCZ_top, xo=ea.v, yo=no.v, linear = TRUE, duplicate = "error")
#Interpolation
# One interpolated surface per measurement year (lists keyed by MYEAR).
wll.interp<-llply(wll, interp.peryear)
tritiuml.interp<-llply(tritiuml, interp.peryear)
#Extrapolation
wll.interpext<-llply(wll, interpext.peryear, .progress = "text")
tritiuml.interpext<-llply(tritiuml, interpext.peryear, .progress = "text")
#create aquifer thickness
# Thickness grid = (water level - TCCZ top) * 0.3048 (ft -> m factor, matching
# the .3048 used in the inventory formulas); relies on wll.interp and
# TCCZ.interp sharing the same ea.v/no.v grid.
thickness<-llply(wll.interp,function(ll){list(x=ll$x, y=ll$y, z=as.matrix(0.3048*(ll$z-TCCZ.interp$z)))}, .progress = "text")
# image.plot(TCCZ.interp)
# image.plot(wll.interp['1996'][[1]],asp = 1)
# image.plot(tritiuml.interp['1992'][[1]],asp = 1)
# image.plot(wll.interpext['1988'][[1]],asp = 1)
# image.plot(tritiuml.interpext['1992'][[1]],asp = 1)
# contour(wll.interpext['1988'][[1]][[1]],wll.interpext['1988'][[1]][[2]],wll.interpext['1988'][[1]][[3]])
# contour(wll.interp['1988'][[1]][[1]],wll.interp['1988'][[1]][[2]],wll.interp['1988'][[1]][[3]])
#image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=thickness['1996'][[1]][[3]])
#Make into a dataframe for ggplot
# thicknessdf<-testgrid1
# for (jj in 1:length(thickness)) {
# thicknessdf[2+jj]<-as.vector(thickness[jj][[1]][[3]])
# names(thicknessdf)[2+jj]<-paste0("th",names(thickness)[jj])
# }
# gg96<-ggplot(data=thicknessdf,aes(x=EASTING,y=NORTHING)) + geom_tile(aes(fill=th1996))
# gg96
#contour(testthickness['1988'][[1]][[1]],testthickness['1988'][[1]][[2]],testthickness['1988'][[1]][[3]])
######################################
#5. Using loess and prediction.
# Test Use of the loess model and linear models for TCCZ prediction
# Evaluate the three TCCZ-top models (loess span 0.25 / 0.4 and the linear
# model) on the regular grid, keeping standard errors.
pre3<-predict(TCCZ.loess1,newdata = testgrid1,se = TRUE ,na.action = na.omit)
pre3b<-predict(TCCZ.loess1b,newdata = testgrid1,se = TRUE ,na.action = na.omit)
pre3lm<-predict(TCCZ.lm,newdata = testgrid1,se = TRUE ,na.action = na.omit)
# NOTE(review): the span-0.75 fits/predictions below appear unused -- the
# per-year loop further down refits loess with span = 0.5 instead. Confirm
# before removing.
wll.loess<-llply(wll2, function(zzl) {loess(mean~EASTING+NORTHING, data=zzl,degree=1, span=0.75)})
wl.pred<-llply(wll.loess, function(m) {predict(m,newdata=testgrid1,se=TRUE)})
tritiuml.loess<-llply(tritiuml2, function(zzl) {loess(mean~EASTING+NORTHING, data=zzl,degree=1,span=0.75)})
tritium.pred<-llply(tritiuml.loess, function(m) {predict(m,newdata=testgrid1,se =TRUE)})
# inv5 accumulates gridded results: grid coordinates (columns 1-2), then the
# TCCZ fits and their standard errors (columns 3-8); the loop below appends
# five more columns per year starting at column 9.
inv5<-testgrid1
inv5$TCCZfit<-as.vector(pre3$fit)
inv5$TCCZfitb<-as.vector(pre3b$fit)
inv5$TCCZfitlm<-as.vector(pre3lm$fit)
inv5$TCCZsefit<-as.vector(pre3$se.fit)
inv5$TCCZsefitb<-as.vector(pre3b$se.fit)
inv5$TCCZsefitlm<-as.vector(pre3lm$se.fit)
# Five derived columns are appended to inv5 per year kk; the loop below and
# the inventory5 summary both rely on this fixed layout:
#   +9  T<year>    tritium loess fit (span 0.5)
#   +10 LogT<year> loess fit of log(mean) (computed here but not used later)
#   +11 w<year>    water-level loess fit
#   +12 h<year>    thickness = w - TCCZfitb, values < 1 masked to NA
#   +13 ch<year>   concentration * thickness = T * h
nbparam1<-5
# NOTE(review): assumes wll2 and tritiuml2 are index-aligned by year (both
# were subset to 1988+ earlier) -- confirm if either input changes.
for (kk in 1:length(tritiuml2)) {
t.loess<-loess(mean~EASTING+NORTHING, data=tritiuml2[[kk]],degree=1,span=0.5)
logt.loess<-loess(logmean~EASTING+NORTHING, data=tritiuml2[[kk]],degree=1,span=0.5)
predt<-predict(t.loess,newdata = testgrid1 ,se = TRUE)
predlogt<-predict(logt.loess,newdata = testgrid1 ,se = TRUE)
inv5[nbparam1*(kk-1)+9]<-as.vector(predt$fit)
names(inv5)[nbparam1*(kk-1)+9]<-paste0("T",names(tritiuml2)[kk])
inv5[nbparam1*(kk-1)+10]<-as.vector(predlogt$fit)
names(inv5)[nbparam1*(kk-1)+10]<-paste0("LogT",names(tritiuml2)[kk])
#inv5[nbparam1*(kk-1)+10]<-pdret$se.fit
#names(inv5)[nbparam1*(kk-1)+10]<-paste0("seT",names(tritiuml2)[kk])
w.loess<-loess(mean~EASTING+NORTHING, data=wll2[[kk]],degree=1, span=0.5)
predw<-predict(w.loess,newdata = testgrid1 ,se = TRUE)
inv5[nbparam1*(kk-1)+11]<-as.vector(predw$fit)
names(inv5)[nbparam1*(kk-1)+11]<-paste0("w",names(wll2)[kk])
# Thickness relative to the span-0.4 loess TCCZ surface (TCCZfitb).
inv5[nbparam1*(kk-1)+12]<-as.vector(predw$fit)-inv5$TCCZfitb
names(inv5)[nbparam1*(kk-1)+12]<-paste0("h",names(wll2)[kk])
# Mask implausibly thin (< 1) or inverted thickness values.
inv5[nbparam1*(kk-1)+12][inv5[nbparam1*(kk-1)+12]<1]<-NA
inv5[nbparam1*(kk-1)+13]<-inv5[nbparam1*(kk-1)+9]*inv5[nbparam1*(kk-1)+12]
names(inv5)[nbparam1*(kk-1)+13]<-paste0("ch",names(wll2)[kk])
}
# Quick visual sanity checks on the gridded results (1996/1994 snapshots).
# ggtest2<-ggplot(inv5, aes(x=EASTING,y=NORTHING)) + geom_tile(aes(fill=TCCZfitb))
# ggtest2<-ggtest2+scale_colour_gradient(limits=range(inv5$TCCZfitb, na.rm = TRUE), low="red", high="white")
# print(ggtest2)
#
# ggtest3<-ggplot(inv5, aes(x=EASTING,y=NORTHING)) + geom_tile(aes(fill=TCCZfitb), colour = "white",linetype = 0) + scale_fill_gradient(low = "white",high = "red")
# print(ggtest3)
# NOTE(review): ..level.. is the legacy computed-variable syntax; newer
# ggplot2 versions prefer after_stat(level) -- confirm the installed version.
ggtest4<-ggplot(inv5, aes(x=EASTING,y=NORTHING, z=T1996)) + stat_contour(aes(colour = ..level..))
#ggtest4<-ggtest4+scale_colour_gradient(limits=range(inv5$TCCZfitb, na.rm = TRUE), low="red", high="white")
print(ggtest4)
# Scatter of thickness vs tritium fit for 1994; column-name based, so it
# requires that year to be present in wll2/tritiuml2.
qplot(x=h1994,y=T1994,data=inv5)
# Grid-based inventory per year: summarise the gridded concentration*thickness
# product ("ch" columns of inv5) and scale to curies.
inventory5<-data.frame(MYEAR=seq(1988,2011,length=24))
for (jj2 in seq_along(inventory5$MYEAR)) {
  # BUG FIX: the per-year column layout of inv5 (built in the loop above) is
  # +9 T, +10 LogT, +11 w, +12 h, +13 ch. The original code summarised offset
  # +12 (thickness alone), so "meanch"/"medianch"/"sdch" silently lacked the
  # concentration factor. Offset +13 is the intended C*h product.
  ch <- inv5[[nbparam1*(jj2-1)+13]]
  inventory5$meanch[jj2] <- mean(ch, na.rm=TRUE)
  inventory5$medianch[jj2] <- median(ch, na.rm=TRUE)
  inventory5$sdch[jj2] <- sd(ch, na.rm=TRUE)
}
# Inventory (Ci): area * porosity * mean(C*h) * 1e-9; 0.3048 converts the
# ft-based thickness component to metres, matching the inventory1* formulas.
inventory5$t<-area.dom*porosity.mean*inventory5$meanch*1e-9*.3048
inventory5$tmed<-area.dom*porosity.mean*inventory5$medianch*1e-9*.3048
# Combine the well-average (inventory1*) and grid/loess (t, tmed) estimates
# by year.
inventory.final<-merge(inventoryja, inventory5, by="MYEAR")
# Line plot of the four inventory estimates on a log10 y axis
# (blue/red/green = TCCZ loess 0.25 / loess 0.4 / linear; orange = gridded).
final.plot<-ggplot(data=inventory.final, aes(x=MYEAR))
final.plot<- final.plot +geom_line(aes(y=inventory1), colour='blue')
final.plot<- final.plot +geom_line(aes(y=inventory1b), colour='red')
final.plot<- final.plot +geom_line(aes(y=inventory1lm), colour='green')
final.plot<- final.plot +geom_line(aes(y=t), colour='orange')
#final.plot<- final.plot +geom_line(aes(y=tmed), colour='black')
final.plot<- final.plot + scale_y_log10()
#scale_y_continuous(trans=log2_trans())
final.plot<-final.plot+labs(title="Tritium Inventory")+xlab("Year")+ylab("Tritium (Ci)")
print(final.plot)
# Same comparison rendered with points instead of lines.
final.plot2<-ggplot(data=inventory.final, aes(x=MYEAR))
final.plot2<- final.plot2 +geom_point(aes(y=inventory1), colour='blue')
final.plot2<- final.plot2 +geom_point(aes(y=inventory1b), colour='red')
final.plot2<- final.plot2 +geom_point(aes(y=inventory1lm), colour='green')
final.plot2<- final.plot2 +geom_point(aes(y=t), colour='orange')
final.plot2<- final.plot2 + scale_y_log10()
final.plot2<-final.plot2+labs(title="Tritium Inventory")+xlab("Year")+ylab("Tritium (Ci)")
print(final.plot2)
################################################
################################################
################################################
#Leftovers
#
#
#Call loess
#second order polynomial
#TCCZ.loess2 = loess(TCCZ_top~EASTING+NORTHING, data = TCCZe, degree = 2, span = 0.25)
#predict(TCCZ.loess1,newdata = wlavg[,c("EASTING","NORTHING")], na.action = na.omit)
############################################
#3. a)Computation with the average water level
# thavg<-wlavg
# pre1<-predict(TCCZ.loess1,newdata = wlavg[,c("EASTING","NORTHING")],se=TRUE)
# pre1b<-predict(TCCZ.loess1b,newdata = wlavg[,c("EASTING","NORTHING")],se = TRUE)
# pre1lm<-predict(TCCZ.lm,newdata = wlavg[,c("EASTING","NORTHING")],se = TRUE)
#
# thavg$TCCZ.fit<-pre1$fit
# thavg$TCCZ.se.fit<-pre1$se.fit
# thavg$TCCZ.fitb<-pre1b$fit
# thavg$TCCZ.se.fitb<-pre1b$se.fit
# thavg$TCCZ.fitlm<-pre1lm$fit
# thavg$TCCZ.se.fitlm<-pre1lm$se.fit
#Compute the thickness in feet
# thavg$h<-thavg$mean-thavg$TCCZ.fit
# Replace negative values with NA
# thavg$h[thavg$h<=0]<-NA
#Remove NAs
# thavg.clean<-thavg[!is.na(thavg$h),]
#####################################
#Early inventory calculations
#inventoryjahb<-merge(tritiumavg[tritiumavg$MYEAR>=1984,],th.avg.peryearhb,by="MYEAR")
#inventoryjahlm<-merge(tritiumavg[tritiumavg$MYEAR>=1984,],th.avg.peryearhlm,by="MYEAR")
#inventoryja[,c("count","h.mean","h.median","h.sd","h.mad","h.min","h.max")]<-th.avg.peryear[, c("count","h.mean","h.median","h.sd","h.mad","h.min","h.max")]
#inventoryjah$inventory<-area.dom*porosity.mean*inventoryjah$h.mean*inventoryjah$mean*.3048*1e-9
#inventoryjahb$inventory<-area.dom*porosity.mean*inventoryjahb$hb.mean*inventoryjahb$mean*.3048*1e-9
#inventoryjahlm$inventory<-area.dom*porosity.mean*inventoryjahlm$hlm.mean*inventoryjahlm$mean*.3048*1e-9
#
#selectyear<-function (x,y) {subset(x, x$MYEAR == y)}
#tritium.1984<-selectyear(tritium, 1984)
#TCCZ.lm<-lm(TCCZ_top~UTM_E+UTM_N,data=TCCZe,na.action=na.omit)
#tritium.interp<-interp(tritium$EASTING, tritium$NORTHING, tritium$mean, xo=seq(ea.min, ea.max, length = ea.b), yo=seq(no.min, no.max, length = no.b), linear = TRUE, extrap=FALSE, duplicate = "mean")
#wlavg.lm<-lm(mean~EASTING+NORTHING, data =wlavg)
#prewllm<-predict(wlavg.lm,newdata = testgrid1,se = TRUE ,na.action = na.omit)
# #Test interpolation debugging with smaller matrix
# #number of breaks
# ea.b2<-15
# no.b2<-20
#
# #ea
# ea.v2<-seq(ea.min, ea.max, length = ea.b2)
# no.v2<-seq(no.min, no.max, length = no.b2)
#
# wlavg.interp2<-interp(wlavg$EASTING, wlavg$NORTHING, wlavg$mean, xo=ea.v2, yo=no.v2, linear = TRUE, extrap=FALSE, duplicate = "mean")
# # Flip matrix (upside-down)
# flip.matrix <- function(x) {
# mirror.matrix(rotate180.matrix(x))
# }
# # Rotate matrix 180 degrees clockwise
# rotate180.matrix <- function(x) {
# xx <- rev(x);
# dim(xx) <- dim(x);
# xx;
# }
#
# # Rotate matrix 270 degrees clockwise
# rotate270.matrix <- function(x) {
# mirror.matrix(t(x))
# }
# # Debug Statements for interp output
# tx<-matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE)
# ty<-matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE)
# tz<-wlavg.interp2$z
# tzprime<-t(wlavg.interp2$z)
# tz90<-rotate90.matrix(tz)
#TCCZ.loess1.interp<-list(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=matrix(pre3$fit,nrow=60, ncol=50, byrow=TRUE))
#TCCZ.loess1b.interp<-list(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=matrix(pre3$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=matrix(pre3$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=matrix(pre3b$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=matrix(pre3lm$fit,nrow=60, ncol=50, byrow=TRUE))
#
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=rotate270.matrix(wlavg.interp$z)-matrix(pre3$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=rotate90.matrix(wlavg.interp$z)-matrix(pre3b$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=rotate90.matrix(wlavg.interp$z)-matrix(pre3lm$fit,nrow=60, ncol=50, byrow=TRUE))
#
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=wlavg.interp$z-matrix(pre3$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=wlavg.interp$z-matrix(pre3b$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(x=matrix(testgrid1$EASTING,nrow=60, ncol=50, byrow=TRUE),y=matrix(testgrid1$NORTHING,nrow=60, ncol=50, byrow=TRUE),z=wlavg.interp$z-matrix(pre3lm$fit,nrow=60, ncol=50, byrow=TRUE))
# image.plot(wlavg.interp)
# dim(wlavg.interp$z)
# image.plot(TCCZ.loess1.interp)
# points(TCCZe$EASTING, TCCZe$NORTHING, pch=19)
# text(TCCZe$EASTING, TCCZe$NORTHING, labels=TCCZe$STATION_ID)
# wlavg.interp<-interp(wlavg$EASTING, wlavg$NORTHING, wlavg$mean, xo=ea.v, yo=no.v, linear = TRUE, duplicate = "mean")
####################################
#Loop test
#
#
# t.loess<-loess(mean~EASTING+NORTHING, data=tritiuml2[['1996']],degree=1,span=0.5)
# logt.loess<-loess(logmean~EASTING+NORTHING, data=tritiuml2[['1996']],degree=1,span=0.5)
# predt<-predict(t.loess,newdata = testgrid1 ,se = TRUE)
# predlogt<-predict(logt.loess,newdata = testgrid1 ,se = TRUE)
# T1996<-as.vector(predt$fit)
# Tcl1996<-T1996
# Tcl1996[Tcl1996<0]<-NA
# LogT1996<-as.vector(predlogt$fit)
# Tprime1996<-exp(LogT1996)
# w.loess<-loess(mean~EASTING+NORTHING, data=wll2[['1996']],degree=1, span=0.5)
# predw<-predict(w.loess,newdata = testgrid1 ,se = TRUE)
# w1996<-as.vector(predw$fit)
# h1996<-as.vector(predw$fit)-inv5$TCCZfitb
# h1996[h1996<1]<-NA
# ch1996<-T1996*h1996
# chcl1996<-Tcl1996*h1996
# chprime1996<-Tprime1996*h1996
#
# t1<-area.dom*porosity.mean*mean(ch1996,na.rm=TRUE)*1e-9*.3048
# tcl1<-area.dom*porosity.mean*mean(chcl1996,na.rm=TRUE)*1e-9*.3048
# tlog1<-area.dom*porosity.mean*mean(chprime1996,na.rm=TRUE)*1e-9*.3048
# #image and contour plot for the TCCZ
# image.plot(TCCZ.interp)
# contour(TCCZ.interp$x,TCCZ.interp$y,TCCZ.interp$z)
# points(f3basin27$UTM_E,f3basin27$UTM_N,type="l")
# points(TCCZe$EASTING, TCCZe$NORTHING, pch=19)
# text(TCCZe$UTM_E, TCCZe$UTM_N, labels=TCCZe$UWI)
#
# #image and contour plot for the water level
# image.plot(wlavg.interp)
# contour(wlavg.interp$x,wlavg.interp$y,wlavg.interp$z)
#
# #
# #image and contour plot for the water level
# #image.plot(wlavg.interp)
# contour(wlavg.interp$x,wlavg.interp$y,wlavg.interp$z-TCCZ.interp$z, asp = 1, bty ="n")
# #xlim=c(ea.min,ea.max), ylim=c(no.min,no.max),
# #points(f3basin$UTM_E,f3basin$UTM_N,type="l")
# points(f3basin27$UTM_E,f3basin27$UTM_N,type="l")
# points(wlavg$EASTING, wlavg$NORTHING, pch=19)
# text(wlavg$EASTING, wlavg$NORTHING, labels=wlavg$STATION_ID)
#Define n zones
#Compute trend planes in these zones for waterlevels and TCCZ
#Do the difference to get the thickness |
#set posterior probability thresholds----
positive_change <- 0.25
no_change <- 0.02
#set filter thresholds----
coverage <- 1
fp_coverage <- 1.5
tp_trim <- 125
set.seed(020588) | /R_scripts/Structure_seq_variables.R | no_license | Bushell-lab/Structure-seq2-with-hippuristanol-treatment-in-MCF7-cells | R | false | false | 188 | r | #set posterior probability thresholds----
positive_change <- 0.25
no_change <- 0.02
#set filter thresholds----
coverage <- 1
fp_coverage <- 1.5
tp_trim <- 125
set.seed(020588) |
setwd("/media/alf/datos/drive/CEU/DOCENCIA/ejercicios/ejercicios_estadistica")
library(tikzDevice)
library(plyr)
library(plotly)
require(Hmisc)
# Polígono de frecuencias acumuladas del tiempo de duración de un examen
time = rep(c(30,60,90,120,150),c(9,6,14,26,11))
tikz(file="img/descriptiva/poligono_acumulado_tiempo_examen.tex", width=7, height=5)
par(cex.lab=1.2)
h <- hist(time, breaks=c(0,30,60,90,120,150), plot=FALSE)
h$counts <- cumsum(h[["counts"]])
freq <- c(0, h[["counts"]])
plot(h$breaks, freq, type="o", lwd=3, pch=16, col="royalblue", main="Tiempo en finalizar el examen", xlab="Tiempo (en minutos)", ylab="Número de estudiantes", axes=FALSE)
axis(1, at = c(0,30,60,90,120,150))
axis(2, at = seq(0,65,by=5))
abline(h=seq(0,65,by=5), col="gray", lty=3)
dev.off()
# Histograma del ímc por sexo
imc=rep(c(17.5,22.5,27.5,32.5,17.5,22.5,27.5,32.5,37.5),c(9,30,5,1,7,25,10,5,1))
gender=factor(rep(c("Male","Female"),c(45,48)))
tikz(file="img/descriptiva/histograma_imc_sexo.tex", width=5, height=5)
options(digits=1)
par(cex.lab=1.2)
out <- histbackback(split(imc, gender), xlim=c(-30,30), brks=c(15,20,25,30,35,40), main = 'Histograma del Índicie de Masa Corporal según Sexo', xlab=c("Mujeres", "Hombres"), ylab="IMC")
abline(v= (-25:25)*5 , col ="gray" , lty =3)
barplot(-out$left, col="coral" , horiz=TRUE, space=0, add=TRUE, axes=FALSE)
barplot(out$right, col="royalblue1", horiz=TRUE, space=0, add=TRUE, axes=FALSE)
dev.off()
# Diagrama de caja
tikz(file="img/descriptiva/diagrama_caja_seguro_salud.tex", width=5, height=5)
par(cex.lab=1.2)
times=rep(c(0,1,2,3,4,5,7),c(4,8,6,3,2,1,1))
boxplot(times, main="Diagrama de caja del uso anual de un seguro de salud", xlab="Usos",col="coral", horizontal=TRUE)
dev.off()
# Diagrama de caja de edad según estado civil
tikz(file="img/descriptiva/diagrama_caja_edad_estado_civil.tex", width=5, height=5)
status=c("S","S","S","S","S","S","S","S","S","D","D","D","D","D","D","V","V","V","V","V","V","V","C","C","C","C","C")
age=c(31,45,45,35,21,38,62,32,31,62,34,52,59,69,62,80,68,65,40,78,69,75,31,65,59,51,71)
par(cex.lab=1.2)
boxplot(age~status, main="Diagrama de caja de edades según estado civil", xlab="Edad",col=rainbow(4,s=0.6), horizontal=TRUE)
dev.off()
| /img/descriptiva.R | no_license | asalber/ejercicios-estadistica | R | false | false | 2,234 | r | setwd("/media/alf/datos/drive/CEU/DOCENCIA/ejercicios/ejercicios_estadistica")
library(tikzDevice)
library(plyr)
library(plotly)
require(Hmisc)
# Polígono de frecuencias acumuladas del tiempo de duración de un examen
time = rep(c(30,60,90,120,150),c(9,6,14,26,11))
tikz(file="img/descriptiva/poligono_acumulado_tiempo_examen.tex", width=7, height=5)
par(cex.lab=1.2)
h <- hist(time, breaks=c(0,30,60,90,120,150), plot=FALSE)
h$counts <- cumsum(h[["counts"]])
freq <- c(0, h[["counts"]])
plot(h$breaks, freq, type="o", lwd=3, pch=16, col="royalblue", main="Tiempo en finalizar el examen", xlab="Tiempo (en minutos)", ylab="Número de estudiantes", axes=FALSE)
axis(1, at = c(0,30,60,90,120,150))
axis(2, at = seq(0,65,by=5))
abline(h=seq(0,65,by=5), col="gray", lty=3)
dev.off()
# Histograma del ímc por sexo
imc=rep(c(17.5,22.5,27.5,32.5,17.5,22.5,27.5,32.5,37.5),c(9,30,5,1,7,25,10,5,1))
gender=factor(rep(c("Male","Female"),c(45,48)))
tikz(file="img/descriptiva/histograma_imc_sexo.tex", width=5, height=5)
options(digits=1)
par(cex.lab=1.2)
out <- histbackback(split(imc, gender), xlim=c(-30,30), brks=c(15,20,25,30,35,40), main = 'Histograma del Índicie de Masa Corporal según Sexo', xlab=c("Mujeres", "Hombres"), ylab="IMC")
abline(v= (-25:25)*5 , col ="gray" , lty =3)
barplot(-out$left, col="coral" , horiz=TRUE, space=0, add=TRUE, axes=FALSE)
barplot(out$right, col="royalblue1", horiz=TRUE, space=0, add=TRUE, axes=FALSE)
dev.off()
# Diagrama de caja
tikz(file="img/descriptiva/diagrama_caja_seguro_salud.tex", width=5, height=5)
par(cex.lab=1.2)
times=rep(c(0,1,2,3,4,5,7),c(4,8,6,3,2,1,1))
boxplot(times, main="Diagrama de caja del uso anual de un seguro de salud", xlab="Usos",col="coral", horizontal=TRUE)
dev.off()
# Diagrama de caja de edad según estado civil
tikz(file="img/descriptiva/diagrama_caja_edad_estado_civil.tex", width=5, height=5)
status=c("S","S","S","S","S","S","S","S","S","D","D","D","D","D","D","V","V","V","V","V","V","V","C","C","C","C","C")
age=c(31,45,45,35,21,38,62,32,31,62,34,52,59,69,62,80,68,65,40,78,69,75,31,65,59,51,71)
par(cex.lab=1.2)
boxplot(age~status, main="Diagrama de caja de edades según estado civil", xlab="Edad",col=rainbow(4,s=0.6), horizontal=TRUE)
dev.off()
|
#' @importClassesFrom Matrix dgCMatrix dgeMatrix
#' @import methods
# depends on matrix
.onLoad <- function(libname, pkgname) {
library.dynam("xgboost", pkgname, libname)
}
.onUnload <- function(libpath) {
library.dynam.unload("xgboost", libpath)
}
# set information into dmatrix, this mutate dmatrix
xgb.setinfo <- function(dmat, name, info) {
if (class(dmat) != "xgb.DMatrix") {
stop("xgb.setinfo: first argument dtrain must be xgb.DMatrix")
}
if (name == "label") {
.Call("XGDMatrixSetInfo_R", dmat, name, as.numeric(info),
PACKAGE = "xgboost")
return(TRUE)
}
if (name == "weight") {
.Call("XGDMatrixSetInfo_R", dmat, name, as.numeric(info),
PACKAGE = "xgboost")
return(TRUE)
}
if (name == "base_margin") {
.Call("XGDMatrixSetInfo_R", dmat, name, as.numeric(info),
PACKAGE = "xgboost")
return(TRUE)
}
if (name == "group") {
.Call("XGDMatrixSetInfo_R", dmat, name, as.integer(info),
PACKAGE = "xgboost")
return(TRUE)
}
stop(paste("xgb.setinfo: unknown info name", name))
return(FALSE)
}
# construct a Booster from cachelist
xgb.Booster <- function(params = list(), cachelist = list(), modelfile = NULL) {
if (typeof(cachelist) != "list") {
stop("xgb.Booster: only accepts list of DMatrix as cachelist")
}
for (dm in cachelist) {
if (class(dm) != "xgb.DMatrix") {
stop("xgb.Booster: only accepts list of DMatrix as cachelist")
}
}
handle <- .Call("XGBoosterCreate_R", cachelist, PACKAGE = "xgboost")
if (length(params) != 0) {
for (i in 1:length(params)) {
p <- params[i]
.Call("XGBoosterSetParam_R", handle, gsub("\\.", "_", names(p)), as.character(p),
PACKAGE = "xgboost")
}
}
if (!is.null(modelfile)) {
if (typeof(modelfile) != "character") {
stop("xgb.Booster: modelfile must be character")
}
.Call("XGBoosterLoadModel_R", handle, modelfile, PACKAGE = "xgboost")
}
return(structure(handle, class = "xgb.Booster"))
}
## ----the following are low level iteratively function, not needed if
## you do not want to use them ---------------------------------------
# get dmatrix from data, label
xgb.get.DMatrix <- function(data, label = NULL) {
inClass <- class(data)
if (inClass == "dgCMatrix" || inClass == "matrix") {
if (is.null(label)) {
stop("xgboost: need label when data is a matrix")
}
dtrain <- xgb.DMatrix(data, label = label)
} else {
if (!is.null(label)) {
warning("xgboost: label will be ignored.")
}
if (inClass == "character") {
dtrain <- xgb.DMatrix(data)
} else if (inClass == "xgb.DMatrix") {
dtrain <- data
} else {
stop("xgboost: Invalid input of data")
}
}
return (dtrain)
}
xgb.numrow <- function(dmat) {
nrow <- .Call("XGDMatrixNumRow_R", dmat, PACKAGE="xgboost")
return(nrow)
}
# iteratively update booster with customized statistics
xgb.iter.boost <- function(booster, dtrain, gpair) {
if (class(booster) != "xgb.Booster") {
stop("xgb.iter.update: first argument must be type xgb.Booster")
}
if (class(dtrain) != "xgb.DMatrix") {
stop("xgb.iter.update: second argument must be type xgb.DMatrix")
}
.Call("XGBoosterBoostOneIter_R", booster, dtrain, gpair$grad, gpair$hess,
PACKAGE = "xgboost")
return(TRUE)
}
# iteratively update booster with dtrain
xgb.iter.update <- function(booster, dtrain, iter, obj = NULL) {
if (class(booster) != "xgb.Booster") {
stop("xgb.iter.update: first argument must be type xgb.Booster")
}
if (class(dtrain) != "xgb.DMatrix") {
stop("xgb.iter.update: second argument must be type xgb.DMatrix")
}
if (is.null(obj)) {
.Call("XGBoosterUpdateOneIter_R", booster, as.integer(iter), dtrain,
PACKAGE = "xgboost")
} else {
pred <- predict(booster, dtrain)
gpair <- obj(pred, dtrain)
succ <- xgb.iter.boost(booster, dtrain, gpair)
}
return(TRUE)
}
# iteratively evaluate one iteration
xgb.iter.eval <- function(booster, watchlist, iter, feval = NULL) {
if (class(booster) != "xgb.Booster") {
stop("xgb.eval: first argument must be type xgb.Booster")
}
if (typeof(watchlist) != "list") {
stop("xgb.eval: only accepts list of DMatrix as watchlist")
}
for (w in watchlist) {
if (class(w) != "xgb.DMatrix") {
stop("xgb.eval: watch list can only contain xgb.DMatrix")
}
}
if (length(watchlist) != 0) {
if (is.null(feval)) {
evnames <- list()
for (i in 1:length(watchlist)) {
w <- watchlist[i]
if (length(names(w)) == 0) {
stop("xgb.eval: name tag must be presented for every elements in watchlist")
}
evnames <- append(evnames, names(w))
}
msg <- .Call("XGBoosterEvalOneIter_R", booster, as.integer(iter), watchlist,
evnames, PACKAGE = "xgboost")
} else {
msg <- paste("[", iter, "]", sep="")
for (j in 1:length(watchlist)) {
w <- watchlist[j]
if (length(names(w)) == 0) {
stop("xgb.eval: name tag must be presented for every elements in watchlist")
}
ret <- feval(predict(booster, w[[1]]), w[[1]])
msg <- paste(msg, "\t", names(w), "-", ret$metric, ":", ret$value, sep="")
}
}
} else {
msg <- ""
}
return(msg)
}
#------------------------------------------
# helper functions for cross validation
#
xgb.cv.mknfold <- function(dall, nfold, param) {
randidx <- sample(1 : xgb.numrow(dall))
kstep <- length(randidx) / nfold
idset <- list()
for (i in 1:nfold) {
idset[[i]] <- randidx[ ((i-1) * kstep + 1) : min(i * kstep, length(randidx)) ]
}
ret <- list()
for (k in 1:nfold) {
dtest <- slice(dall, idset[[k]])
didx = c()
for (i in 1:nfold) {
if (i != k) {
didx <- append(didx, idset[[i]])
}
}
dtrain <- slice(dall, didx)
bst <- xgb.Booster(param, list(dtrain, dtest))
watchlist = list(train=dtrain, test=dtest)
ret[[k]] <- list(dtrain=dtrain, booster=bst, watchlist=watchlist)
}
return (ret)
}
xgb.cv.aggcv <- function(res, showsd = TRUE) {
header <- res[[1]]
ret <- header[1]
for (i in 2:length(header)) {
kv <- strsplit(header[i], ":")[[1]]
ret <- paste(ret, "\t", kv[1], ":", sep="")
stats <- c()
stats[1] <- as.numeric(kv[2])
for (j in 2:length(res)) {
tkv <- strsplit(res[[j]][i], ":")[[1]]
stats[j] <- as.numeric(tkv[2])
}
ret <- paste(ret, sprintf("%f", mean(stats)), sep="")
if (showsd) {
ret <- paste(ret, sprintf("+%f", sd(stats)), sep="")
}
}
return (ret)
}
| /xgboost/R-package/R/utils.R | permissive | hetong007/higgsml | R | false | false | 6,625 | r | #' @importClassesFrom Matrix dgCMatrix dgeMatrix
#' @import methods
# depends on matrix
.onLoad <- function(libname, pkgname) {
library.dynam("xgboost", pkgname, libname)
}
.onUnload <- function(libpath) {
library.dynam.unload("xgboost", libpath)
}
# set information into dmatrix, this mutate dmatrix
xgb.setinfo <- function(dmat, name, info) {
if (class(dmat) != "xgb.DMatrix") {
stop("xgb.setinfo: first argument dtrain must be xgb.DMatrix")
}
if (name == "label") {
.Call("XGDMatrixSetInfo_R", dmat, name, as.numeric(info),
PACKAGE = "xgboost")
return(TRUE)
}
if (name == "weight") {
.Call("XGDMatrixSetInfo_R", dmat, name, as.numeric(info),
PACKAGE = "xgboost")
return(TRUE)
}
if (name == "base_margin") {
.Call("XGDMatrixSetInfo_R", dmat, name, as.numeric(info),
PACKAGE = "xgboost")
return(TRUE)
}
if (name == "group") {
.Call("XGDMatrixSetInfo_R", dmat, name, as.integer(info),
PACKAGE = "xgboost")
return(TRUE)
}
stop(paste("xgb.setinfo: unknown info name", name))
return(FALSE)
}
# construct a Booster from cachelist
xgb.Booster <- function(params = list(), cachelist = list(), modelfile = NULL) {
if (typeof(cachelist) != "list") {
stop("xgb.Booster: only accepts list of DMatrix as cachelist")
}
for (dm in cachelist) {
if (class(dm) != "xgb.DMatrix") {
stop("xgb.Booster: only accepts list of DMatrix as cachelist")
}
}
handle <- .Call("XGBoosterCreate_R", cachelist, PACKAGE = "xgboost")
if (length(params) != 0) {
for (i in 1:length(params)) {
p <- params[i]
.Call("XGBoosterSetParam_R", handle, gsub("\\.", "_", names(p)), as.character(p),
PACKAGE = "xgboost")
}
}
if (!is.null(modelfile)) {
if (typeof(modelfile) != "character") {
stop("xgb.Booster: modelfile must be character")
}
.Call("XGBoosterLoadModel_R", handle, modelfile, PACKAGE = "xgboost")
}
return(structure(handle, class = "xgb.Booster"))
}
## ----the following are low level iteratively function, not needed if
## you do not want to use them ---------------------------------------
# get dmatrix from data, label
xgb.get.DMatrix <- function(data, label = NULL) {
inClass <- class(data)
if (inClass == "dgCMatrix" || inClass == "matrix") {
if (is.null(label)) {
stop("xgboost: need label when data is a matrix")
}
dtrain <- xgb.DMatrix(data, label = label)
} else {
if (!is.null(label)) {
warning("xgboost: label will be ignored.")
}
if (inClass == "character") {
dtrain <- xgb.DMatrix(data)
} else if (inClass == "xgb.DMatrix") {
dtrain <- data
} else {
stop("xgboost: Invalid input of data")
}
}
return (dtrain)
}
xgb.numrow <- function(dmat) {
nrow <- .Call("XGDMatrixNumRow_R", dmat, PACKAGE="xgboost")
return(nrow)
}
# iteratively update booster with customized statistics
xgb.iter.boost <- function(booster, dtrain, gpair) {
if (class(booster) != "xgb.Booster") {
stop("xgb.iter.update: first argument must be type xgb.Booster")
}
if (class(dtrain) != "xgb.DMatrix") {
stop("xgb.iter.update: second argument must be type xgb.DMatrix")
}
.Call("XGBoosterBoostOneIter_R", booster, dtrain, gpair$grad, gpair$hess,
PACKAGE = "xgboost")
return(TRUE)
}
# iteratively update booster with dtrain
xgb.iter.update <- function(booster, dtrain, iter, obj = NULL) {
if (class(booster) != "xgb.Booster") {
stop("xgb.iter.update: first argument must be type xgb.Booster")
}
if (class(dtrain) != "xgb.DMatrix") {
stop("xgb.iter.update: second argument must be type xgb.DMatrix")
}
if (is.null(obj)) {
.Call("XGBoosterUpdateOneIter_R", booster, as.integer(iter), dtrain,
PACKAGE = "xgboost")
} else {
pred <- predict(booster, dtrain)
gpair <- obj(pred, dtrain)
succ <- xgb.iter.boost(booster, dtrain, gpair)
}
return(TRUE)
}
# iteratively evaluate one iteration
xgb.iter.eval <- function(booster, watchlist, iter, feval = NULL) {
if (class(booster) != "xgb.Booster") {
stop("xgb.eval: first argument must be type xgb.Booster")
}
if (typeof(watchlist) != "list") {
stop("xgb.eval: only accepts list of DMatrix as watchlist")
}
for (w in watchlist) {
if (class(w) != "xgb.DMatrix") {
stop("xgb.eval: watch list can only contain xgb.DMatrix")
}
}
if (length(watchlist) != 0) {
if (is.null(feval)) {
evnames <- list()
for (i in 1:length(watchlist)) {
w <- watchlist[i]
if (length(names(w)) == 0) {
stop("xgb.eval: name tag must be presented for every elements in watchlist")
}
evnames <- append(evnames, names(w))
}
msg <- .Call("XGBoosterEvalOneIter_R", booster, as.integer(iter), watchlist,
evnames, PACKAGE = "xgboost")
} else {
msg <- paste("[", iter, "]", sep="")
for (j in 1:length(watchlist)) {
w <- watchlist[j]
if (length(names(w)) == 0) {
stop("xgb.eval: name tag must be presented for every elements in watchlist")
}
ret <- feval(predict(booster, w[[1]]), w[[1]])
msg <- paste(msg, "\t", names(w), "-", ret$metric, ":", ret$value, sep="")
}
}
} else {
msg <- ""
}
return(msg)
}
#------------------------------------------
# helper functions for cross validation
#
xgb.cv.mknfold <- function(dall, nfold, param) {
randidx <- sample(1 : xgb.numrow(dall))
kstep <- length(randidx) / nfold
idset <- list()
for (i in 1:nfold) {
idset[[i]] <- randidx[ ((i-1) * kstep + 1) : min(i * kstep, length(randidx)) ]
}
ret <- list()
for (k in 1:nfold) {
dtest <- slice(dall, idset[[k]])
didx = c()
for (i in 1:nfold) {
if (i != k) {
didx <- append(didx, idset[[i]])
}
}
dtrain <- slice(dall, didx)
bst <- xgb.Booster(param, list(dtrain, dtest))
watchlist = list(train=dtrain, test=dtest)
ret[[k]] <- list(dtrain=dtrain, booster=bst, watchlist=watchlist)
}
return (ret)
}
xgb.cv.aggcv <- function(res, showsd = TRUE) {
header <- res[[1]]
ret <- header[1]
for (i in 2:length(header)) {
kv <- strsplit(header[i], ":")[[1]]
ret <- paste(ret, "\t", kv[1], ":", sep="")
stats <- c()
stats[1] <- as.numeric(kv[2])
for (j in 2:length(res)) {
tkv <- strsplit(res[[j]][i], ":")[[1]]
stats[j] <- as.numeric(tkv[2])
}
ret <- paste(ret, sprintf("%f", mean(stats)), sep="")
if (showsd) {
ret <- paste(ret, sprintf("+%f", sd(stats)), sep="")
}
}
return (ret)
}
|
##Copyright R. Gentleman, 2004
##simple functions to get Evidence codes
.isMissingGOEntry <- function(x) (length(x) == 1L && is.na(x))
##get then GO term names for a particular (sub)ontology
getOntology = function(inlist, ontology=c("MF", "BP", "CC")) {
which = match.arg(ontology)
onts = sapply(inlist, function(z) {
if (!.isMissingGOEntry(z))
z$Ontology
else
z
})
onts = onts[!is.na(onts)]
unique(names(inlist[onts %in% which]))
}
##get GO evidence codes
getEvidence = function(inlist) {
ans <- sapply(inlist, function(z) {
if (!.isMissingGOEntry(z))
z$Evidence
else
z
})
ans[!is.na(ans)]
}
##drop a specified set of evidence codes
dropECode = function(inlist, code = "IEA") {
hasCode = sapply(inlist, function(z) {
if (!.isMissingGOEntry(z))
z$Evidence
else
z
})
hasCode <- hasCode[!is.na(hasCode)]
badVals = hasCode %in% code
inlist[!badVals]
}
## helper function, determines if there is a GO annotation for the
## desired mode
hasGOannote <- function(x, which="MF") {
if (is(x, "GOTerms")) {
cat <- Ontology(x)
if (!is.na(cat) && cat == which)
return(TRUE) else return(FALSE)
}
if (is.list(x)) {
gT <- sapply(x, function(y) is(y, "GOTerms"))
if (any(gT)) {
if (all(gT)) {
cats <- sapply(x, Ontology)
return(cats == which)
}
else
stop("mixed arguments not allowed")
}
}
if (!is.character(x))
stop("wrong argument")
tm <- getGOOntology(x)
return(tm == which)
}
##three functions to get all the GO information for a set of GO terms
##FIXME: these need to be renovated - probably removed even..
getGOOntology <- function(x) {
if( !is.character(x) )
stop("need a character argument")
if(length(x) == 0 )
return( character(0))
loadNamespace("GO.db")
wh <- mget(x, envir=GO.db::GOTERM, ifnotfound=NA)
return( sapply(wh, Ontology) )
}
getGOParents <- function(x) {
if( !is.character(x) )
stop("need a character argument")
if(length(x) == 0 )
return(list())
loadNamespace("GO.db")
hasMF <- mget(x, envir=GO.db::GOMFPARENTS, ifnotfound=NA)
hasBP <- mget(x, envir=GO.db::GOBPPARENTS, ifnotfound=NA)
hasCC <- mget(x, envir=GO.db::GOCCPARENTS, ifnotfound=NA)
lenx <- length(x)
rval <- vector("list", length=lenx)
names(rval) <- x
rval <- vector("list", length=lenx)
names(rval) <- x
for(i in 1:lenx) {
if( (length(hasMF[[i]]) > 1 ) || !is.na(hasMF[[i]]) )
rval[[i]] <- list(Ontology="MF", Parents=hasMF[[i]])
else if( (length(hasMF[[i]]) > 1 ) || !is.na(hasBP[[i]]) )
rval[[i]] <- list(Ontology="BP", Parents=hasBP[[i]])
else if( (length(hasMF[[i]]) > 1 ) || !is.na(hasCC[[i]]) )
rval[[i]] <- list(Ontology="CC", Parents=hasCC[[i]])
else
stop(paste(x[i], "is not a member of any ontology"))
}
return(rval)
}
getGOChildren <- function(x) {
if( !is.character(x) )
stop("need a character argument")
if(length(x) == 0 )
return(list())
loadNamespace("GO.db")
hasMF <- mget(x, envir=GO.db::GOMFCHILDREN, ifnotfound=NA)
hasBP <- mget(x, envir=GO.db::GOBPCHILDREN, ifnotfound=NA)
hasCC <- mget(x, envir=GO.db::GOCCCHILDREN, ifnotfound=NA)
lenx <- length(x)
rval <- vector("list", length=lenx)
names(rval) <- x
for(i in 1:lenx) {
if( (length(hasMF[[i]]) > 1 ) || !is.na(hasMF[[i]]) )
rval[[i]] <- list(Ontology="MF", Children=hasMF[[i]])
else if( (length(hasMF[[i]]) > 1 ) || !is.na(hasBP[[i]]) )
rval[[i]] <- list(Ontology="BP", Children=hasBP[[i]])
else if( (length(hasMF[[i]]) > 1 ) || !is.na(hasCC[[i]]) )
rval[[i]] <- list(Ontology="CC", Children=hasCC[[i]])
else
rval[[i]] <- list()
}
return(rval)
}
getGOTerm <- function(x) {
if( !is.character(x) )
stop("need a character argument")
if(length(x) == 0 )
return(list())
loadNamespace("GO.db")
terms <- mget(x, envir=GO.db::GOTERM, ifnotfound=NA)
isNA = sapply(terms,function(x) !(isS4(x) && is(x, "GOTerms")))
if( any(isNA) )
terms = terms[!isNA]
ontology <- sapply(terms, Ontology)
terms = sapply(terms, Term)
return(split(terms, ontology))
}
filterGOByOntology <- function(goids, ontology=c("BP", "CC", "MF")) {
ontology <- match.arg(ontology)
eName <- switch(ontology,
BP="GOBPPARENTS",
CC="GOCCPARENTS",
MF="GOMFPARENTS",
stop("invalid ontology ", ontology))
e <- get(eName)
goids %in% ls(e)
}
aqListGOIDs <- function(ont) {
## Return all GO IDs in the specified ontologies
ont <- unique(ont)
knownOnts <- c("BP", "CC", "MF")
badOnt <- ont[!(ont %in% knownOnts)]
if (length(badOnt))
stop("Unknown ontology codes: ", paste(badOnt, collapse=", "),
"\nvalid codes are: ", paste(knownOnts, collapse=", "))
## determine size
lens <- integer(length(ont))
for (i in seq(along=ont))
lens[i] <- length(getAnnMap(paste(ont[i], "PARENTS", sep=""),
chip="GO"))
## retrieve IDs
ans <- character(sum(lens))
lens <- c(0L, lens)
for (i in seq(along=ont)) {
ans[lens[i]+1:lens[i+1]] <- ls(getAnnMap(paste(ont[i], "PARENTS", sep=""),
chip="GO"))
}
ans
}
| /R/GOhelpers.R | no_license | bedatadriven/annotate | R | false | false | 5,754 | r | ##Copyright R. Gentleman, 2004
##simple functions to get Evidence codes
.isMissingGOEntry <- function(x) (length(x) == 1L && is.na(x))
##get then GO term names for a particular (sub)ontology
getOntology = function(inlist, ontology=c("MF", "BP", "CC")) {
which = match.arg(ontology)
onts = sapply(inlist, function(z) {
if (!.isMissingGOEntry(z))
z$Ontology
else
z
})
onts = onts[!is.na(onts)]
unique(names(inlist[onts %in% which]))
}
##get GO evidence codes
getEvidence = function(inlist) {
ans <- sapply(inlist, function(z) {
if (!.isMissingGOEntry(z))
z$Evidence
else
z
})
ans[!is.na(ans)]
}
##drop a specified set of evidence codes
dropECode = function(inlist, code = "IEA") {
hasCode = sapply(inlist, function(z) {
if (!.isMissingGOEntry(z))
z$Evidence
else
z
})
hasCode <- hasCode[!is.na(hasCode)]
badVals = hasCode %in% code
inlist[!badVals]
}
## helper function, determines if there is a GO annotation for the
## desired mode
hasGOannote <- function(x, which="MF") {
if (is(x, "GOTerms")) {
cat <- Ontology(x)
if (!is.na(cat) && cat == which)
return(TRUE) else return(FALSE)
}
if (is.list(x)) {
gT <- sapply(x, function(y) is(y, "GOTerms"))
if (any(gT)) {
if (all(gT)) {
cats <- sapply(x, Ontology)
return(cats == which)
}
else
stop("mixed arguments not allowed")
}
}
if (!is.character(x))
stop("wrong argument")
tm <- getGOOntology(x)
return(tm == which)
}
##three functions to get all the GO information for a set of GO terms
##FIXME: these need to be renovated - probably removed even..
getGOOntology <- function(x) {
if( !is.character(x) )
stop("need a character argument")
if(length(x) == 0 )
return( character(0))
loadNamespace("GO.db")
wh <- mget(x, envir=GO.db::GOTERM, ifnotfound=NA)
return( sapply(wh, Ontology) )
}
getGOParents <- function(x) {
if( !is.character(x) )
stop("need a character argument")
if(length(x) == 0 )
return(list())
loadNamespace("GO.db")
hasMF <- mget(x, envir=GO.db::GOMFPARENTS, ifnotfound=NA)
hasBP <- mget(x, envir=GO.db::GOBPPARENTS, ifnotfound=NA)
hasCC <- mget(x, envir=GO.db::GOCCPARENTS, ifnotfound=NA)
lenx <- length(x)
rval <- vector("list", length=lenx)
names(rval) <- x
rval <- vector("list", length=lenx)
names(rval) <- x
for(i in 1:lenx) {
if( (length(hasMF[[i]]) > 1 ) || !is.na(hasMF[[i]]) )
rval[[i]] <- list(Ontology="MF", Parents=hasMF[[i]])
else if( (length(hasMF[[i]]) > 1 ) || !is.na(hasBP[[i]]) )
rval[[i]] <- list(Ontology="BP", Parents=hasBP[[i]])
else if( (length(hasMF[[i]]) > 1 ) || !is.na(hasCC[[i]]) )
rval[[i]] <- list(Ontology="CC", Parents=hasCC[[i]])
else
stop(paste(x[i], "is not a member of any ontology"))
}
return(rval)
}
getGOChildren <- function(x) {
if( !is.character(x) )
stop("need a character argument")
if(length(x) == 0 )
return(list())
loadNamespace("GO.db")
hasMF <- mget(x, envir=GO.db::GOMFCHILDREN, ifnotfound=NA)
hasBP <- mget(x, envir=GO.db::GOBPCHILDREN, ifnotfound=NA)
hasCC <- mget(x, envir=GO.db::GOCCCHILDREN, ifnotfound=NA)
lenx <- length(x)
rval <- vector("list", length=lenx)
names(rval) <- x
for(i in 1:lenx) {
if( (length(hasMF[[i]]) > 1 ) || !is.na(hasMF[[i]]) )
rval[[i]] <- list(Ontology="MF", Children=hasMF[[i]])
else if( (length(hasMF[[i]]) > 1 ) || !is.na(hasBP[[i]]) )
rval[[i]] <- list(Ontology="BP", Children=hasBP[[i]])
else if( (length(hasMF[[i]]) > 1 ) || !is.na(hasCC[[i]]) )
rval[[i]] <- list(Ontology="CC", Children=hasCC[[i]])
else
rval[[i]] <- list()
}
return(rval)
}
getGOTerm <- function(x) {
if( !is.character(x) )
stop("need a character argument")
if(length(x) == 0 )
return(list())
loadNamespace("GO.db")
terms <- mget(x, envir=GO.db::GOTERM, ifnotfound=NA)
isNA = sapply(terms,function(x) !(isS4(x) && is(x, "GOTerms")))
if( any(isNA) )
terms = terms[!isNA]
ontology <- sapply(terms, Ontology)
terms = sapply(terms, Term)
return(split(terms, ontology))
}
filterGOByOntology <- function(goids, ontology=c("BP", "CC", "MF")) {
ontology <- match.arg(ontology)
eName <- switch(ontology,
BP="GOBPPARENTS",
CC="GOCCPARENTS",
MF="GOMFPARENTS",
stop("invalid ontology ", ontology))
e <- get(eName)
goids %in% ls(e)
}
aqListGOIDs <- function(ont) {
## Return all GO IDs in the specified ontologies
ont <- unique(ont)
knownOnts <- c("BP", "CC", "MF")
badOnt <- ont[!(ont %in% knownOnts)]
if (length(badOnt))
stop("Unknown ontology codes: ", paste(badOnt, collapse=", "),
"\nvalid codes are: ", paste(knownOnts, collapse=", "))
## determine size
lens <- integer(length(ont))
for (i in seq(along=ont))
lens[i] <- length(getAnnMap(paste(ont[i], "PARENTS", sep=""),
chip="GO"))
## retrieve IDs
ans <- character(sum(lens))
lens <- c(0L, lens)
for (i in seq(along=ont)) {
ans[lens[i]+1:lens[i+1]] <- ls(getAnnMap(paste(ont[i], "PARENTS", sep=""),
chip="GO"))
}
ans
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/semanticPage.R
\name{get_default_semantic_theme}
\alias{get_default_semantic_theme}
\title{Get default semantic css}
\usage{
get_default_semantic_theme(full_url = TRUE)
}
\arguments{
\item{full_url}{define return output filename or full path. Default TRUE}
}
\value{
path to default css semantic file or default filename
}
\description{
Get default semantic css
}
| /man/get_default_semantic_theme.Rd | permissive | ashbaldry/shiny.semantic | R | false | true | 442 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/semanticPage.R
\name{get_default_semantic_theme}
\alias{get_default_semantic_theme}
\title{Get default semantic css}
\usage{
get_default_semantic_theme(full_url = TRUE)
}
\arguments{
\item{full_url}{define return output filename or full path. Default TRUE}
}
\value{
path to default css semantic file or default filename
}
\description{
Get default semantic css
}
|
## Kaggle CareerCon 2019 Exploratory Data Analysis
# NOTE(review): setwd() with an absolute, machine-specific path makes this
# script non-portable; prefer running from the project root — TODO confirm
# with the author before changing, since all fread() paths below are relative.
setwd("H:/repos/kaggle-CareerCon2019/")
library(data.table)
# 1. Load Data --------------------------------------
# X test/train: the input data, covering 10 sensor channels and 128 measurements per time series plus three ID columns:
## row_id: The ID for this row.
## series_id: ID number for the measurement series. Foreign key to y_train/sample_submission.
## measurement_number: Measurement number within the series.
# Y: the surfaces for the training set
x_test <- fread("data/X_test.csv")
x_train <- fread("data/X_train.csv")
y_train <- fread("data/y_train.csv")
# Quick interactive sanity checks on the loaded tables.
summary(y_train)
summary(x_train)
#table(x_train$series_id)
# Note: 128 measurements for each series_id
#length(unique(x_train$row_id)) == length(x_train$row_id)
# Note: row_id is a unique identifier
# Class distribution of the target (surface type).
table(y_train$surface)
# Classification with 9 possible classes
# Note: variable sizes of outputs in training set
## To do:
# Create features from orientation, angular velocity, and linear acceleration.
## Feature extraction by series: collapse each series' 128 measurements into
## one row of per-channel summary statistics (mean, sd, median, IQR, mad).
cols <- c("orientation_X", "orientation_Y", "orientation_Z", "orientation_W",
          "angular_velocity_X", "angular_velocity_Y", "angular_velocity_Z",
          "linear_acceleration_X", "linear_acceleration_Y", "linear_acceleration_Z")

# Aggregate every sensor channel of `dt` by series_id with `fn`, suffixing the
# resulting columns with `label` (e.g. "orientation_X_mean").
# Returns a data.table with series_id plus one column per channel.
agg_by_series <- function(dt, fn, label) {
  agg <- dt[, lapply(.SD, fn), by = series_id, .SDcols = cols]
  setnames(agg, cols, paste(cols, label, sep = "_"))
}

# Statistics to compute per channel; list names become the column suffixes.
stat_fns <- list(mean = mean, sd = sd, median = median, IQR = IQR, mad = mad)

# Inner-join two per-series feature tables on series_id.
merge_on_series <- function(a, b) merge(a, b, by = "series_id")

# Training features: one table per statistic, folded together left-to-right
# (same order as the original mean/sd/median/IQR/mad merge chain), then the
# surface labels are joined on last.
train_feats <- lapply(names(stat_fns),
                      function(nm) agg_by_series(x_train, stat_fns[[nm]], nm))
y_train <- y_train[, .(series_id, surface)]
xy_train <- Reduce(merge_on_series, c(train_feats, list(y_train)))
rm(train_feats)

# Create corresponding x_test features the same way (no surface labels exist).
test_feats <- lapply(names(stat_fns),
                     function(nm) agg_by_series(x_test, stat_fns[[nm]], nm))
xy_test <- Reduce(merge_on_series, test_feats)
rm(test_feats)
# Reproducible row-index partition of xy_train:
#train_data 70%
#validation_data 15%
#test_data 15%
set.seed(314567)
size_train <- as.integer(.7 * nrow(xy_train))
size_validation <- as.integer(.15 * nrow(xy_train))
# Remainder goes to test so the three sets always cover every row exactly once.
size_test <- nrow(xy_train) - size_train - size_validation
# seq_len() is safe when nrow() is 0, unlike 1:nrow() which yields c(1, 0).
all_rows <- seq_len(nrow(xy_train))
train <- sample(all_rows, size_train)
not_train <- setdiff(all_rows, train)
validation <- sample(not_train, size_validation)
test <- setdiff(not_train, validation)
| /1_eda.r | no_license | kmsteuben/kaggle-CareerCon2019 | R | false | false | 4,417 | r | ## Kaggle CareerCon 2019 Exploratory Data Analysis
# NOTE(review): setwd() with an absolute, machine-specific path makes this
# script non-portable; prefer running from the project root — TODO confirm
# with the author before changing, since all fread() paths below are relative.
setwd("H:/repos/kaggle-CareerCon2019/")
library(data.table)
# 1. Load Data --------------------------------------
# X test/train: the input data, covering 10 sensor channels and 128 measurements per time series plus three ID columns:
## row_id: The ID for this row.
## series_id: ID number for the measurement series. Foreign key to y_train/sample_submission.
## measurement_number: Measurement number within the series.
# Y: the surfaces for the training set
x_test <- fread("data/X_test.csv")
x_train <- fread("data/X_train.csv")
y_train <- fread("data/y_train.csv")
# Quick interactive sanity checks on the loaded tables.
summary(y_train)
summary(x_train)
#table(x_train$series_id)
# Note: 128 measurements for each series_id
#length(unique(x_train$row_id)) == length(x_train$row_id)
# Note: row_id is a unique identifier
# Class distribution of the target (surface type).
table(y_train$surface)
# Classification with 9 possible classes
# Note: variable sizes of outputs in training set
## To do:
# Create features from orientation, angular velocity, and linear acceleration.
## Feature extraction by series: collapse each series' 128 measurements into
## one row of per-channel summary statistics (mean, sd, median, IQR, mad).
cols <- c("orientation_X", "orientation_Y", "orientation_Z", "orientation_W",
          "angular_velocity_X", "angular_velocity_Y", "angular_velocity_Z",
          "linear_acceleration_X", "linear_acceleration_Y", "linear_acceleration_Z")

# Aggregate every sensor channel of `dt` by series_id with `fn`, suffixing the
# resulting columns with `label` (e.g. "orientation_X_mean").
# Returns a data.table with series_id plus one column per channel.
agg_by_series <- function(dt, fn, label) {
  agg <- dt[, lapply(.SD, fn), by = series_id, .SDcols = cols]
  setnames(agg, cols, paste(cols, label, sep = "_"))
}

# Statistics to compute per channel; list names become the column suffixes.
stat_fns <- list(mean = mean, sd = sd, median = median, IQR = IQR, mad = mad)

# Inner-join two per-series feature tables on series_id.
merge_on_series <- function(a, b) merge(a, b, by = "series_id")

# Training features: one table per statistic, folded together left-to-right
# (same order as the original mean/sd/median/IQR/mad merge chain), then the
# surface labels are joined on last.
train_feats <- lapply(names(stat_fns),
                      function(nm) agg_by_series(x_train, stat_fns[[nm]], nm))
y_train <- y_train[, .(series_id, surface)]
xy_train <- Reduce(merge_on_series, c(train_feats, list(y_train)))
rm(train_feats)

# Create corresponding x_test features the same way (no surface labels exist).
test_feats <- lapply(names(stat_fns),
                     function(nm) agg_by_series(x_test, stat_fns[[nm]], nm))
xy_test <- Reduce(merge_on_series, test_feats)
rm(test_feats)
# Reproducible row-index partition of xy_train:
#train_data 70%
#validation_data 15%
#test_data 15%
set.seed(314567)
size_train <- as.integer(.7 * nrow(xy_train))
size_validation <- as.integer(.15 * nrow(xy_train))
# Remainder goes to test so the three sets always cover every row exactly once.
size_test <- nrow(xy_train) - size_train - size_validation
# seq_len() is safe when nrow() is 0, unlike 1:nrow() which yields c(1, 0).
all_rows <- seq_len(nrow(xy_train))
train <- sample(all_rows, size_train)
not_train <- setdiff(all_rows, train)
validation <- sample(not_train, size_validation)
test <- setdiff(not_train, validation)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.